author    Myles Borins <mylesborins@google.com>  2017-08-01 11:36:44 -0500
committer Myles Borins <mylesborins@google.com>  2017-08-01 15:23:15 -0500
commit    0a66b223e149a841669bfad5598e4254589730cb (patch)
tree      5ec050f7f78aafbf5b1e0e50d639fb843141e162
parent    1782b3836ba58ef0da6b687f2bb970c0bd8199ad (diff)
download  android-node-v8-0a66b223e149a841669bfad5598e4254589730cb.tar.gz
          android-node-v8-0a66b223e149a841669bfad5598e4254589730cb.tar.bz2
          android-node-v8-0a66b223e149a841669bfad5598e4254589730cb.zip
deps: update V8 to 6.0.286.52
PR-URL: https://github.com/nodejs/node/pull/14004
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
-rw-r--r-- deps/v8/.gitignore | 10
-rw-r--r-- deps/v8/.gn | 6
-rw-r--r-- deps/v8/AUTHORS | 1
-rw-r--r-- deps/v8/BUILD.gn | 262
-rw-r--r-- deps/v8/ChangeLog | 1490
-rw-r--r-- deps/v8/DEPS | 37
-rw-r--r-- deps/v8/Makefile | 7
-rw-r--r-- deps/v8/PRESUBMIT.py | 14
-rw-r--r-- deps/v8/base/trace_event/common/trace_event_common.h | 6
-rw-r--r-- deps/v8/codereview.settings | 6
-rw-r--r-- deps/v8/gypfiles/all.gyp | 1
-rwxr-xr-x deps/v8/gypfiles/coverage_wrapper.py | 2
-rw-r--r-- deps/v8/gypfiles/features.gypi | 8
-rw-r--r-- deps/v8/gypfiles/standalone.gypi | 7
-rw-r--r-- deps/v8/gypfiles/toolchain.gypi | 2
-rw-r--r-- deps/v8/include/libplatform/libplatform.h | 19
-rw-r--r-- deps/v8/include/v8-value-serializer-version.h | 24
-rw-r--r-- deps/v8/include/v8-version.h | 8
-rw-r--r-- deps/v8/include/v8.h | 135
-rw-r--r-- deps/v8/include/v8config.h | 2
-rw-r--r-- deps/v8/infra/config/cq.cfg | 6
-rw-r--r-- deps/v8/infra/mb/mb_config.pyl | 29
-rw-r--r-- deps/v8/src/DEPS | 2
-rw-r--r-- deps/v8/src/OWNERS | 6
-rw-r--r-- deps/v8/src/accessors.cc | 24
-rw-r--r-- deps/v8/src/accessors.h | 2
-rw-r--r-- deps/v8/src/api-arguments.h | 5
-rw-r--r-- deps/v8/src/api-natives.cc | 45
-rw-r--r-- deps/v8/src/api.cc | 414
-rw-r--r-- deps/v8/src/api.h | 15
-rw-r--r-- deps/v8/src/arm/assembler-arm-inl.h | 118
-rw-r--r-- deps/v8/src/arm/assembler-arm.cc | 471
-rw-r--r-- deps/v8/src/arm/assembler-arm.h | 50
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.cc | 80
-rw-r--r-- deps/v8/src/arm/constants-arm.h | 19
-rw-r--r-- deps/v8/src/arm/deoptimizer-arm.cc | 57
-rw-r--r-- deps/v8/src/arm/disasm-arm.cc | 99
-rw-r--r-- deps/v8/src/arm/frames-arm.cc | 8
-rw-r--r-- deps/v8/src/arm/frames-arm.h | 16
-rw-r--r-- deps/v8/src/arm/interface-descriptors-arm.cc | 18
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.cc | 278
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.h | 25
-rw-r--r-- deps/v8/src/arm/simulator-arm.cc | 185
-rw-r--r-- deps/v8/src/arm/simulator-arm.h | 17
-rw-r--r-- deps/v8/src/arm64/assembler-arm64-inl.h | 14
-rw-r--r-- deps/v8/src/arm64/assembler-arm64.cc | 9
-rw-r--r-- deps/v8/src/arm64/assembler-arm64.h | 3
-rw-r--r-- deps/v8/src/arm64/code-stubs-arm64.cc | 71
-rw-r--r-- deps/v8/src/arm64/deoptimizer-arm64.cc | 25
-rw-r--r-- deps/v8/src/arm64/interface-descriptors-arm64.cc | 18
-rw-r--r-- deps/v8/src/arm64/macro-assembler-arm64.cc | 107
-rw-r--r-- deps/v8/src/arm64/macro-assembler-arm64.h | 18
-rw-r--r-- deps/v8/src/arm64/simulator-arm64.cc | 69
-rw-r--r-- deps/v8/src/arm64/simulator-arm64.h | 11
-rw-r--r-- deps/v8/src/asmjs/OWNERS | 3
-rw-r--r-- deps/v8/src/asmjs/asm-js.cc | 523
-rw-r--r-- deps/v8/src/asmjs/asm-js.h | 20
-rw-r--r-- deps/v8/src/asmjs/asm-names.h | 19
-rw-r--r-- deps/v8/src/asmjs/asm-parser.cc | 463
-rw-r--r-- deps/v8/src/asmjs/asm-parser.h | 115
-rw-r--r-- deps/v8/src/asmjs/asm-scanner.cc | 23
-rw-r--r-- deps/v8/src/asmjs/asm-scanner.h | 33
-rw-r--r-- deps/v8/src/asmjs/asm-typer.cc | 2963
-rw-r--r-- deps/v8/src/asmjs/asm-typer.h | 420
-rw-r--r-- deps/v8/src/asmjs/asm-types.h | 6
-rw-r--r-- deps/v8/src/asmjs/asm-wasm-builder.cc | 2025
-rw-r--r-- deps/v8/src/asmjs/asm-wasm-builder.h | 45
-rw-r--r-- deps/v8/src/asmjs/switch-logic.h | 7
-rw-r--r-- deps/v8/src/assembler.cc | 54
-rw-r--r-- deps/v8/src/assembler.h | 16
-rw-r--r-- deps/v8/src/ast/ast-expression-rewriter.cc | 2
-rw-r--r-- deps/v8/src/ast/ast-numbering.cc | 3
-rw-r--r-- deps/v8/src/ast/ast-types.cc | 8
-rw-r--r-- deps/v8/src/ast/ast-value-factory.cc | 10
-rw-r--r-- deps/v8/src/ast/ast-value-factory.h | 21
-rw-r--r-- deps/v8/src/ast/ast.cc | 106
-rw-r--r-- deps/v8/src/ast/ast.h | 198
-rw-r--r-- deps/v8/src/ast/compile-time-value.cc | 20
-rw-r--r-- deps/v8/src/ast/compile-time-value.h | 15
-rw-r--r-- deps/v8/src/ast/scopes.cc | 86
-rw-r--r-- deps/v8/src/ast/scopes.h | 20
-rw-r--r-- deps/v8/src/ast/variables.cc | 1
-rw-r--r-- deps/v8/src/ast/variables.h | 51
-rw-r--r-- deps/v8/src/background-parsing-task.cc | 3
-rw-r--r-- deps/v8/src/bailout-reason.h | 2
-rw-r--r-- deps/v8/src/base/bits.cc | 36
-rw-r--r-- deps/v8/src/base/bits.h | 12
-rw-r--r-- deps/v8/src/base/build_config.h | 6
-rw-r--r-- deps/v8/src/base/debug/stack_trace_win.cc | 4
-rw-r--r-- deps/v8/src/base/export-template.h | 163
-rw-r--r-- deps/v8/src/base/logging.h | 12
-rw-r--r-- deps/v8/src/base/platform/platform-aix.cc | 5
-rw-r--r-- deps/v8/src/base/platform/platform-cygwin.cc | 8
-rw-r--r-- deps/v8/src/base/platform/platform-freebsd.cc | 7
-rw-r--r-- deps/v8/src/base/platform/platform-linux.cc | 4
-rw-r--r-- deps/v8/src/base/platform/platform-macos.cc | 8
-rw-r--r-- deps/v8/src/base/platform/platform-openbsd.cc | 7
-rw-r--r-- deps/v8/src/base/platform/platform-posix.cc | 24
-rw-r--r-- deps/v8/src/base/platform/platform-posix.h | 3
-rw-r--r-- deps/v8/src/base/platform/platform-qnx.cc | 7
-rw-r--r-- deps/v8/src/base/platform/platform-solaris.cc | 7
-rw-r--r-- deps/v8/src/base/platform/platform-win32.cc | 27
-rw-r--r-- deps/v8/src/base/platform/platform.h | 12
-rw-r--r-- deps/v8/src/bootstrapper.cc | 347
-rw-r--r-- deps/v8/src/bootstrapper.h | 10
-rw-r--r-- deps/v8/src/builtins/arm/builtins-arm.cc | 577
-rw-r--r-- deps/v8/src/builtins/arm64/builtins-arm64.cc | 547
-rw-r--r-- deps/v8/src/builtins/builtins-api.cc | 9
-rw-r--r-- deps/v8/src/builtins/builtins-array-gen.cc | 1465
-rw-r--r-- deps/v8/src/builtins/builtins-array.cc | 17
-rw-r--r-- deps/v8/src/builtins/builtins-arraybuffer.cc | 3
-rw-r--r-- deps/v8/src/builtins/builtins-async-generator-gen.cc | 66
-rw-r--r-- deps/v8/src/builtins/builtins-call-gen.cc | 5
-rw-r--r-- deps/v8/src/builtins/builtins-console-gen.cc | 38
-rw-r--r-- deps/v8/src/builtins/builtins-console.cc | 59
-rw-r--r-- deps/v8/src/builtins/builtins-constructor-gen.cc | 240
-rw-r--r-- deps/v8/src/builtins/builtins-constructor-gen.h | 6
-rw-r--r-- deps/v8/src/builtins/builtins-constructor.h | 18
-rw-r--r-- deps/v8/src/builtins/builtins-conversion-gen.cc | 30
-rw-r--r-- deps/v8/src/builtins/builtins-definitions.h | 134
-rw-r--r-- deps/v8/src/builtins/builtins-forin-gen.cc | 2
-rw-r--r-- deps/v8/src/builtins/builtins-function.cc | 3
-rw-r--r-- deps/v8/src/builtins/builtins-handler-gen.cc | 17
-rw-r--r-- deps/v8/src/builtins/builtins-internal-gen.cc | 125
-rw-r--r-- deps/v8/src/builtins/builtins-intl-gen.cc | 124
-rw-r--r-- deps/v8/src/builtins/builtins-intl.cc | 19
-rw-r--r-- deps/v8/src/builtins/builtins-number.cc | 15
-rw-r--r-- deps/v8/src/builtins/builtins-object-gen.cc | 178
-rw-r--r-- deps/v8/src/builtins/builtins-object.cc | 53
-rw-r--r-- deps/v8/src/builtins/builtins-regexp-gen.cc | 214
-rw-r--r-- deps/v8/src/builtins/builtins-regexp-gen.h | 8
-rw-r--r-- deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc | 195
-rw-r--r-- deps/v8/src/builtins/builtins-sharedarraybuffer.cc | 26
-rw-r--r-- deps/v8/src/builtins/builtins-string-gen.cc | 889
-rw-r--r-- deps/v8/src/builtins/builtins-string-gen.h | 95
-rw-r--r-- deps/v8/src/builtins/builtins-string.cc | 8
-rw-r--r-- deps/v8/src/builtins/builtins-typedarray-gen.cc | 351
-rw-r--r-- deps/v8/src/builtins/builtins-typedarray.cc | 80
-rw-r--r-- deps/v8/src/builtins/builtins.cc | 69
-rw-r--r-- deps/v8/src/builtins/builtins.h | 23
-rw-r--r-- deps/v8/src/builtins/ia32/builtins-ia32.cc | 536
-rw-r--r-- deps/v8/src/builtins/mips/OWNERS | 7
-rw-r--r-- deps/v8/src/builtins/mips/builtins-mips.cc | 581
-rw-r--r-- deps/v8/src/builtins/mips64/OWNERS | 7
-rw-r--r-- deps/v8/src/builtins/mips64/builtins-mips64.cc | 934
-rw-r--r-- deps/v8/src/builtins/ppc/builtins-ppc.cc | 568
-rw-r--r-- deps/v8/src/builtins/s390/builtins-s390.cc | 562
-rw-r--r-- deps/v8/src/builtins/x64/builtins-x64.cc | 492
-rw-r--r-- deps/v8/src/char-predicates.cc | 12
-rw-r--r-- deps/v8/src/code-factory.cc | 67
-rw-r--r-- deps/v8/src/code-factory.h | 15
-rw-r--r-- deps/v8/src/code-stub-assembler.cc | 1083
-rw-r--r-- deps/v8/src/code-stub-assembler.h | 212
-rw-r--r-- deps/v8/src/code-stubs.h | 32
-rw-r--r-- deps/v8/src/codegen.cc | 12
-rw-r--r-- deps/v8/src/compilation-cache.cc | 10
-rw-r--r-- deps/v8/src/compilation-cache.h | 6
-rw-r--r-- deps/v8/src/compilation-dependencies.h | 1
-rw-r--r-- deps/v8/src/compilation-info.cc | 3
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc | 12
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h | 9
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc | 35
-rw-r--r-- deps/v8/src/compiler-dispatcher/compiler-dispatcher.h | 7
-rw-r--r-- deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc | 2
-rw-r--r-- deps/v8/src/compiler.cc | 176
-rw-r--r-- deps/v8/src/compiler.h | 1
-rw-r--r-- deps/v8/src/compiler/OWNERS | 2
-rw-r--r-- deps/v8/src/compiler/access-builder.cc | 69
-rw-r--r-- deps/v8/src/compiler/access-builder.h | 24
-rw-r--r-- deps/v8/src/compiler/access-info.cc | 166
-rw-r--r-- deps/v8/src/compiler/access-info.h | 38
-rw-r--r-- deps/v8/src/compiler/arm/code-generator-arm.cc | 538
-rw-r--r-- deps/v8/src/compiler/arm/instruction-codes-arm.h | 33
-rw-r--r-- deps/v8/src/compiler/arm/instruction-scheduler-arm.cc | 33
-rw-r--r-- deps/v8/src/compiler/arm/instruction-selector-arm.cc | 388
-rw-r--r-- deps/v8/src/compiler/arm64/code-generator-arm64.cc | 75
-rw-r--r-- deps/v8/src/compiler/arm64/instruction-selector-arm64.cc | 15
-rw-r--r-- deps/v8/src/compiler/ast-graph-builder.cc | 51
-rw-r--r-- deps/v8/src/compiler/ast-graph-builder.h | 9
-rw-r--r-- deps/v8/src/compiler/bytecode-analysis.cc | 13
-rw-r--r-- deps/v8/src/compiler/bytecode-analysis.h | 6
-rw-r--r-- deps/v8/src/compiler/bytecode-graph-builder.cc | 199
-rw-r--r-- deps/v8/src/compiler/bytecode-graph-builder.h | 15
-rw-r--r-- deps/v8/src/compiler/c-linkage.cc | 2
-rw-r--r-- deps/v8/src/compiler/code-assembler.cc | 39
-rw-r--r-- deps/v8/src/compiler/code-assembler.h | 22
-rw-r--r-- deps/v8/src/compiler/code-generator.cc | 102
-rw-r--r-- deps/v8/src/compiler/code-generator.h | 37
-rw-r--r-- deps/v8/src/compiler/common-operator.cc | 28
-rw-r--r-- deps/v8/src/compiler/common-operator.h | 6
-rw-r--r-- deps/v8/src/compiler/effect-control-linearizer.cc | 10
-rw-r--r-- deps/v8/src/compiler/effect-control-linearizer.h | 1
-rw-r--r-- deps/v8/src/compiler/escape-analysis.cc | 86
-rw-r--r-- deps/v8/src/compiler/escape-analysis.h | 2
-rw-r--r-- deps/v8/src/compiler/frame.h | 13
-rw-r--r-- deps/v8/src/compiler/graph-reducer.cc | 8
-rw-r--r-- deps/v8/src/compiler/graph-reducer.h | 2
-rw-r--r-- deps/v8/src/compiler/ia32/code-generator-ia32.cc | 2
-rw-r--r-- deps/v8/src/compiler/ia32/instruction-selector-ia32.cc | 20
-rw-r--r-- deps/v8/src/compiler/instruction-selector.cc | 206
-rw-r--r-- deps/v8/src/compiler/instruction.h | 2
-rw-r--r-- deps/v8/src/compiler/js-builtin-reducer.cc | 33
-rw-r--r-- deps/v8/src/compiler/js-call-reducer.cc | 355
-rw-r--r-- deps/v8/src/compiler/js-call-reducer.h | 13
-rw-r--r-- deps/v8/src/compiler/js-context-specialization.cc | 53
-rw-r--r-- deps/v8/src/compiler/js-context-specialization.h | 20
-rw-r--r-- deps/v8/src/compiler/js-create-lowering.cc | 185
-rw-r--r-- deps/v8/src/compiler/js-create-lowering.h | 7
-rw-r--r-- deps/v8/src/compiler/js-frame-specialization.cc | 9
-rw-r--r-- deps/v8/src/compiler/js-frame-specialization.h | 1
-rw-r--r-- deps/v8/src/compiler/js-generic-lowering.cc | 49
-rw-r--r-- deps/v8/src/compiler/js-graph.cc | 8
-rw-r--r-- deps/v8/src/compiler/js-graph.h | 5
-rw-r--r-- deps/v8/src/compiler/js-inlining-heuristic.cc | 84
-rw-r--r-- deps/v8/src/compiler/js-inlining-heuristic.h | 9
-rw-r--r-- deps/v8/src/compiler/js-inlining.cc | 119
-rw-r--r-- deps/v8/src/compiler/js-intrinsic-lowering.cc | 50
-rw-r--r-- deps/v8/src/compiler/js-intrinsic-lowering.h | 2
-rw-r--r-- deps/v8/src/compiler/js-native-context-specialization.cc | 305
-rw-r--r-- deps/v8/src/compiler/js-native-context-specialization.h | 28
-rw-r--r-- deps/v8/src/compiler/js-operator.cc | 53
-rw-r--r-- deps/v8/src/compiler/js-operator.h | 106
-rw-r--r-- deps/v8/src/compiler/js-typed-lowering.cc | 296
-rw-r--r-- deps/v8/src/compiler/js-typed-lowering.h | 5
-rw-r--r-- deps/v8/src/compiler/linkage.cc | 8
-rw-r--r-- deps/v8/src/compiler/load-elimination.cc | 57
-rw-r--r-- deps/v8/src/compiler/load-elimination.h | 26
-rw-r--r-- deps/v8/src/compiler/machine-operator.cc | 113
-rw-r--r-- deps/v8/src/compiler/machine-operator.h | 45
-rw-r--r-- deps/v8/src/compiler/mips/OWNERS | 7
-rw-r--r-- deps/v8/src/compiler/mips/code-generator-mips.cc | 345
-rw-r--r-- deps/v8/src/compiler/mips/instruction-codes-mips.h | 54
-rw-r--r-- deps/v8/src/compiler/mips/instruction-selector-mips.cc | 218
-rw-r--r-- deps/v8/src/compiler/mips64/OWNERS | 7
-rw-r--r-- deps/v8/src/compiler/mips64/code-generator-mips64.cc | 464
-rw-r--r-- deps/v8/src/compiler/mips64/instruction-codes-mips64.h | 54
-rw-r--r-- deps/v8/src/compiler/mips64/instruction-selector-mips64.cc | 219
-rw-r--r-- deps/v8/src/compiler/node-properties.cc | 50
-rw-r--r-- deps/v8/src/compiler/node-properties.h | 10
-rw-r--r-- deps/v8/src/compiler/opcodes.h | 23
-rw-r--r-- deps/v8/src/compiler/operator-properties.cc | 5
-rw-r--r-- deps/v8/src/compiler/operator.cc | 6
-rw-r--r-- deps/v8/src/compiler/osr.cc | 23
-rw-r--r-- deps/v8/src/compiler/pipeline.cc | 122
-rw-r--r-- deps/v8/src/compiler/ppc/code-generator-ppc.cc | 13
-rw-r--r-- deps/v8/src/compiler/ppc/instruction-selector-ppc.cc | 8
-rw-r--r-- deps/v8/src/compiler/raw-machine-assembler.cc | 45
-rw-r--r-- deps/v8/src/compiler/raw-machine-assembler.h | 20
-rw-r--r-- deps/v8/src/compiler/representation-change.cc | 29
-rw-r--r-- deps/v8/src/compiler/representation-change.h | 4
-rw-r--r-- deps/v8/src/compiler/s390/instruction-selector-s390.cc | 26
-rw-r--r-- deps/v8/src/compiler/schedule.cc | 19
-rw-r--r-- deps/v8/src/compiler/schedule.h | 6
-rw-r--r-- deps/v8/src/compiler/simd-scalar-lowering.cc | 834
-rw-r--r-- deps/v8/src/compiler/simd-scalar-lowering.h | 46
-rw-r--r-- deps/v8/src/compiler/simplified-lowering.cc | 100
-rw-r--r-- deps/v8/src/compiler/simplified-operator.cc | 1
-rw-r--r-- deps/v8/src/compiler/simplified-operator.h | 3
-rw-r--r-- deps/v8/src/compiler/typer.cc | 36
-rw-r--r-- deps/v8/src/compiler/types.cc | 13
-rw-r--r-- deps/v8/src/compiler/types.h | 7
-rw-r--r-- deps/v8/src/compiler/verifier.cc | 47
-rw-r--r-- deps/v8/src/compiler/wasm-compiler.cc | 279
-rw-r--r-- deps/v8/src/compiler/wasm-compiler.h | 39
-rw-r--r-- deps/v8/src/compiler/x64/code-generator-x64.cc | 224
-rw-r--r-- deps/v8/src/compiler/x64/instruction-codes-x64.h | 43
-rw-r--r-- deps/v8/src/compiler/x64/instruction-scheduler-x64.cc | 43
-rw-r--r-- deps/v8/src/compiler/x64/instruction-selector-x64.cc | 97
-rw-r--r-- deps/v8/src/compiler/x87/instruction-selector-x87.cc | 8
-rw-r--r-- deps/v8/src/compiler/zone-stats.cc | 6
-rw-r--r-- deps/v8/src/compiler/zone-stats.h | 6
-rw-r--r-- deps/v8/src/contexts-inl.h | 4
-rw-r--r-- deps/v8/src/contexts.cc | 46
-rw-r--r-- deps/v8/src/contexts.h | 136
-rw-r--r-- deps/v8/src/counters.h | 44
-rw-r--r-- deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc | 2
-rw-r--r-- deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc | 3
-rw-r--r-- deps/v8/src/crankshaft/hydrogen-environment-liveness.cc | 2
-rw-r--r-- deps/v8/src/crankshaft/hydrogen-gvn.cc | 28
-rw-r--r-- deps/v8/src/crankshaft/hydrogen-instructions.h | 5
-rw-r--r-- deps/v8/src/crankshaft/hydrogen.cc | 321
-rw-r--r-- deps/v8/src/crankshaft/hydrogen.h | 21
-rw-r--r-- deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc | 3
-rw-r--r-- deps/v8/src/crankshaft/mips/OWNERS | 7
-rw-r--r-- deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc | 12
-rw-r--r-- deps/v8/src/crankshaft/mips64/OWNERS | 7
-rw-r--r-- deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc | 283
-rw-r--r-- deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc | 47
-rw-r--r-- deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc | 3
-rw-r--r-- deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc | 3
-rw-r--r-- deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc | 2
-rw-r--r-- deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc | 3
-rw-r--r-- deps/v8/src/d8-console.cc | 111
-rw-r--r-- deps/v8/src/d8-console.h | 34
-rw-r--r-- deps/v8/src/d8.cc | 292
-rw-r--r-- deps/v8/src/d8.gyp | 2
-rw-r--r-- deps/v8/src/d8.h | 14
-rw-r--r-- deps/v8/src/date.cc | 6
-rw-r--r-- deps/v8/src/debug/debug-evaluate.cc | 151
-rw-r--r-- deps/v8/src/debug/debug-evaluate.h | 1
-rw-r--r-- deps/v8/src/debug/debug-interface.h | 14
-rw-r--r-- deps/v8/src/debug/debug.cc | 103
-rw-r--r-- deps/v8/src/debug/debug.h | 5
-rw-r--r-- deps/v8/src/debug/interface-types.h | 48
-rw-r--r-- deps/v8/src/debug/mips/OWNERS | 7
-rw-r--r-- deps/v8/src/debug/mips/debug-mips.cc | 8
-rw-r--r-- deps/v8/src/debug/mips64/OWNERS | 7
-rw-r--r-- deps/v8/src/debug/mips64/debug-mips64.cc | 6
-rw-r--r-- deps/v8/src/deoptimizer.cc | 136
-rw-r--r-- deps/v8/src/deoptimizer.h | 3
-rw-r--r-- deps/v8/src/elements.cc | 423
-rw-r--r-- deps/v8/src/elements.h | 12
-rw-r--r-- deps/v8/src/execution.cc | 6
-rw-r--r-- deps/v8/src/execution.h | 3
-rw-r--r-- deps/v8/src/extensions/statistics-extension.cc | 6
-rw-r--r-- deps/v8/src/external-reference-table.cc | 44
-rw-r--r-- deps/v8/src/external-reference-table.h | 9
-rw-r--r-- deps/v8/src/factory.cc | 77
-rw-r--r-- deps/v8/src/factory.h | 16
-rw-r--r-- deps/v8/src/feedback-vector-inl.h | 9
-rw-r--r-- deps/v8/src/feedback-vector.cc | 71
-rw-r--r-- deps/v8/src/feedback-vector.h | 34
-rw-r--r-- deps/v8/src/field-type.h | 1
-rw-r--r-- deps/v8/src/flag-definitions.h | 121
-rw-r--r-- deps/v8/src/flags.cc | 25
-rw-r--r-- deps/v8/src/frames.cc | 64
-rw-r--r-- deps/v8/src/frames.h | 43
-rw-r--r-- deps/v8/src/full-codegen/arm/full-codegen-arm.cc | 57
-rw-r--r-- deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc | 18
-rw-r--r-- deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc | 19
-rw-r--r-- deps/v8/src/full-codegen/mips/OWNERS | 7
-rw-r--r-- deps/v8/src/full-codegen/mips/full-codegen-mips.cc | 19
-rw-r--r-- deps/v8/src/full-codegen/mips64/OWNERS | 7
-rw-r--r-- deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc | 217
-rw-r--r-- deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc | 19
-rw-r--r-- deps/v8/src/full-codegen/s390/full-codegen-s390.cc | 19
-rw-r--r-- deps/v8/src/full-codegen/x64/full-codegen-x64.cc | 19
-rw-r--r-- deps/v8/src/full-codegen/x87/full-codegen-x87.cc | 19
-rw-r--r-- deps/v8/src/global-handles.cc | 133
-rw-r--r-- deps/v8/src/global-handles.h | 40
-rw-r--r-- deps/v8/src/globals.h | 31
-rw-r--r-- deps/v8/src/handles-inl.h | 1
-rw-r--r-- deps/v8/src/handles.h | 5
-rw-r--r-- deps/v8/src/heap-symbols.h | 2
-rw-r--r-- deps/v8/src/heap/array-buffer-tracker.cc | 27
-rw-r--r-- deps/v8/src/heap/array-buffer-tracker.h | 15
-rw-r--r-- deps/v8/src/heap/concurrent-marking-deque.h | 175
-rw-r--r-- deps/v8/src/heap/concurrent-marking.cc | 278
-rw-r--r-- deps/v8/src/heap/concurrent-marking.h | 12
-rw-r--r-- deps/v8/src/heap/gc-tracer.cc | 40
-rw-r--r-- deps/v8/src/heap/gc-tracer.h | 142
-rw-r--r-- deps/v8/src/heap/heap-inl.h | 54
-rw-r--r-- deps/v8/src/heap/heap.cc | 571
-rw-r--r-- deps/v8/src/heap/heap.h | 256
-rw-r--r-- deps/v8/src/heap/incremental-marking-job.cc | 5
-rw-r--r-- deps/v8/src/heap/incremental-marking.cc | 321
-rw-r--r-- deps/v8/src/heap/incremental-marking.h | 115
-rw-r--r-- deps/v8/src/heap/item-parallel-job.h | 178
-rw-r--r-- deps/v8/src/heap/mark-compact-inl.h | 44
-rw-r--r-- deps/v8/src/heap/mark-compact.cc | 2054
-rw-r--r-- deps/v8/src/heap/mark-compact.h | 421
-rw-r--r-- deps/v8/src/heap/marking.h | 27
-rw-r--r-- deps/v8/src/heap/memory-reducer.cc | 3
-rw-r--r-- deps/v8/src/heap/object-stats.cc | 12
-rw-r--r-- deps/v8/src/heap/objects-visiting-inl.h | 138
-rw-r--r-- deps/v8/src/heap/objects-visiting.cc | 10
-rw-r--r-- deps/v8/src/heap/objects-visiting.h | 114
-rw-r--r-- deps/v8/src/heap/page-parallel-job.h | 27
-rw-r--r-- deps/v8/src/heap/scavenger.cc | 30
-rw-r--r-- deps/v8/src/heap/scavenger.h | 9
-rw-r--r-- deps/v8/src/heap/sequential-marking-deque.cc | 98
-rw-r--r-- deps/v8/src/heap/sequential-marking-deque.h | 172
-rw-r--r-- deps/v8/src/heap/spaces-inl.h | 9
-rw-r--r-- deps/v8/src/heap/spaces.cc | 121
-rw-r--r-- deps/v8/src/heap/spaces.h | 88
-rw-r--r-- deps/v8/src/heap/workstealing-marking-deque.h | 167
-rw-r--r-- deps/v8/src/ia32/assembler-ia32-inl.h | 16
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.cc | 42
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.h | 68
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.cc | 80
-rw-r--r-- deps/v8/src/ia32/deoptimizer-ia32.cc | 35
-rw-r--r-- deps/v8/src/ia32/disasm-ia32.cc | 60
-rw-r--r-- deps/v8/src/ia32/interface-descriptors-ia32.cc | 18
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.cc | 3
-rw-r--r-- deps/v8/src/ic/accessor-assembler.cc | 94
-rw-r--r-- deps/v8/src/ic/accessor-assembler.h | 2
-rw-r--r-- deps/v8/src/ic/arm/handler-compiler-arm.cc | 5
-rw-r--r-- deps/v8/src/ic/arm64/handler-compiler-arm64.cc | 5
-rw-r--r-- deps/v8/src/ic/ia32/handler-compiler-ia32.cc | 5
-rw-r--r-- deps/v8/src/ic/ic.cc | 114
-rw-r--r-- deps/v8/src/ic/ic.h | 17
-rw-r--r-- deps/v8/src/ic/keyed-store-generic.cc | 10
-rw-r--r-- deps/v8/src/ic/mips/OWNERS | 7
-rw-r--r-- deps/v8/src/ic/mips/handler-compiler-mips.cc | 5
-rw-r--r-- deps/v8/src/ic/mips64/OWNERS | 7
-rw-r--r-- deps/v8/src/ic/mips64/handler-compiler-mips64.cc | 48
-rw-r--r-- deps/v8/src/ic/ppc/handler-compiler-ppc.cc | 5
-rw-r--r-- deps/v8/src/ic/s390/handler-compiler-s390.cc | 5
-rw-r--r-- deps/v8/src/ic/stub-cache.cc | 38
-rw-r--r-- deps/v8/src/ic/stub-cache.h | 3
-rw-r--r-- deps/v8/src/ic/x64/handler-compiler-x64.cc | 5
-rw-r--r-- deps/v8/src/icu_util.cc | 8
-rw-r--r-- deps/v8/src/identity-map.cc | 6
-rw-r--r-- deps/v8/src/inspector/DEPS | 1
-rw-r--r-- deps/v8/src/inspector/debugger-script.js | 7
-rw-r--r-- deps/v8/src/inspector/debugger_script_externs.js | 3
-rw-r--r-- deps/v8/src/inspector/inspected-context.cc | 28
-rw-r--r-- deps/v8/src/inspector/inspector.gyp | 32
-rw-r--r-- deps/v8/src/inspector/js_protocol.json | 11
-rw-r--r-- deps/v8/src/inspector/string-util.cc | 12
-rw-r--r-- deps/v8/src/inspector/string-util.h | 1
-rw-r--r-- deps/v8/src/inspector/test-interface.cc | 8
-rw-r--r-- deps/v8/src/inspector/test-interface.h | 1
-rw-r--r-- deps/v8/src/inspector/v8-console.cc | 183
-rw-r--r-- deps/v8/src/inspector/v8-console.h | 57
-rw-r--r-- deps/v8/src/inspector/v8-debugger-agent-impl.cc | 62
-rw-r--r-- deps/v8/src/inspector/v8-debugger-agent-impl.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-debugger-script.cc | 6
-rw-r--r-- deps/v8/src/inspector/v8-debugger.cc | 298
-rw-r--r-- deps/v8/src/inspector/v8-debugger.h | 58
-rw-r--r-- deps/v8/src/inspector/v8-function-call.cc | 2
-rw-r--r-- deps/v8/src/inspector/v8-inspector-impl.cc | 8
-rw-r--r-- deps/v8/src/inspector/v8-regex.cc | 1
-rw-r--r-- deps/v8/src/inspector/v8-stack-trace-impl.cc | 473
-rw-r--r-- deps/v8/src/inspector/v8-stack-trace-impl.h | 162
-rw-r--r-- deps/v8/src/interface-descriptors.cc | 30
-rw-r--r-- deps/v8/src/interface-descriptors.h | 64
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-accessor.cc | 99
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-accessor.h | 59
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-builder.cc | 125
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-builder.h | 41
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-writer.cc | 55
-rw-r--r-- deps/v8/src/interpreter/bytecode-array-writer.h | 26
-rw-r--r-- deps/v8/src/interpreter/bytecode-flags.cc | 15
-rw-r--r-- deps/v8/src/interpreter/bytecode-flags.h | 10
-rw-r--r-- deps/v8/src/interpreter/bytecode-generator.cc | 444
-rw-r--r-- deps/v8/src/interpreter/bytecode-generator.h | 9
-rw-r--r-- deps/v8/src/interpreter/bytecode-jump-table.h | 88
-rw-r--r-- deps/v8/src/interpreter/bytecode-node.cc (renamed from deps/v8/src/interpreter/bytecode-pipeline.cc) | 12
-rw-r--r-- deps/v8/src/interpreter/bytecode-node.h (renamed from deps/v8/src/interpreter/bytecode-pipeline.h) | 166
-rw-r--r-- deps/v8/src/interpreter/bytecode-register-optimizer.cc | 25
-rw-r--r-- deps/v8/src/interpreter/bytecode-register-optimizer.h | 12
-rw-r--r-- deps/v8/src/interpreter/bytecode-source-info.cc | 24
-rw-r--r-- deps/v8/src/interpreter/bytecode-source-info.h | 98
-rw-r--r-- deps/v8/src/interpreter/bytecodes.h | 15
-rw-r--r-- deps/v8/src/interpreter/constant-array-builder.cc | 39
-rw-r--r-- deps/v8/src/interpreter/constant-array-builder.h | 29
-rw-r--r-- deps/v8/src/interpreter/control-flow-builders.cc | 36
-rw-r--r-- deps/v8/src/interpreter/control-flow-builders.h | 16
-rw-r--r-- deps/v8/src/interpreter/interpreter-assembler.cc | 118
-rw-r--r-- deps/v8/src/interpreter/interpreter-generator.cc | 685
-rw-r--r-- deps/v8/src/interpreter/interpreter-intrinsics-generator.cc | 109
-rw-r--r-- deps/v8/src/interpreter/interpreter-intrinsics.h | 51
-rw-r--r-- deps/v8/src/interpreter/interpreter.cc | 10
-rw-r--r-- deps/v8/src/interpreter/interpreter.h | 3
-rw-r--r-- deps/v8/src/intl.cc | 403
-rw-r--r-- deps/v8/src/intl.h | 69
-rw-r--r-- deps/v8/src/isolate.cc | 112
-rw-r--r-- deps/v8/src/isolate.h | 52
-rw-r--r-- deps/v8/src/js/array.js | 63
-rw-r--r-- deps/v8/src/js/harmony-string-padding.js | 73
-rw-r--r-- deps/v8/src/js/intl.js (renamed from deps/v8/src/js/i18n.js) | 187
-rw-r--r-- deps/v8/src/js/macros.py | 13
-rw-r--r-- deps/v8/src/js/max-min.js | 28
-rw-r--r-- deps/v8/src/js/runtime.js | 104
-rw-r--r-- deps/v8/src/js/string.js | 100
-rw-r--r-- deps/v8/src/js/typedarray.js | 240
-rw-r--r-- deps/v8/src/js/v8natives.js | 11
-rw-r--r-- deps/v8/src/json-parser.cc | 76
-rw-r--r-- deps/v8/src/libplatform/default-platform.cc | 48
-rw-r--r-- deps/v8/src/libplatform/default-platform.h | 9
-rw-r--r-- deps/v8/src/libplatform/task-queue.h | 2
-rw-r--r-- deps/v8/src/list-inl.h | 42
-rw-r--r-- deps/v8/src/list.h | 11
-rw-r--r-- deps/v8/src/lookup.h | 4
-rw-r--r-- deps/v8/src/macro-assembler.h | 21
-rw-r--r-- deps/v8/src/map-updater.cc | 41
-rw-r--r-- deps/v8/src/map-updater.h | 20
-rw-r--r-- deps/v8/src/messages.cc | 7
-rw-r--r-- deps/v8/src/messages.h | 14
-rw-r--r-- deps/v8/src/mips/OWNERS | 7
-rw-r--r-- deps/v8/src/mips/assembler-mips-inl.h | 29
-rw-r--r-- deps/v8/src/mips/assembler-mips.cc | 28
-rw-r--r-- deps/v8/src/mips/assembler-mips.h | 27
-rw-r--r-- deps/v8/src/mips/code-stubs-mips.cc | 94
-rw-r--r-- deps/v8/src/mips/deoptimizer-mips.cc | 75
-rw-r--r-- deps/v8/src/mips/interface-descriptors-mips.cc | 18
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.cc | 243
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.h | 14
-rw-r--r-- deps/v8/src/mips/simulator-mips.cc | 24
-rw-r--r-- deps/v8/src/mips/simulator-mips.h | 18
-rw-r--r-- deps/v8/src/mips64/OWNERS | 7
-rw-r--r-- deps/v8/src/mips64/assembler-mips64-inl.h | 16
-rw-r--r-- deps/v8/src/mips64/assembler-mips64.cc | 190
-rw-r--r-- deps/v8/src/mips64/assembler-mips64.h | 13
-rw-r--r-- deps/v8/src/mips64/code-stubs-mips64.cc | 356
-rw-r--r-- deps/v8/src/mips64/codegen-mips64.cc | 180
-rw-r--r-- deps/v8/src/mips64/constants-mips64.h | 8
-rw-r--r-- deps/v8/src/mips64/deoptimizer-mips64.cc | 133
-rw-r--r-- deps/v8/src/mips64/disasm-mips64.cc | 90
-rw-r--r-- deps/v8/src/mips64/interface-descriptors-mips64.cc | 18
-rw-r--r-- deps/v8/src/mips64/macro-assembler-mips64.cc | 626
-rw-r--r-- deps/v8/src/mips64/macro-assembler-mips64.h | 90
-rw-r--r-- deps/v8/src/mips64/simulator-mips64.cc | 139
-rw-r--r-- deps/v8/src/mips64/simulator-mips64.h | 14
-rw-r--r-- deps/v8/src/objects-body-descriptors-inl.h | 95
-rw-r--r-- deps/v8/src/objects-body-descriptors.h | 4
-rw-r--r-- deps/v8/src/objects-debug.cc | 134
-rw-r--r-- deps/v8/src/objects-inl.h | 458
-rw-r--r-- deps/v8/src/objects-printer.cc | 42
-rw-r--r-- deps/v8/src/objects.cc | 997
-rw-r--r-- deps/v8/src/objects.h | 1789
-rw-r--r-- deps/v8/src/objects/descriptor-array.h | 34
-rw-r--r-- deps/v8/src/objects/dictionary.h | 15
-rw-r--r-- deps/v8/src/objects/hash-table-inl.h | 34
-rw-r--r-- deps/v8/src/objects/hash-table.h | 46
-rw-r--r-- deps/v8/src/objects/intl-objects.cc (renamed from deps/v8/src/i18n.cc) | 763
-rw-r--r-- deps/v8/src/objects/intl-objects.h (renamed from deps/v8/src/i18n.h) | 84
-rw-r--r-- deps/v8/src/objects/literal-objects.h | 11
-rw-r--r-- deps/v8/src/objects/map-inl.h | 68
-rw-r--r-- deps/v8/src/objects/map.h | 829
-rw-r--r-- deps/v8/src/objects/string-table.h | 1
-rw-r--r-- deps/v8/src/parsing/parse-info.cc | 36
-rw-r--r-- deps/v8/src/parsing/parse-info.h | 27
-rw-r--r-- deps/v8/src/parsing/parser-base.h | 179
-rw-r--r-- deps/v8/src/parsing/parser.cc | 464
-rw-r--r-- deps/v8/src/parsing/parser.h | 41
-rw-r--r-- deps/v8/src/parsing/preparse-data-format.h | 2
-rw-r--r-- deps/v8/src/parsing/preparse-data.cc | 9
-rw-r--r-- deps/v8/src/parsing/preparse-data.h | 31
-rw-r--r-- deps/v8/src/parsing/preparsed-scope-data.cc | 119
-rw-r--r-- deps/v8/src/parsing/preparsed-scope-data.h | 39
-rw-r--r-- deps/v8/src/parsing/preparser.cc | 54
-rw-r--r-- deps/v8/src/parsing/preparser.h | 91
-rw-r--r-- deps/v8/src/parsing/rewriter.cc | 4
-rw-r--r-- deps/v8/src/parsing/scanner-character-streams.cc | 14
-rw-r--r-- deps/v8/src/parsing/scanner.cc | 45
-rw-r--r-- deps/v8/src/parsing/scanner.h | 8
-rw-r--r-- deps/v8/src/perf-jit.cc | 6
-rw-r--r-- deps/v8/src/ppc/assembler-ppc-inl.h | 16
-rw-r--r-- deps/v8/src/ppc/assembler-ppc.cc | 9
-rw-r--r-- deps/v8/src/ppc/assembler-ppc.h | 3
-rw-r--r-- deps/v8/src/ppc/code-stubs-ppc.cc | 75
-rw-r--r-- deps/v8/src/ppc/deoptimizer-ppc.cc | 35
-rw-r--r-- deps/v8/src/ppc/interface-descriptors-ppc.cc | 18
-rw-r--r-- deps/v8/src/ppc/macro-assembler-ppc.cc | 15
-rw-r--r-- deps/v8/src/ppc/simulator-ppc.cc | 26
-rw-r--r-- deps/v8/src/ppc/simulator-ppc.h | 18
-rw-r--r-- deps/v8/src/profiler/heap-snapshot-generator.cc | 49
-rw-r--r-- deps/v8/src/profiler/heap-snapshot-generator.h | 2
-rw-r--r-- deps/v8/src/property-details.h | 2
-rw-r--r-- deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc | 24
-rw-r--r-- deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h | 3
-rw-r--r-- deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc | 30
-rw-r--r-- deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h | 3
-rw-r--r-- deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc | 9
-rw-r--r-- deps/v8/src/regexp/interpreter-irregexp.cc | 4
-rw-r--r-- deps/v8/src/regexp/jsregexp-inl.h | 1
-rw-r--r-- deps/v8/src/regexp/jsregexp.cc | 47
-rw-r--r-- deps/v8/src/regexp/mips/OWNERS | 7
-rw-r--r-- deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc | 24
-rw-r--r-- deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h | 3
-rw-r--r-- deps/v8/src/regexp/mips64/OWNERS | 7
-rw-r--r-- deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc | 187
-rw-r--r-- deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h | 3
-rw-r--r-- deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc | 16
-rw-r--r-- deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h | 3
-rw-r--r-- deps/v8/src/regexp/regexp-ast.h | 24
-rw-r--r-- deps/v8/src/regexp/regexp-macro-assembler.cc | 25
-rw-r--r-- deps/v8/src/regexp/regexp-parser.cc | 124
-rw-r--r-- deps/v8/src/regexp/regexp-parser.h | 14
-rw-r--r-- deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc | 16
-rw-r--r-- deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h | 3
-rw-r--r-- deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc | 9
-rw-r--r-- deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc | 9
-rw-r--r-- deps/v8/src/register-configuration.cc | 19
-rw-r--r-- deps/v8/src/runtime-profiler.cc | 4
-rw-r--r-- deps/v8/src/runtime/runtime-array.cc | 38
-rw-r--r-- deps/v8/src/runtime/runtime-classes.cc | 9
-rw-r--r-- deps/v8/src/runtime/runtime-collections.cc | 43
-rw-r--r-- deps/v8/src/runtime/runtime-compiler.cc | 33
-rw-r--r-- deps/v8/src/runtime/runtime-debug.cc | 18
-rw-r--r-- deps/v8/src/runtime/runtime-function.cc | 8
-rw-r--r-- deps/v8/src/runtime/runtime-generator.cc | 13
-rw-r--r-- deps/v8/src/runtime/runtime-internal.cc | 19
-rw-r--r-- deps/v8/src/runtime/runtime-interpreter.cc | 15
-rw-r--r-- deps/v8/src/runtime/runtime-intl.cc (renamed from deps/v8/src/runtime/runtime-i18n.cc) | 46
-rw-r--r-- deps/v8/src/runtime/runtime-literals.cc | 116
-rw-r--r-- deps/v8/src/runtime/runtime-object.cc | 120
-rw-r--r-- deps/v8/src/runtime/runtime-regexp.cc | 4
-rw-r--r-- deps/v8/src/runtime/runtime-strings.cc | 6
-rw-r--r-- deps/v8/src/runtime/runtime-test.cc | 52
-rw-r--r-- deps/v8/src/runtime/runtime-typedarray.cc | 39
-rw-r--r-- deps/v8/src/runtime/runtime-wasm.cc | 4
-rw-r--r-- deps/v8/src/runtime/runtime.h | 174
-rw-r--r-- deps/v8/src/s390/assembler-s390-inl.h | 16
-rw-r--r-- deps/v8/src/s390/assembler-s390.cc | 9
-rw-r--r-- deps/v8/src/s390/assembler-s390.h | 3
-rw-r--r-- deps/v8/src/s390/code-stubs-s390.cc | 85
-rw-r--r-- deps/v8/src/s390/codegen-s390.cc | 3
-rw-r--r-- deps/v8/src/s390/deoptimizer-s390.cc | 35
-rw-r--r-- deps/v8/src/s390/interface-descriptors-s390.cc | 19
-rw-r--r-- deps/v8/src/s390/macro-assembler-s390.cc | 5
-rw-r--r-- deps/v8/src/s390/simulator-s390.cc | 27
-rw-r--r-- deps/v8/src/s390/simulator-s390.h | 15
-rw-r--r-- deps/v8/src/safepoint-table.h | 3
-rw-r--r-- deps/v8/src/setup-isolate-deserialize.cc | 9
-rw-r--r-- deps/v8/src/setup-isolate-full.cc | 17
-rw-r--r-- deps/v8/src/setup-isolate.h | 9
-rw-r--r-- deps/v8/src/snapshot/code-serializer.cc | 3
-rw-r--r-- deps/v8/src/snapshot/deserializer.cc | 90
-rw-r--r-- deps/v8/src/snapshot/deserializer.h | 22
-rw-r--r-- deps/v8/src/snapshot/partial-serializer.cc | 39
-rw-r--r-- deps/v8/src/snapshot/partial-serializer.h | 8
-rw-r--r-- deps/v8/src/snapshot/serializer-common.cc | 4
-rw-r--r-- deps/v8/src/snapshot/serializer-common.h | 5
-rw-r--r-- deps/v8/src/snapshot/serializer.cc | 30
-rw-r--r-- deps/v8/src/snapshot/serializer.h | 20
-rw-r--r-- deps/v8/src/snapshot/snapshot-common.cc | 13
-rw-r--r-- deps/v8/src/snapshot/snapshot.h | 14
-rw-r--r-- deps/v8/src/snapshot/startup-serializer.cc | 30
-rw-r--r-- deps/v8/src/snapshot/startup-serializer.h | 10
-rw-r--r-- deps/v8/src/source-position-table.cc | 17
-rw-r--r-- deps/v8/src/source-position-table.h | 15
-rw-r--r-- deps/v8/src/string-hasher-inl.h | 147
-rw-r--r-- deps/v8/src/string-hasher.h | 90
-rw-r--r-- deps/v8/src/string-search.h | 13
-rw-r--r-- deps/v8/src/string-stream.cc | 20
-rw-r--r-- deps/v8/src/transitions-inl.h | 2
-rw-r--r-- deps/v8/src/transitions.cc | 42
-rw-r--r-- deps/v8/src/transitions.h | 6
-rw-r--r-- deps/v8/src/type-info.cc | 48
-rw-r--r-- deps/v8/src/type-info.h | 12
-rw-r--r-- deps/v8/src/utils.h | 18
-rw-r--r-- deps/v8/src/v8.gyp | 286
-rw-r--r-- deps/v8/src/v8threads.cc | 6
-rw-r--r-- deps/v8/src/v8threads.h | 10
-rw-r--r-- deps/v8/src/value-serializer.cc | 29
-rw-r--r-- deps/v8/src/value-serializer.h | 2
-rw-r--r-- deps/v8/src/vector.h | 37
-rw-r--r-- deps/v8/src/visitors.cc | 22
-rw-r--r-- deps/v8/src/visitors.h | 79
-rw-r--r-- deps/v8/src/wasm/OWNERS | 2
-rw-r--r-- deps/v8/src/wasm/decoder.h | 128
-rw-r--r-- deps/v8/src/wasm/function-body-decoder-impl.h | 14
-rw-r--r-- deps/v8/src/wasm/function-body-decoder.cc | 141
-rw-r--r-- deps/v8/src/wasm/function-body-decoder.h | 11
-rw-r--r-- deps/v8/src/wasm/leb-helper.h | 8
-rw-r--r-- deps/v8/src/wasm/local-decl-encoder.cc | 51
-rw-r--r-- deps/v8/src/wasm/local-decl-encoder.h | 50
-rw-r--r-- deps/v8/src/wasm/module-decoder.cc | 1163
-rw-r--r-- deps/v8/src/wasm/module-decoder.h | 25
-rw-r--r-- deps/v8/src/wasm/streaming-decoder.cc | 379
-rw-r--r-- deps/v8/src/wasm/streaming-decoder.h | 152
-rw-r--r-- deps/v8/src/wasm/wasm-code-specialization.cc | 5
-rw-r--r-- deps/v8/src/wasm/wasm-debug.cc | 174
-rw-r--r-- deps/v8/src/wasm/wasm-external-refs.cc | 60
-rw-r--r-- deps/v8/src/wasm/wasm-interpreter.cc | 765
-rw-r--r-- deps/v8/src/wasm/wasm-interpreter.h | 111
-rw-r--r-- deps/v8/src/wasm/wasm-js.cc | 247
-rw-r--r-- deps/v8/src/wasm/wasm-module-builder.cc | 254
-rw-r--r-- deps/v8/src/wasm/wasm-module-builder.h | 127
-rw-r--r-- deps/v8/src/wasm/wasm-module.cc | 1284
-rw-r--r-- deps/v8/src/wasm/wasm-module.h | 33
-rw-r--r-- deps/v8/src/wasm/wasm-objects.cc | 80
-rw-r--r-- deps/v8/src/wasm/wasm-objects.h | 42
-rw-r--r-- deps/v8/src/wasm/wasm-opcodes.cc | 42
-rw-r--r-- deps/v8/src/wasm/wasm-opcodes.h | 127
-rw-r--r-- deps/v8/src/wasm/wasm-result.cc | 140
-rw-r--r-- deps/v8/src/wasm/wasm-result.h | 138
-rw-r--r-- deps/v8/src/wasm/wasm-text.cc | 1
-rw-r--r-- deps/v8/src/x64/assembler-x64-inl.h | 16
-rw-r--r-- deps/v8/src/x64/assembler-x64.cc | 25
-rw-r--r-- deps/v8/src/x64/assembler-x64.h | 2
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.cc | 89
-rw-r--r-- deps/v8/src/x64/deoptimizer-x64.cc | 37
-rw-r--r-- deps/v8/src/x64/disasm-x64.cc | 25
-rw-r--r-- deps/v8/src/x64/interface-descriptors-x64.cc | 18
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.cc | 3
-rw-r--r-- deps/v8/src/x64/sse-instr.h | 4
-rw-r--r-- deps/v8/src/x87/assembler-x87-inl.h | 16
-rw-r--r-- deps/v8/src/x87/code-stubs-x87.cc | 63
-rw-r--r-- deps/v8/src/x87/deoptimizer-x87.cc | 35
-rw-r--r-- deps/v8/src/x87/interface-descriptors-x87.cc | 5
-rw-r--r-- deps/v8/src/x87/macro-assembler-x87.cc | 4
-rw-r--r-- deps/v8/src/zone/accounting-allocator.h | 2
-rw-r--r-- deps/v8/test/BUILD.gn | 19
-rw-r--r-- deps/v8/test/benchmarks/testcfg.py | 2
-rw-r--r-- deps/v8/test/bot_default.gyp | 1
-rw-r--r-- deps/v8/test/bot_default.isolate | 2
-rw-r--r-- deps/v8/test/cctest/BUILD.gn | 6
-rw-r--r-- deps/v8/test/cctest/OWNERS | 7
-rw-r--r-- deps/v8/test/cctest/asmjs/OWNERS | 11
-rw-r--r-- deps/v8/test/cctest/asmjs/test-asm-typer.cc | 2089
-rw-r--r-- deps/v8/test/cctest/cctest.cc | 32
-rw-r--r-- deps/v8/test/cctest/cctest.gyp | 5
-rw-r--r-- deps/v8/test/cctest/cctest.h | 1
-rw-r--r-- deps/v8/test/cctest/cctest.status | 56
-rw-r--r-- deps/v8/test/cctest/compiler/call-tester.h | 194
-rw-r--r-- deps/v8/test/cctest/compiler/test-js-context-specialization.cc | 20
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-machops.cc | 78
-rw-r--r-- deps/v8/test/cctest/heap/test-array-buffer-tracker.cc | 8
-rw-r--r-- deps/v8/test/cctest/heap/test-compaction.cc | 8
-rw-r--r-- deps/v8/test/cctest/heap/test-concurrent-marking.cc | 5
-rw-r--r-- deps/v8/test/cctest/heap/test-heap.cc | 380
-rw-r--r-- deps/v8/test/cctest/heap/test-mark-compact.cc | 14
-rw-r--r-- deps/v8/test/cctest/heap/test-page-promotion.cc | 4
-rw-r--r-- deps/v8/test/cctest/heap/test-spaces.cc | 23
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden | 40
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden | 4
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden | 50
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden | 6
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden | 108
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden | 20
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden | 32
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden | 8
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden | 10
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden | 12
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden | 4
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden | 18
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden | 217
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden | 24
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden | 12
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden | 8
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden | 8
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden | 16
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden | 68
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden | 4
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden | 34
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden | 6
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden | 16
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden | 4
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden | 2156
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden | 42
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden | 412
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden | 1860
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden | 10
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden | 16
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden | 674
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden | 12
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden | 28
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden | 4
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden | 544
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden | 8
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden | 15
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden | 8
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden | 264
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden | 8
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden | 26
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden | 4
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden | 4
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden | 969
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden | 131
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden | 88
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden | 44
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden | 286
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden | 528
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden | 1048
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden | 10
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden | 10
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden | 1070
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden | 524
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden | 16
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden | 34
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden | 6
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden | 46
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden | 87
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden | 18
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden | 16
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden | 10
-rw-r--r-- deps/v8/test/cctest/interpreter/source-position-matcher.cc | 4
-rw-r--r-- deps/v8/test/cctest/interpreter/test-bytecode-generator.cc | 102
-rw-r--r-- deps/v8/test/cctest/interpreter/test-interpreter.cc | 92
-rw-r--r-- deps/v8/test/cctest/parsing/test-preparser.cc | 187
-rw-r--r-- deps/v8/test/cctest/parsing/test-scanner-streams.cc | 46
-rw-r--r-- deps/v8/test/cctest/parsing/test-scanner.cc | 2
-rw-r--r-- deps/v8/test/cctest/scope-test-helper.h | 69
-rw-r--r-- deps/v8/test/cctest/test-accessor-assembler.cc | 8
-rw-r--r-- deps/v8/test/cctest/test-accessors.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-api-interceptors.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-api.cc | 481
-rw-r--r-- deps/v8/test/cctest/test-assembler-arm.cc | 113
-rw-r--r-- deps/v8/test/cctest/test-assembler-arm64.cc | 91
-rw-r--r-- deps/v8/test/cctest/test-assembler-mips.cc | 74
-rw-r--r-- deps/v8/test/cctest/test-assembler-mips64.cc | 1039
-rw-r--r-- deps/v8/test/cctest/test-ast.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-code-stub-assembler.cc | 112
-rw-r--r-- deps/v8/test/cctest/test-code-stubs-mips64.cc | 8
-rw-r--r-- deps/v8/test/cctest/test-compiler.cc | 8
-rw-r--r-- deps/v8/test/cctest/test-cpu-profiler.cc | 14
-rw-r--r-- deps/v8/test/cctest/test-date.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-debug.cc | 37
-rw-r--r-- deps/v8/test/cctest/test-deoptimization.cc | 10
-rw-r--r-- deps/v8/test/cctest/test-dictionary.cc | 12
-rw-r--r-- deps/v8/test/cctest/test-disasm-arm.cc | 61
-rw-r--r-- deps/v8/test/cctest/test-disasm-ia32.cc | 36
-rw-r--r-- deps/v8/test/cctest/test-disasm-mips64.cc | 45
-rw-r--r-- deps/v8/test/cctest/test-disasm-x64.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-feedback-vector.cc | 27
-rw-r--r-- deps/v8/test/cctest/test-field-type-tracking.cc | 40
-rw-r--r-- deps/v8/test/cctest/test-flags.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-heap-profiler.cc | 12
-rw-r--r-- deps/v8/test/cctest/test-identity-map.cc | 54
-rw-r--r-- deps/v8/test/cctest/test-macro-assembler-arm.cc | 121
-rw-r--r-- deps/v8/test/cctest/test-macro-assembler-mips64.cc | 62
-rw-r--r-- deps/v8/test/cctest/test-mementos.cc | 5
-rw-r--r-- deps/v8/test/cctest/test-modules.cc | 45
-rw-r--r-- deps/v8/test/cctest/test-parsing.cc | 295
-rw-r--r-- deps/v8/test/cctest/test-serialize.cc | 49
-rw-r--r-- deps/v8/test/cctest/test-strings.cc | 3
-rw-r--r-- deps/v8/test/cctest/test-symbols.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-types.cc | 5
-rw-r--r-- deps/v8/test/cctest/test-unboxed-doubles.cc | 8
-rw-r--r-- deps/v8/test/cctest/test-weakmaps.cc | 10
-rw-r--r-- deps/v8/test/cctest/test-weaksets.cc | 8
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-64.cc | 12
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc | 3
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc | 21
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-js.cc | 2
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-module.cc | 65
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc | 1
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-simd.cc | 486
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm.cc | 116
-rw-r--r-- deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc | 150
-rw-r--r-- deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc | 2
-rw-r--r-- deps/v8/test/cctest/wasm/test-wasm-stack.cc | 2
-rw-r--r-- deps/v8/test/cctest/wasm/test-wasm-trap-position.cc | 2
-rw-r--r-- deps/v8/test/cctest/wasm/wasm-run-utils.h | 77
-rw-r--r-- deps/v8/test/common/wasm/wasm-macro-gen.h (renamed from deps/v8/src/wasm/wasm-macro-gen.h) | 155
-rw-r--r-- deps/v8/test/common/wasm/wasm-module-runner.cc | 14
-rw-r--r-- deps/v8/test/common/wasm/wasm-module-runner.h | 2
-rw-r--r-- deps/v8/test/debugger/debug/debug-compile-optimized.js | 2
-rw-r--r-- deps/v8/test/debugger/debug/debug-evaluate-nested-let.js | 2
-rw-r--r-- deps/v8/test/debugger/debug/debug-optimize.js | 2
-rw-r--r-- deps/v8/test/debugger/debug/debug-scopes.js | 4
-rw-r--r-- deps/v8/test/debugger/debug/debug-stepin-accessor.js | 3
-rw-r--r-- deps/v8/test/debugger/debug/debug-stepin-property-function-call.js | 2
-rw-r--r-- deps/v8/test/debugger/debug/es6/debug-blockscopes.js | 2
-rw-r--r-- deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js | 4
-rw-r--r-- deps/v8/test/debugger/debug/regress/regress-crbug-465298.js | 2
-rw-r--r-- deps/v8/test/debugger/debug/regress/regress-crbug-517592.js | 2
-rw-r--r-- deps/v8/test/debugger/debug/regress/regress-crbug-633999.js | 2
-rw-r--r-- deps/v8/test/debugger/debugger.status | 13
-rw-r--r-- deps/v8/test/default.gyp | 1
-rw-r--r-- deps/v8/test/default.isolate | 2
-rw-r--r-- deps/v8/test/fuzzer/fuzzer.gyp | 34
-rw-r--r-- deps/v8/test/fuzzer/parser.cc | 48
-rw-r--r-- deps/v8/test/fuzzer/testcfg.py | 3
-rw-r--r-- deps/v8/test/fuzzer/wasm-call.cc | 249
-rw-r--r-- deps/v8/test/fuzzer/wasm-code.cc | 174
-rw-r--r-- deps/v8/test/fuzzer/wasm-compile.cc | 180
-rw-r--r-- deps/v8/test/fuzzer/wasm-data-section.cc | 6
-rw-r--r-- deps/v8/test/fuzzer/wasm-function-sigs-section.cc | 7
-rw-r--r-- deps/v8/test/fuzzer/wasm-fuzzer-common.cc | 194
-rw-r--r-- deps/v8/test/fuzzer/wasm-fuzzer-common.h | 40
-rw-r--r-- deps/v8/test/fuzzer/wasm-globals-section.cc | 6
-rw-r--r-- deps/v8/test/fuzzer/wasm-imports-section.cc | 6
-rw-r--r-- deps/v8/test/fuzzer/wasm-memory-section.cc | 6
-rw-r--r-- deps/v8/test/fuzzer/wasm-names-section.cc | 6
-rw-r--r-- deps/v8/test/fuzzer/wasm-section-fuzzers.cc | 63
-rw-r--r-- deps/v8/test/fuzzer/wasm-section-fuzzers.h | 16
-rw-r--r-- deps/v8/test/fuzzer/wasm-types-section.cc | 6
-rw-r--r-- deps/v8/test/fuzzer/wasm.tar.gz.sha1 | 1
-rw-r--r-- deps/v8/test/fuzzer/wasm/foo | 0
-rw-r--r-- deps/v8/test/fuzzer/wasm_asmjs.tar.gz.sha1 | 1
-rw-r--r-- deps/v8/test/fuzzer/wasm_asmjs/foo | 0
-rw-r--r-- deps/v8/test/fuzzer/wasm_asmjs_corpus.tar.gz.sha1 | 1
-rw-r--r-- deps/v8/test/fuzzer/wasm_corpus.tar.gz.sha1 | 1
-rw-r--r-- deps/v8/test/inspector/BUILD.gn | 4
-rw-r--r-- deps/v8/test/inspector/console/destroy-context-during-log-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/console/destroy-context-during-log.js | 10
-rw-r--r-- deps/v8/test/inspector/console/let-const-with-api-expected.txt | 3
-rw-r--r-- deps/v8/test/inspector/console/let-const-with-api.js | 2
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash.js | 4
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/console-profile-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/console-profile.js | 4
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/coverage.js | 4
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/enable-disable-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/enable-disable.js | 28
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/record-cpu-profile-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/record-cpu-profile.js | 28
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start.js | 18
-rw-r--r-- deps/v8/test/inspector/debugger/access-obsolete-frame-expected.txt | 3
-rw-r--r-- deps/v8/test/inspector/debugger/access-obsolete-frame.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/asm-js-stack-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/asm-js-stack.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/async-console-count-doesnt-crash.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt | 53
-rw-r--r-- deps/v8/test/inspector/debugger/async-for-await-of-promise-stack.js | 22
-rw-r--r-- deps/v8/test/inspector/debugger/async-instrumentation-expected.txt | 4
-rw-r--r-- deps/v8/test/inspector/debugger/async-instrumentation.js | 10
-rw-r--r-- deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt | 6
-rw-r--r-- deps/v8/test/inspector/debugger/async-promise-late-then.js | 10
-rw-r--r-- deps/v8/test/inspector/debugger/async-set-timeout.js | 10
-rw-r--r-- deps/v8/test/inspector/debugger/async-stack-await-expected.txt | 15
-rw-r--r-- deps/v8/test/inspector/debugger/async-stack-await.js | 10
-rw-r--r-- deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt | 57
-rw-r--r-- deps/v8/test/inspector/debugger/async-stack-created-frame.js | 11
-rw-r--r-- deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt | 129
-rw-r--r-- deps/v8/test/inspector/debugger/async-stack-for-promise.js | 10
-rw-r--r-- deps/v8/test/inspector/debugger/async-stacks-limit-expected.txt | 140
-rw-r--r-- deps/v8/test/inspector/debugger/async-stacks-limit.js | 164
-rw-r--r-- deps/v8/test/inspector/debugger/break-on-exception-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/debugger/break-on-exception.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/call-frame-function-location-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/call-frame-function-location.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/caught-exception-from-framework-inside-async.js | 8
-rw-r--r-- deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/collect-obsolete-async-tasks-expected.txt | 37
-rw-r--r-- deps/v8/test/inspector/debugger/collect-obsolete-async-tasks.js | 35
-rw-r--r-- deps/v8/test/inspector/debugger/collect-old-async-call-chains-expected.txt | 211
-rw-r--r-- deps/v8/test/inspector/debugger/collect-old-async-call-chains.js | 171
-rw-r--r-- deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/debugger/command-line-api-with-bound-function.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/continue-to-location-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt | 81
-rw-r--r-- deps/v8/test/inspector/debugger/continue-to-location-target-call-frames.js | 139
-rw-r--r-- deps/v8/test/inspector/debugger/continue-to-location.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/destory-in-break-program-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/destory-in-break-program.js | 13
-rw-r--r-- deps/v8/test/inspector/debugger/doesnt-step-into-injected-script.js | 6
-rw-r--r-- deps/v8/test/inspector/debugger/es6-module-script-parsed.js | 12
-rw-r--r-- deps/v8/test/inspector/debugger/es6-module-set-script-source.js | 8
-rw-r--r-- deps/v8/test/inspector/debugger/eval-scopes-expected.txt | 3
-rw-r--r-- deps/v8/test/inspector/debugger/eval-scopes.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/framework-break-expected.txt | 4
-rw-r--r-- deps/v8/test/inspector/debugger/framework-break.js | 18
-rw-r--r-- deps/v8/test/inspector/debugger/framework-nested-scheduled-break-expected.txt | 18
-rw-r--r-- deps/v8/test/inspector/debugger/framework-nested-scheduled-break.js | 20
-rw-r--r-- deps/v8/test/inspector/debugger/framework-precise-ranges.js | 10
-rw-r--r-- deps/v8/test/inspector/debugger/framework-stepping-expected.txt | 6
-rw-r--r-- deps/v8/test/inspector/debugger/framework-stepping.js | 18
-rw-r--r-- deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt | 26
-rw-r--r-- deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error.js | 28
-rw-r--r-- deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt | 12
-rw-r--r-- deps/v8/test/inspector/debugger/get-possible-breakpoints-master.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/get-possible-breakpoints-restrict-to-function.js | 8
-rw-r--r-- deps/v8/test/inspector/debugger/get-possible-breakpoints.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/inspector-break-api-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/debugger/inspector-break-api.js | 18
-rw-r--r-- deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt | 91
-rw-r--r-- deps/v8/test/inspector/debugger/max-async-call-chain-depth.js | 162
-rw-r--r-- deps/v8/test/inspector/debugger/object-preview-internal-properties.js | 2
-rw-r--r-- deps/v8/test/inspector/debugger/pause-expected.txt | 3
-rw-r--r-- deps/v8/test/inspector/debugger/pause-on-oom.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/pause.js | 128
-rw-r--r-- deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt | 235
-rw-r--r-- deps/v8/test/inspector/debugger/promise-chain-when-limit-hit.js | 54
-rw-r--r-- deps/v8/test/inspector/debugger/protocol-string-to-double-locale-expected.txt | 8
-rw-r--r-- deps/v8/test/inspector/debugger/protocol-string-to-double-locale.js | 24
-rw-r--r-- deps/v8/test/inspector/debugger/restore-breakpoint.js | 2
-rw-r--r-- deps/v8/test/inspector/debugger/return-break-locations.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout.js | 8
-rw-r--r-- deps/v8/test/inspector/debugger/schedule-step-into-async.js | 10
-rw-r--r-- deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt | 3
-rw-r--r-- deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/script-end-location.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/script-on-after-compile.js | 2
-rw-r--r-- deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate.js | 6
-rw-r--r-- deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt | 3
-rw-r--r-- deps/v8/test/inspector/debugger/script-parsed-hash.js | 2
-rw-r--r-- deps/v8/test/inspector/debugger/script-with-negative-offset-expected.txt | 19
-rw-r--r-- deps/v8/test/inspector/debugger/script-with-negative-offset.js | 31
-rw-r--r-- deps/v8/test/inspector/debugger/set-async-call-stack-depth-expected.txt | 37
-rw-r--r-- deps/v8/test/inspector/debugger/set-async-call-stack-depth.js | 79
-rw-r--r-- deps/v8/test/inspector/debugger/set-blackbox-patterns-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/set-blackbox-patterns.js | 10
-rw-r--r-- deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js | 2
-rw-r--r-- deps/v8/test/inspector/debugger/set-script-source-exception.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/set-script-source-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/debugger/set-script-source.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js | 3
-rw-r--r-- deps/v8/test/inspector/debugger/step-into-expected.txt | 45
-rw-r--r-- deps/v8/test/inspector/debugger/step-into-nested-arrow.js | 8
-rw-r--r-- deps/v8/test/inspector/debugger/step-into-next-script.js | 16
-rw-r--r-- deps/v8/test/inspector/debugger/step-into.js | 8
-rw-r--r-- deps/v8/test/inspector/debugger/step-out-async-await.js | 8
-rw-r--r-- deps/v8/test/inspector/debugger/step-over-another-context-group.js | 40
-rw-r--r-- deps/v8/test/inspector/debugger/step-over-caught-exception-expected.txt | 3
-rw-r--r-- deps/v8/test/inspector/debugger/step-over-caught-exception.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/step-snapshot-expected.txt | 35
-rw-r--r-- deps/v8/test/inspector/debugger/step-snapshot.js | 31
-rw-r--r-- deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/stepping-and-break-program-api-expected.txt | 6
-rw-r--r-- deps/v8/test/inspector/debugger/stepping-and-break-program-api.js | 10
-rw-r--r-- deps/v8/test/inspector/debugger/stepping-ignores-injected-script-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/stepping-ignores-injected-script.js | 6
-rw-r--r-- deps/v8/test/inspector/debugger/stepping-tail-call.js | 8
-rw-r--r-- deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js | 8
-rw-r--r-- deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script.js | 6
-rw-r--r-- deps/v8/test/inspector/debugger/stepping-with-natives-and-frameworks.js | 8
-rw-r--r-- deps/v8/test/inspector/debugger/suspended-generator-scopes-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/debugger/suspended-generator-scopes.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/update-call-frame-scopes-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/debugger/update-call-frame-scopes.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js | 2
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-imports-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-imports.js | 8
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-scripts-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-scripts.js | 7
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-source-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-source.js | 6
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-stack-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-stack.js | 4
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-stepping-expected.txt | 295
-rw-r--r-- deps/v8/test/inspector/debugger/wasm-stepping.js | 49
-rw-r--r-- deps/v8/test/inspector/heap-profiler/take-heap-snapshot-on-pause.js | 6
-rw-r--r-- deps/v8/test/inspector/inspector-impl.cc | 341
-rw-r--r-- deps/v8/test/inspector/inspector-impl.h | 92
-rw-r--r-- deps/v8/test/inspector/inspector-test.cc | 814
-rw-r--r-- deps/v8/test/inspector/inspector.gyp | 10
-rw-r--r-- deps/v8/test/inspector/inspector.status | 6
-rw-r--r-- deps/v8/test/inspector/isolate-data.cc | 95
-rw-r--r-- deps/v8/test/inspector/isolate-data.h | 67
-rw-r--r-- deps/v8/test/inspector/json-parse-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/json-parse.js | 6
-rw-r--r-- deps/v8/test/inspector/protocol-test.js | 485
-rw-r--r-- deps/v8/test/inspector/runtime/await-promise.js | 4
-rw-r--r-- deps/v8/test/inspector/runtime/call-function-on-async-expected.txt | 6
-rw-r--r-- deps/v8/test/inspector/runtime/call-function-on-async.js | 2
-rw-r--r-- deps/v8/test/inspector/runtime/clear-of-command-line-api-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/runtime/clear-of-command-line-api.js | 4
-rw-r--r-- deps/v8/test/inspector/runtime/client-console-api-message-expected.txt | 8
-rw-r--r-- deps/v8/test/inspector/runtime/client-console-api-message.js | 28
-rw-r--r-- deps/v8/test/inspector/runtime/command-line-api.js | 8
-rw-r--r-- deps/v8/test/inspector/runtime/compile-script-expected.txt | 3
-rw-r--r-- deps/v8/test/inspector/runtime/compile-script.js | 2
-rw-r--r-- deps/v8/test/inspector/runtime/console-api-repeated-in-console-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/runtime/console-api-repeated-in-console.js | 2
-rw-r--r--deps/v8/test/inspector/runtime/console-assert.js4
-rw-r--r--deps/v8/test/inspector/runtime/console-deprecated-methods-expected.txt1
-rw-r--r--deps/v8/test/inspector/runtime/console-deprecated-methods.js2
-rw-r--r--deps/v8/test/inspector/runtime/console-line-and-column-expected.txt3
-rw-r--r--deps/v8/test/inspector/runtime/console-line-and-column.js2
-rw-r--r--deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks-expected.txt1
-rw-r--r--deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks.js4
-rw-r--r--deps/v8/test/inspector/runtime/console-memory.js2
-rw-r--r--deps/v8/test/inspector/runtime/console-messages-limits.js4
-rw-r--r--deps/v8/test/inspector/runtime/console-methods.js4
-rw-r--r--deps/v8/test/inspector/runtime/console-spec-expected.txt30
-rw-r--r--deps/v8/test/inspector/runtime/console-spec.js60
-rw-r--r--deps/v8/test/inspector/runtime/console-time-end-format.js2
-rw-r--r--deps/v8/test/inspector/runtime/console-timestamp-expected.txt2
-rw-r--r--deps/v8/test/inspector/runtime/console-timestamp.js2
-rw-r--r--deps/v8/test/inspector/runtime/create-context.js57
-rw-r--r--deps/v8/test/inspector/runtime/es6-module-expected.txt4
-rw-r--r--deps/v8/test/inspector/runtime/es6-module.js16
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-async-expected.txt4
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error.js4
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-async.js4
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-empty-stack.js4
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero-expected.txt1
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero.js2
-rw-r--r--deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js8
-rw-r--r--deps/v8/test/inspector/runtime/exception-thrown-expected.txt4
-rw-r--r--deps/v8/test/inspector/runtime/exception-thrown.js4
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt2
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-on-proxy.js4
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-preview-expected.txt2
-rw-r--r--deps/v8/test/inspector/runtime/get-properties-preview.js4
-rw-r--r--deps/v8/test/inspector/runtime/get-properties.js2
-rw-r--r--deps/v8/test/inspector/runtime/internal-properties-entries.js2
-rw-r--r--deps/v8/test/inspector/runtime/internal-properties.js4
-rw-r--r--deps/v8/test/inspector/runtime/length-or-size-description.js4
-rw-r--r--deps/v8/test/inspector/runtime/property-on-console-proto-expected.txt1
-rw-r--r--deps/v8/test/inspector/runtime/property-on-console-proto.js4
-rw-r--r--deps/v8/test/inspector/runtime/protocol-works-with-different-locale-expected.txt1
-rw-r--r--deps/v8/test/inspector/runtime/protocol-works-with-different-locale.js2
-rw-r--r--deps/v8/test/inspector/runtime/run-script-async-expected.txt6
-rw-r--r--deps/v8/test/inspector/runtime/run-script-async.js2
-rw-r--r--deps/v8/test/inspector/runtime/runtime-evaluate-with-dirty-context.js4
-rw-r--r--deps/v8/test/inspector/runtime/runtime-restore.js12
-rw-r--r--deps/v8/test/inspector/runtime/set-or-map-entries.js6
-rw-r--r--deps/v8/test/inspector/task-runner.cc175
-rw-r--r--deps/v8/test/inspector/task-runner.h89
-rw-r--r--deps/v8/test/inspector/testcfg.py3
-rw-r--r--deps/v8/test/intl/date-format/date-format-to-parts.js2
-rw-r--r--deps/v8/test/intl/general/case-mapping.js2
-rw-r--r--deps/v8/test/js-perf-test/BytecodeHandlers/compare.js157
-rw-r--r--deps/v8/test/js-perf-test/BytecodeHandlers/run.js29
-rw-r--r--deps/v8/test/js-perf-test/ForLoops/for_loop.js42
-rw-r--r--deps/v8/test/js-perf-test/ForLoops/run.js25
-rw-r--r--deps/v8/test/js-perf-test/JSTests.json86
-rw-r--r--deps/v8/test/js-perf-test/Modules/basic-export.js7
-rw-r--r--deps/v8/test/js-perf-test/Modules/basic-import.js8
-rw-r--r--deps/v8/test/js-perf-test/Modules/basic-namespace.js8
-rw-r--r--deps/v8/test/js-perf-test/Modules/run.js63
-rw-r--r--deps/v8/test/js-perf-test/Modules/value.js6
-rw-r--r--deps/v8/test/js-perf-test/PropertyQueries/property-queries.js3
-rw-r--r--deps/v8/test/js-perf-test/RestParameters/rest.js66
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed.json126
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/classes/babel.js51
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/classes/es5.js20
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/classes/es6.js21
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/map_set_add/es5.js21
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/map_set_add/es6.js22
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/map_set_lookup/es5.js32
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/map_set_lookup/es6.js30
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/map_set_object/es5.js24
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/map_set_object/es6.js22
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/map_string/es5.js26
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/map_string/es6.js26
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/super/babel.js135
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/super/es5.js34
-rw-r--r--deps/v8/test/js-perf-test/SixSpeed/super/es6.js34
-rw-r--r--deps/v8/test/message/arrow-invalid-rest-2.out6
-rw-r--r--deps/v8/test/message/arrow-invalid-rest.out6
-rw-r--r--deps/v8/test/message/asm-assignment-undeclared.js2
-rw-r--r--deps/v8/test/message/asm-function-mismatch-def.js2
-rw-r--r--deps/v8/test/message/asm-function-mismatch-use.js2
-rw-r--r--deps/v8/test/message/asm-function-redefined.js2
-rw-r--r--deps/v8/test/message/asm-function-undefined.js2
-rw-r--r--deps/v8/test/message/asm-function-variable-collision.js2
-rw-r--r--deps/v8/test/message/asm-import-wrong-annotation.js11
-rw-r--r--deps/v8/test/message/asm-import-wrong-annotation.out5
-rw-r--r--deps/v8/test/message/asm-import-wrong-object.js11
-rw-r--r--deps/v8/test/message/asm-import-wrong-object.out5
-rw-r--r--deps/v8/test/message/asm-linking-bogus-heap.js15
-rw-r--r--deps/v8/test/message/asm-linking-bogus-heap.out5
-rw-r--r--deps/v8/test/message/asm-linking-bogus-stdlib.js15
-rw-r--r--deps/v8/test/message/asm-linking-bogus-stdlib.out5
-rw-r--r--deps/v8/test/message/asm-linking-missing-heap.js15
-rw-r--r--deps/v8/test/message/asm-linking-missing-heap.out5
-rw-r--r--deps/v8/test/message/asm-missing-parameter-annotation.js2
-rw-r--r--deps/v8/test/message/asm-missing-return-annotation.js2
-rw-r--r--deps/v8/test/message/asm-table-mismatch-def.js2
-rw-r--r--deps/v8/test/message/asm-table-mismatch-use.js2
-rw-r--r--deps/v8/test/message/asm-table-redefined.js2
-rw-r--r--deps/v8/test/message/asm-table-undefined.js2
-rw-r--r--deps/v8/test/message/asm-table-variable-collision.js2
-rw-r--r--deps/v8/test/message/async-arrow-invalid-rest-2.js8
-rw-r--r--deps/v8/test/message/async-arrow-invalid-rest-2.out4
-rw-r--r--deps/v8/test/message/async-arrow-invalid-rest.js8
-rw-r--r--deps/v8/test/message/async-arrow-invalid-rest.out4
-rw-r--r--deps/v8/test/message/async-arrow-param-after-rest.js7
-rw-r--r--deps/v8/test/message/async-arrow-param-after-rest.out5
-rw-r--r--deps/v8/test/message/class-spread-property.js5
-rw-r--r--deps/v8/test/message/class-spread-property.out4
-rw-r--r--deps/v8/test/message/console.js25
-rw-r--r--deps/v8/test/message/console.out15
-rw-r--r--deps/v8/test/message/function-param-after-rest.js7
-rw-r--r--deps/v8/test/message/function-param-after-rest.out5
-rw-r--r--deps/v8/test/message/testcfg.py1
-rw-r--r--deps/v8/test/mjsunit/allocation-site-info.js12
-rw-r--r--deps/v8/test/mjsunit/arguments.js81
-rw-r--r--deps/v8/test/mjsunit/array-constructor-feedback.js11
-rw-r--r--deps/v8/test/mjsunit/array-feedback.js2
-rw-r--r--deps/v8/test/mjsunit/array-literal-feedback.js2
-rw-r--r--deps/v8/test/mjsunit/array-literal-transitions.js2
-rw-r--r--deps/v8/test/mjsunit/array-push5.js2
-rw-r--r--deps/v8/test/mjsunit/array-shift4.js2
-rw-r--r--deps/v8/test/mjsunit/array-slice.js35
-rw-r--r--deps/v8/test/mjsunit/array-store-and-grow.js2
-rw-r--r--deps/v8/test/mjsunit/asm/asm-memory.js55
-rw-r--r--deps/v8/test/mjsunit/asm/asm-stdlib.js46
-rw-r--r--deps/v8/test/mjsunit/asm/asm-validation.js18
-rw-r--r--deps/v8/test/mjsunit/asm/call-annotation.js179
-rw-r--r--deps/v8/test/mjsunit/asm/call-stdlib.js85
-rw-r--r--deps/v8/test/mjsunit/asm/global-imports.js74
-rw-r--r--deps/v8/test/mjsunit/asm/immutable.js48
-rw-r--r--deps/v8/test/mjsunit/asm/int32-mul.js4
-rw-r--r--deps/v8/test/mjsunit/asm/regress-718745.js13
-rw-r--r--deps/v8/test/mjsunit/asm/return-types.js123
-rw-r--r--deps/v8/test/mjsunit/basic-promise.js10
-rw-r--r--deps/v8/test/mjsunit/compiler/array-constructor.js89
-rw-r--r--deps/v8/test/mjsunit/compiler/constructor-inlining-no-harmony-restrict-constructor-return.js12
-rw-r--r--deps/v8/test/mjsunit/compiler/constructor-inlining.js128
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-inlined-from-call.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-16.js18
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-phi-type-2.js41
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js24
-rw-r--r--deps/v8/test/mjsunit/compiler/escape-analysis-representation.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/increment-typefeedback.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-accessors.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/inline-arguments.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/inlined-call.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/instanceof.js12
-rw-r--r--deps/v8/test/mjsunit/compiler/integral32-add-sub.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/object-getprototypeof.js15
-rw-r--r--deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/opt-next-call.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-float32array-length.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-float64array-length.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-int32array-length.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/reflect-getprototypeof.js15
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-5320.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-715204.js13
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-715651.js38
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-compare-negate.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/turbo-number-feedback.js2
-rw-r--r--deps/v8/test/mjsunit/compiler/uint8-clamped-array.js2
-rw-r--r--deps/v8/test/mjsunit/const-field-tracking.js2
-rw-r--r--deps/v8/test/mjsunit/constant-folding-2.js2
-rw-r--r--deps/v8/test/mjsunit/deopt-minus-zero.js2
-rw-r--r--deps/v8/test/mjsunit/deopt-recursive-eager-once.js2
-rw-r--r--deps/v8/test/mjsunit/deopt-recursive-lazy-once.js2
-rw-r--r--deps/v8/test/mjsunit/deopt-recursive-soft-once.js2
-rw-r--r--deps/v8/test/mjsunit/deopt-unlinked.js2
-rw-r--r--deps/v8/test/mjsunit/deopt-with-fp-regs.js2
-rw-r--r--deps/v8/test/mjsunit/deserialize-optimize-inner.js3
-rw-r--r--deps/v8/test/mjsunit/dictionary-properties.js2
-rw-r--r--deps/v8/test/mjsunit/div-mul-minus-one.js2
-rw-r--r--deps/v8/test/mjsunit/element-read-only.js47
-rw-r--r--deps/v8/test/mjsunit/elements-transition-hoisting.js2
-rw-r--r--deps/v8/test/mjsunit/ensure-growing-store-learns.js6
-rw-r--r--deps/v8/test/mjsunit/es6/array-iterator-turbo.js40
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-let-crankshaft.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-scoping-sloppy.js2
-rw-r--r--deps/v8/test/mjsunit/es6/block-scoping.js2
-rw-r--r--deps/v8/test/mjsunit/es6/destructuring-assignment.js90
-rw-r--r--deps/v8/test/mjsunit/es6/regress/regress-6322.js6
-rw-r--r--deps/v8/test/mjsunit/es6/rest-params-lazy-parsing.js2
-rw-r--r--deps/v8/test/mjsunit/es6/string-replace.js6
-rw-r--r--deps/v8/test/mjsunit/es6/string-split.js5
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like-prototype-element-added.js32
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js195
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-construct-offset-not-smi.js35
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-copywithin.js4
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-every.js5
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-fill.js14
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-find.js15
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-findindex.js13
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-foreach.js5
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-indexing.js20
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-iteration.js17
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-map.js49
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-reduce.js16
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-reverse.js9
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-slice.js14
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-sort.js7
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray-tostring.js9
-rw-r--r--deps/v8/test/mjsunit/es6/typedarray.js25
-rw-r--r--deps/v8/test/mjsunit/es8/constructor-returning-primitive.js318
-rw-r--r--deps/v8/test/mjsunit/field-type-tracking.js2
-rw-r--r--deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js2
-rw-r--r--deps/v8/test/mjsunit/function-arguments-duplicate.js4
-rw-r--r--deps/v8/test/mjsunit/getters-on-elements.js35
-rw-r--r--deps/v8/test/mjsunit/global-accessors.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/async-generators-basic.js70
-rw-r--r--deps/v8/test/mjsunit/harmony/atomics.js39
-rw-r--r--deps/v8/test/mjsunit/harmony/do-expressions.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/global-accessors-strict.js54
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-dotall.js6
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-named-captures.js14
-rw-r--r--deps/v8/test/mjsunit/harmony/regexp-property-binary.js22
-rw-r--r--deps/v8/test/mjsunit/indexed-accessors.js2
-rw-r--r--deps/v8/test/mjsunit/keyed-load-hole-to-undefined.js2
-rw-r--r--deps/v8/test/mjsunit/keyed-load-with-string-key.js2
-rw-r--r--deps/v8/test/mjsunit/keyed-load-with-symbol-key.js2
-rw-r--r--deps/v8/test/mjsunit/keyed-store-generic.js22
-rw-r--r--deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js2
-rw-r--r--deps/v8/test/mjsunit/math-imul.js2
-rw-r--r--deps/v8/test/mjsunit/messages.js42
-rw-r--r--deps/v8/test/mjsunit/mjsunit.js49
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status53
-rw-r--r--deps/v8/test/mjsunit/modules-turbo1.js2
-rw-r--r--deps/v8/test/mjsunit/never-optimize.js2
-rw-r--r--deps/v8/test/mjsunit/object-keys.js34
-rw-r--r--deps/v8/test/mjsunit/object-literal.js1399
-rw-r--r--deps/v8/test/mjsunit/object-seal.js2
-rw-r--r--deps/v8/test/mjsunit/osr-elements-kind.js2
-rw-r--r--deps/v8/test/mjsunit/parse-tasks.js55
-rw-r--r--deps/v8/test/mjsunit/polymorph-arrays.js2
-rw-r--r--deps/v8/test/mjsunit/proto-elements-add-during-foreach.js2
-rw-r--r--deps/v8/test/mjsunit/regexp.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-105.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1119.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-115452.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1240.js4
-rw-r--r--deps/v8/test/mjsunit/regress/regress-1493017.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2132.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2250.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2315.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2339.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2451.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-252797.js3
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2618.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3176.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-330046.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3408144.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-347914.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-353004.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3650-3.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-3709.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-385565.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-410912.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4380.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4665.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-475705.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-4825.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5404.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5790.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5802.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-5902.js1
-rw-r--r--deps/v8/test/mjsunit/regress/regress-618608.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6248.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6280.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6288.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6298.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-6337.js5
-rw-r--r--deps/v8/test/mjsunit/regress/regress-641091.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-645680.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-707066.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-709782.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-711165.js9
-rw-r--r--deps/v8/test/mjsunit/regress/regress-716044.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-718285.js46
-rw-r--r--deps/v8/test/mjsunit/regress/regress-718891.js68
-rw-r--r--deps/v8/test/mjsunit/regress/regress-719380.js7
-rw-r--r--deps/v8/test/mjsunit/regress/regress-722978.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-725858.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-727218.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-conditional-position.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-157019.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-157520.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-244461.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-504787.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-506443.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-513507.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-554831.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-587068.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-594183.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-651403-global.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-707580.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-711166.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-712802.js12
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-714696.js10
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-714872.js8
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-714971.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-714981.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-715151.js15
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-715404.js11
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-715455.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-715862.js17
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-716520.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-716804.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-716912.js23
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-718779.js21
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-719479.js24
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-721835.js31
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-723132.js16
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-723455.js18
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-736451.js13
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-736575.js14
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-738763.js25
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-740803.js19
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-747979.js32
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-748539.js22
-rw-r--r--deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-param-local-type.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-r4998.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-store-uncacheable.js2
-rw-r--r--deps/v8/test/mjsunit/regress/regress-v8-5697.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regress-712569.js20
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-02256.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-02256b.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-647649.js5
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-699485.js2
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-710844.js23
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-711203.js30
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-715216-a.js12
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-715216-b.js13
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-717056.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-719175.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-722445.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-731351.js23
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-734108.js16
-rw-r--r--deps/v8/test/mjsunit/regress/wasm/regression-737069.js35
-rw-r--r--deps/v8/test/mjsunit/setters-on-elements.js6
-rw-r--r--deps/v8/test/mjsunit/shared-function-tier-up-turbo.js2
-rw-r--r--deps/v8/test/mjsunit/shift-for-integer-div.js2
-rw-r--r--deps/v8/test/mjsunit/shifts.js2
-rw-r--r--deps/v8/test/mjsunit/sin-cos.js2
-rw-r--r--deps/v8/test/mjsunit/skipping-inner-functions.js37
-rw-r--r--deps/v8/test/mjsunit/smi-mul-const.js2
-rw-r--r--deps/v8/test/mjsunit/smi-mul.js2
-rw-r--r--deps/v8/test/mjsunit/stack-traces.js2
-rw-r--r--deps/v8/test/mjsunit/string-case.js5
-rw-r--r--deps/v8/test/mjsunit/strong-rooted-literals.js2
-rw-r--r--deps/v8/test/mjsunit/type-profile/regress-707223.js8
-rw-r--r--deps/v8/test/mjsunit/unary-minus-deopt.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm-expr.js10
-rw-r--r--deps/v8/test/mjsunit/wasm/asm-wasm.js4
-rw-r--r--deps/v8/test/mjsunit/wasm/async-compile.js91
-rw-r--r--deps/v8/test/mjsunit/wasm/compilation-limits.js6
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-buffer.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-stress.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/huge-memory.js76
-rw-r--r--deps/v8/test/mjsunit/wasm/indirect-tables.js31
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-module-basic.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/instantiate-run-basic.js20
-rw-r--r--deps/v8/test/mjsunit/wasm/js-api.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/wasm-api-overloading.js53
-rw-r--r--deps/v8/test/mkgrokdump/BUILD.gn27
-rw-r--r--deps/v8/test/mkgrokdump/DEPS3
-rw-r--r--deps/v8/test/mkgrokdump/README3
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc133
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.gyp46
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.isolate17
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.status10
-rw-r--r--deps/v8/test/mkgrokdump/testcfg.py49
-rw-r--r--deps/v8/test/mozilla/mozilla.status16
-rw-r--r--deps/v8/test/test262/local-tests/test/language/module-code/comment-single-line-html-close-comment-before-function.js16
-rw-r--r--deps/v8/test/test262/local-tests/test/language/module-code/comment-single-line-html-close-comment-newline-before-function.js15
-rw-r--r--deps/v8/test/test262/test262.status112
-rw-r--r--deps/v8/test/unittests/BUILD.gn36
-rw-r--r--deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc23
-rw-r--r--deps/v8/test/unittests/asmjs/asm-types-unittest.cc (renamed from deps/v8/test/unittests/wasm/asm-types-unittest.cc)30
-rw-r--r--deps/v8/test/unittests/asmjs/switch-logic-unittest.cc (renamed from deps/v8/test/unittests/wasm/switch-logic-unittest.cc)0
-rw-r--r--deps/v8/test/unittests/base/bits-unittest.cc22
-rw-r--r--deps/v8/test/unittests/base/iterator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/base/logging-unittest.cc15
-rw-r--r--deps/v8/test/unittests/base/platform/platform-unittest.cc103
-rw-r--r--deps/v8/test/unittests/char-predicates-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc2
-rw-r--r--deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc42
-rw-r--r--deps/v8/test/unittests/compiler/int64-lowering-unittest.cc6
-rw-r--r--deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc17
-rw-r--r--deps/v8/test/unittests/compiler/mips/OWNERS7
-rw-r--r--deps/v8/test/unittests/compiler/mips64/OWNERS7
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc20
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h2
-rw-r--r--deps/v8/test/unittests/heap/concurrent-marking-deque-unittest.cc57
-rw-r--r--deps/v8/test/unittests/heap/item-parallel-job-unittest.cc211
-rw-r--r--deps/v8/test/unittests/heap/spaces-unittest.cc6
-rw-r--r--deps/v8/test/unittests/heap/workstealing-marking-deque-unittest.cc33
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc166
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc11
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc (renamed from deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc)40
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc5
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-source-info-unittest.cc52
-rw-r--r--deps/v8/test/unittests/object-unittest.cc60
-rw-r--r--deps/v8/test/unittests/parser/preparser-unittest.cc41
-rw-r--r--deps/v8/test/unittests/test-helpers.cc (renamed from deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-helper.cc)2
-rw-r--r--deps/v8/test/unittests/test-helpers.h (renamed from deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-helper.h)6
-rw-r--r--deps/v8/test/unittests/unittests.gyp42
-rw-r--r--deps/v8/test/unittests/value-serializer-unittest.cc71
-rw-r--r--deps/v8/test/unittests/wasm/control-transfer-unittest.cc213
-rw-r--r--deps/v8/test/unittests/wasm/decoder-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc21
-rw-r--r--deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc10
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc70
-rw-r--r--deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc498
-rw-r--r--deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc2
-rw-r--r--deps/v8/test/wasm-spec-tests/OWNERS4
-rw-r--r--deps/v8/test/wasm-spec-tests/testcfg.py35
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha11
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.gyp26
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.isolate15
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status37
-rw-r--r--deps/v8/test/webkit/webkit.status6
-rw-r--r--deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template10
-rwxr-xr-xdeps/v8/tools/dev/gm.py30
-rw-r--r--deps/v8/tools/disasm.py6
-rwxr-xr-xdeps/v8/tools/eval_gc_time.sh1
-rw-r--r--deps/v8/tools/foozzie/testdata/failure_output.txt2
-rw-r--r--deps/v8/tools/foozzie/testdata/fuzz-123.js1
-rwxr-xr-xdeps/v8/tools/foozzie/v8_foozzie.py15
-rw-r--r--deps/v8/tools/foozzie/v8_foozzie_test.py9
-rw-r--r--deps/v8/tools/foozzie/v8_suppressions.py54
-rw-r--r--deps/v8/tools/gcmole/gcmole.lua2
-rw-r--r--deps/v8/tools/gdbinit25
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py60
-rwxr-xr-xdeps/v8/tools/get_byteorder.py17
-rwxr-xr-xdeps/v8/tools/grokdump.py22
-rw-r--r--deps/v8/tools/ic-processor.js2
-rwxr-xr-xdeps/v8/tools/ignition/linux_perf_report.py4
-rw-r--r--deps/v8/tools/link_clicker.extension/README.txt12
-rw-r--r--deps/v8/tools/link_clicker.extension/background.js74
-rw-r--r--deps/v8/tools/link_clicker.extension/content.js66
-rw-r--r--deps/v8/tools/link_clicker.extension/icon.png bin 0 -> 230 bytes
-rw-r--r--deps/v8/tools/link_clicker.extension/manifest.json21
-rw-r--r--deps/v8/tools/link_clicker.extension/popup.html50
-rw-r--r--deps/v8/tools/link_clicker.extension/popup.js53
-rw-r--r--deps/v8/tools/luci-go/linux64/isolate.sha12
-rw-r--r--deps/v8/tools/luci-go/mac64/isolate.sha12
-rw-r--r--deps/v8/tools/luci-go/win64/isolate.exe.sha12
-rw-r--r--deps/v8/tools/memory/lsan/suppressions.txt3
-rw-r--r--deps/v8/tools/perf_tests/chromium_revision1
-rwxr-xr-xdeps/v8/tools/plot-timer-events4
-rwxr-xr-xdeps/v8/tools/presubmit.py8
-rw-r--r--deps/v8/tools/profile.js9
-rw-r--r--deps/v8/tools/release/git_recipes.py8
-rwxr-xr-xdeps/v8/tools/release/update_node.py23
-rwxr-xr-xdeps/v8/tools/run-tests.py13
-rwxr-xr-xdeps/v8/tools/run_perf.py13
-rw-r--r--deps/v8/tools/testrunner/local/execution.py12
-rw-r--r--deps/v8/tools/testrunner/local/testsuite.py2
-rw-r--r--deps/v8/tools/testrunner/local/variants.py22
-rw-r--r--deps/v8/tools/testrunner/testrunner.isolate2
-rwxr-xr-xdeps/v8/tools/try_perf.py2
-rw-r--r--deps/v8/tools/turbolizer/disassembly-view.js2
-rwxr-xr-xdeps/v8/tools/v8-info.sh161
-rw-r--r--deps/v8/tools/v8heapconst.py448
-rw-r--r--deps/v8/tools/v8heapconst.py.tmpl30
-rwxr-xr-xdeps/v8/tools/verify_source_deps.py6
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-fuzzers.sh (renamed from deps/v8/tools/update-wasm-fuzzers.sh)36
-rwxr-xr-xdeps/v8/tools/wasm/update-wasm-spec-tests.sh30
-rw-r--r--deps/v8/tools/whitespace.txt2
1529 files changed, 61331 insertions, 41237 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index f85d0d30b1..bcec3768f3 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -45,10 +45,10 @@
/src/inspector/build/closure-compiler
/src/inspector/build/closure-compiler.tar.gz
/test/benchmarks/data
-/test/fuzzer/wasm
-/test/fuzzer/wasm.tar.gz
-/test/fuzzer/wasm_asmjs
-/test/fuzzer/wasm_asmjs.tar.gz
+/test/fuzzer/wasm_corpus
+/test/fuzzer/wasm_corpus.tar.gz
+/test/fuzzer/wasm_asmjs_corpus
+/test/fuzzer/wasm_asmjs_corpus.tar.gz
/test/mozilla/data
/test/promises-aplus/promises-tests
/test/promises-aplus/promises-tests.tar.gz
@@ -57,6 +57,8 @@
/test/test262/data.tar
/test/test262/harness
/test/wasm-js
+/test/wasm-spec-tests/tests
+/test/wasm-spec-tests/tests.tar.gz
/testing/gmock
/testing/gtest/*
!/testing/gtest/include
diff --git a/deps/v8/.gn b/deps/v8/.gn
index b3adcc74cd..c80980ea09 100644
--- a/deps/v8/.gn
+++ b/deps/v8/.gn
@@ -21,7 +21,5 @@ check_targets = []
# These are the list of GN files that run exec_script. This whitelist exists
# to force additional review for new uses of exec_script, which is strongly
# discouraged except for gypi_to_gn calls.
-exec_script_whitelist = build_dotfile_settings.exec_script_whitelist + [
- "//test/test262/BUILD.gn",
- "//BUILD.gn",
- ]
+exec_script_whitelist =
+ build_dotfile_settings.exec_script_whitelist + [ "//test/test262/BUILD.gn" ]
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index 27882149a4..07cf319144 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -70,6 +70,7 @@ Hirofumi Mako <mkhrfm@gmail.com>
Honggyu Kim <honggyu.kp@gmail.com>
Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Isiah Meadows <impinball@gmail.com>
+Jaime Bernardo <jaime@janeasystems.com>
Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index becf4177c1..80d6f34133 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -5,6 +5,7 @@
import("//build/config/android/config.gni")
import("//build/config/arm.gni")
import("//build/config/dcheck_always_on.gni")
+import("//build/config/host_byteorder.gni")
import("//build/config/mips.gni")
import("//build/config/sanitizers/sanitizers.gni")
@@ -69,14 +70,17 @@ declare_args() {
# Sets -dOBJECT_PRINT.
v8_enable_object_print = ""
- # Sets -dTRACE_MAPS.
+ # Sets -dV8_TRACE_MAPS.
v8_enable_trace_maps = ""
# Sets -dV8_ENABLE_CHECKS.
v8_enable_v8_checks = ""
- # Builds the snapshot with --trace-ignition
- v8_trace_ignition = false
+ # Sets -dV8_TRACE_IGNITION.
+ v8_enable_trace_ignition = false
+
+ # Sets -dV8_CONCURRENT_MARKING
+ v8_enable_concurrent_marking = false
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
@@ -110,19 +114,6 @@ declare_args() {
v8_enable_gdbjit = ((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
v8_current_cpu == "x87") && (is_linux || is_mac)) ||
(v8_current_cpu == "ppc64" && is_linux)
-
- # Set v8_host_byteorder
- v8_host_byteorder = "little"
-
- # ppc64 can be either BE or LE
- if (host_cpu == "ppc64") {
- v8_host_byteorder =
- exec_script("//tools/get_byteorder.py", [], "trim string")
- }
- if (host_cpu == "ppc" || host_cpu == "s390" || host_cpu == "s390x" ||
- host_cpu == "mips" || host_cpu == "mips64") {
- v8_host_byteorder = "big"
- }
}
# Derived defaults.
@@ -247,7 +238,10 @@ config("features") {
defines += [ "VERIFY_PREDICTABLE" ]
}
if (v8_enable_trace_maps) {
- defines += [ "TRACE_MAPS" ]
+ defines += [ "V8_TRACE_MAPS" ]
+ }
+ if (v8_enable_trace_ignition) {
+ defines += [ "V8_TRACE_IGNITION" ]
}
if (v8_enable_v8_checks) {
defines += [ "V8_ENABLE_CHECKS" ]
@@ -262,7 +256,7 @@ config("features") {
defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ]
}
if (v8_enable_i18n_support) {
- defines += [ "V8_I18N_SUPPORT" ]
+ defines += [ "V8_INTL_SUPPORT" ]
}
if (v8_enable_handle_zapping) {
defines += [ "ENABLE_HANDLE_ZAPPING" ]
@@ -273,6 +267,9 @@ config("features") {
if (v8_use_external_startup_data) {
defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ]
}
+ if (v8_enable_concurrent_marking) {
+ defines += [ "V8_CONCURRENT_MARKING" ]
+ }
}
config("toolchain") {
@@ -386,7 +383,7 @@ config("toolchain") {
if (v8_current_cpu == "s390x") {
defines += [ "V8_TARGET_ARCH_S390X" ]
}
- if (v8_host_byteorder == "little") {
+ if (host_byteorder == "little") {
defines += [ "V8_TARGET_ARCH_S390_LE_SIM" ]
} else {
cflags += [ "-march=z196" ]
@@ -397,9 +394,9 @@ config("toolchain") {
if (v8_current_cpu == "ppc64") {
defines += [ "V8_TARGET_ARCH_PPC64" ]
}
- if (v8_host_byteorder == "little") {
+ if (host_byteorder == "little") {
defines += [ "V8_TARGET_ARCH_PPC_LE" ]
- } else if (v8_host_byteorder == "big") {
+ } else if (host_byteorder == "big") {
defines += [ "V8_TARGET_ARCH_PPC_BE" ]
if (current_os == "aix") {
cflags += [
@@ -413,6 +410,7 @@ config("toolchain") {
}
}
}
+
if (v8_current_cpu == "x86") {
defines += [ "V8_TARGET_ARCH_IA32" ]
if (is_win) {
@@ -487,6 +485,15 @@ config("toolchain") {
"/wd4800", # Forcing value to bool.
]
}
+
+ if (!is_clang && !is_win) {
+ cflags += [
+ # Disable gcc warnings for optimizations based on the assumption that
+ # signed overflow does not occur. Generates false positives (see
+ # http://crbug.com/v8/6341).
+ "-Wno-strict-overflow",
+ ]
+ }
}
###############################################################################
@@ -509,7 +516,7 @@ action("js2c") {
"src/js/macros.py",
"src/messages.h",
"src/js/prologue.js",
- "src/js/runtime.js",
+ "src/js/max-min.js",
"src/js/v8natives.js",
"src/js/array.js",
"src/js/string.js",
@@ -522,7 +529,6 @@ action("js2c") {
"src/js/templates.js",
"src/js/spread.js",
"src/js/proxy.js",
- "src/js/harmony-string-padding.js",
"src/debug/mirrors.js",
"src/debug/debug.js",
"src/debug/liveedit.js",
@@ -533,7 +539,7 @@ action("js2c") {
]
if (v8_enable_i18n_support) {
- sources += [ "src/js/i18n.js" ]
+ sources += [ "src/js/intl.js" ]
}
args = [
@@ -695,6 +701,8 @@ action("postmortem-metadata") {
sources = [
"src/objects.h",
"src/objects-inl.h",
+ "src/objects/map.h",
+ "src/objects/map-inl.h",
]
outputs = [
@@ -742,10 +750,6 @@ action("run_mksnapshot") {
]
}
- if (v8_trace_ignition) {
- args += [ "--trace-ignition" ]
- }
-
if (v8_use_external_startup_data) {
outputs += [ "$root_out_dir/snapshot_blob.bin" ]
args += [
@@ -885,7 +889,6 @@ v8_source_set("v8_builtins_generators") {
visibility = [
":*",
"test/cctest:*",
- "test/unittests:*",
]
deps = [
@@ -904,6 +907,7 @@ v8_source_set("v8_builtins_generators") {
"src/builtins/builtins-async-iterator-gen.cc",
"src/builtins/builtins-boolean-gen.cc",
"src/builtins/builtins-call-gen.cc",
+ "src/builtins/builtins-console-gen.cc",
"src/builtins/builtins-constructor-gen.cc",
"src/builtins/builtins-constructor-gen.h",
"src/builtins/builtins-constructor.h",
@@ -918,6 +922,7 @@ v8_source_set("v8_builtins_generators") {
"src/builtins/builtins-ic-gen.cc",
"src/builtins/builtins-internal-gen.cc",
"src/builtins/builtins-interpreter-gen.cc",
+ "src/builtins/builtins-intl-gen.cc",
"src/builtins/builtins-math-gen.cc",
"src/builtins/builtins-number-gen.cc",
"src/builtins/builtins-object-gen.cc",
@@ -927,6 +932,7 @@ v8_source_set("v8_builtins_generators") {
"src/builtins/builtins-regexp-gen.h",
"src/builtins/builtins-sharedarraybuffer-gen.cc",
"src/builtins/builtins-string-gen.cc",
+ "src/builtins/builtins-string-gen.h",
"src/builtins/builtins-symbol-gen.cc",
"src/builtins/builtins-typedarray-gen.cc",
"src/builtins/builtins-utils-gen.h",
@@ -995,6 +1001,10 @@ v8_source_set("v8_builtins_generators") {
]
}
+ if (!v8_enable_i18n_support) {
+ sources -= [ "src/builtins/builtins-intl-gen.cc" ]
+ }
+
configs = [ ":internal_config" ]
}
@@ -1019,11 +1029,27 @@ v8_header_set("v8_version") {
configs = [ ":internal_config" ]
sources = [
+ "include/v8-value-serializer-version.h",
"include/v8-version-string.h",
"include/v8-version.h",
]
}
+# This is split out to be a non-code containing target that the Chromium browser
+# can depend upon to get basic v8 types.
+v8_header_set("v8_headers") {
+ configs = [ ":internal_config" ]
+
+ sources = [
+ "include/v8.h",
+ "include/v8config.h",
+ ]
+
+ deps = [
+ ":v8_version",
+ ]
+}
+
v8_source_set("v8_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@@ -1064,12 +1090,8 @@ v8_source_set("v8_base") {
"src/asmjs/asm-parser.h",
"src/asmjs/asm-scanner.cc",
"src/asmjs/asm-scanner.h",
- "src/asmjs/asm-typer.cc",
- "src/asmjs/asm-typer.h",
"src/asmjs/asm-types.cc",
"src/asmjs/asm-types.h",
- "src/asmjs/asm-wasm-builder.cc",
- "src/asmjs/asm-wasm-builder.h",
"src/asmjs/switch-logic.cc",
"src/asmjs/switch-logic.h",
"src/assembler-inl.h",
@@ -1123,6 +1145,7 @@ v8_source_set("v8_base") {
"src/builtins/builtins-boolean.cc",
"src/builtins/builtins-call.cc",
"src/builtins/builtins-callsite.cc",
+ "src/builtins/builtins-console.cc",
"src/builtins/builtins-constructor.h",
"src/builtins/builtins-dataview.cc",
"src/builtins/builtins-date.cc",
@@ -1558,6 +1581,7 @@ v8_source_set("v8_base") {
"src/heap/array-buffer-tracker.h",
"src/heap/code-stats.cc",
"src/heap/code-stats.h",
+ "src/heap/concurrent-marking-deque.h",
"src/heap/concurrent-marking.cc",
"src/heap/concurrent-marking.h",
"src/heap/embedder-tracing.cc",
@@ -1574,6 +1598,7 @@ v8_source_set("v8_base") {
"src/heap/incremental-marking-job.h",
"src/heap/incremental-marking.cc",
"src/heap/incremental-marking.h",
+ "src/heap/item-parallel-job.h",
"src/heap/mark-compact-inl.h",
"src/heap/mark-compact.cc",
"src/heap/mark-compact.h",
@@ -1592,14 +1617,15 @@ v8_source_set("v8_base") {
"src/heap/scavenger-inl.h",
"src/heap/scavenger.cc",
"src/heap/scavenger.h",
+ "src/heap/sequential-marking-deque.cc",
+ "src/heap/sequential-marking-deque.h",
"src/heap/slot-set.h",
"src/heap/spaces-inl.h",
"src/heap/spaces.cc",
"src/heap/spaces.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
- "src/i18n.cc",
- "src/i18n.h",
+ "src/heap/workstealing-marking-deque.h",
"src/ic/access-compiler-data.h",
"src/ic/access-compiler.cc",
"src/ic/access-compiler.h",
@@ -1640,17 +1666,20 @@ v8_source_set("v8_base") {
"src/interpreter/bytecode-flags.h",
"src/interpreter/bytecode-generator.cc",
"src/interpreter/bytecode-generator.h",
+ "src/interpreter/bytecode-jump-table.h",
"src/interpreter/bytecode-label.cc",
"src/interpreter/bytecode-label.h",
+ "src/interpreter/bytecode-node.cc",
+ "src/interpreter/bytecode-node.h",
"src/interpreter/bytecode-operands.cc",
"src/interpreter/bytecode-operands.h",
- "src/interpreter/bytecode-pipeline.cc",
- "src/interpreter/bytecode-pipeline.h",
"src/interpreter/bytecode-register-allocator.h",
"src/interpreter/bytecode-register-optimizer.cc",
"src/interpreter/bytecode-register-optimizer.h",
"src/interpreter/bytecode-register.cc",
"src/interpreter/bytecode-register.h",
+ "src/interpreter/bytecode-source-info.cc",
+ "src/interpreter/bytecode-source-info.h",
"src/interpreter/bytecode-traits.h",
"src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
@@ -1665,6 +1694,8 @@ v8_source_set("v8_base") {
"src/interpreter/interpreter-intrinsics.h",
"src/interpreter/interpreter.cc",
"src/interpreter/interpreter.h",
+ "src/intl.cc",
+ "src/intl.h",
"src/isolate-inl.h",
"src/isolate.cc",
"src/isolate.h",
@@ -1717,10 +1748,13 @@ v8_source_set("v8_base") {
"src/objects/dictionary.h",
"src/objects/frame-array-inl.h",
"src/objects/frame-array.h",
- "src/objects/hash-table-inl.h",
"src/objects/hash-table.h",
+ "src/objects/intl-objects.cc",
+ "src/objects/intl-objects.h",
"src/objects/literal-objects.cc",
"src/objects/literal-objects.h",
+ "src/objects/map-inl.h",
+ "src/objects/map.h",
"src/objects/module-info.h",
"src/objects/object-macros-undef.h",
"src/objects/object-macros.h",
@@ -1833,9 +1867,9 @@ v8_source_set("v8_base") {
"src/runtime/runtime-function.cc",
"src/runtime/runtime-futex.cc",
"src/runtime/runtime-generator.cc",
- "src/runtime/runtime-i18n.cc",
"src/runtime/runtime-internal.cc",
"src/runtime/runtime-interpreter.cc",
+ "src/runtime/runtime-intl.cc",
"src/runtime/runtime-literals.cc",
"src/runtime/runtime-liveedit.cc",
"src/runtime/runtime-maths.cc",
@@ -1891,6 +1925,8 @@ v8_source_set("v8_base") {
"src/string-builder.h",
"src/string-case.cc",
"src/string-case.h",
+ "src/string-hasher-inl.h",
+ "src/string-hasher.h",
"src/string-search.h",
"src/string-stream.cc",
"src/string-stream.h",
@@ -1935,6 +1971,8 @@ v8_source_set("v8_base") {
"src/vector.h",
"src/version.cc",
"src/version.h",
+ "src/visitors.cc",
+ "src/visitors.h",
"src/vm-state-inl.h",
"src/vm-state.h",
"src/wasm/decoder.h",
@@ -1942,10 +1980,14 @@ v8_source_set("v8_base") {
"src/wasm/function-body-decoder.cc",
"src/wasm/function-body-decoder.h",
"src/wasm/leb-helper.h",
+ "src/wasm/local-decl-encoder.cc",
+ "src/wasm/local-decl-encoder.h",
"src/wasm/module-decoder.cc",
"src/wasm/module-decoder.h",
"src/wasm/signature-map.cc",
"src/wasm/signature-map.h",
+ "src/wasm/streaming-decoder.cc",
+ "src/wasm/streaming-decoder.h",
"src/wasm/wasm-code-specialization.cc",
"src/wasm/wasm-code-specialization.h",
"src/wasm/wasm-debug.cc",
@@ -1956,7 +1998,6 @@ v8_source_set("v8_base") {
"src/wasm/wasm-js.cc",
"src/wasm/wasm-js.h",
"src/wasm/wasm-limits.h",
- "src/wasm/wasm-macro-gen.h",
"src/wasm/wasm-module-builder.cc",
"src/wasm/wasm-module-builder.h",
"src/wasm/wasm-module.cc",
@@ -2363,21 +2404,27 @@ v8_source_set("v8_base") {
defines = []
deps = [
+ ":v8_headers",
":v8_libbase",
":v8_libsampler",
- ":v8_version",
"src/inspector:inspector",
]
if (v8_enable_i18n_support) {
- deps += [ "//third_party/icu" ]
+ public_deps = [
+ "//third_party/icu",
+ ]
if (is_win) {
deps += [ "//third_party/icu:icudata" ]
}
} else {
sources -= [
- "src/i18n.cc",
- "src/i18n.h",
+ "src/builtins/builtins-intl.cc",
+ "src/intl.cc",
+ "src/intl.h",
+ "src/objects/intl-objects.cc",
+ "src/objects/intl-objects.h",
+ "src/runtime/runtime-intl.cc",
]
}
@@ -2406,6 +2453,7 @@ v8_component("v8_libbase") {
"src/base/debug/stack_trace.h",
"src/base/division-by-constant.cc",
"src/base/division-by-constant.h",
+ "src/base/export-template.h",
"src/base/file-utils.cc",
"src/base/file-utils.h",
"src/base/flags.h",
@@ -2473,6 +2521,16 @@ v8_component("v8_libbase") {
"dl",
"rt",
]
+ } else if (current_os == "aix") {
+ sources += [
+ "src/base/debug/stack_trace_posix.cc",
+ "src/base/platform/platform-aix.cc",
+ ]
+
+ libs = [
+ "dl",
+ "rt",
+ ]
} else if (is_android) {
if (current_toolchain == host_toolchain) {
libs = [
@@ -2589,10 +2647,6 @@ v8_source_set("fuzzer_support") {
":v8_libbase",
":v8_libplatform",
]
-
- if (v8_enable_i18n_support) {
- deps += [ "//third_party/icu" ]
- }
}
###############################################################################
@@ -2615,7 +2669,7 @@ if (current_toolchain == v8_snapshot_toolchain) {
":v8_libbase",
":v8_libplatform",
":v8_nosnapshot",
- "//build/config/sanitizers:deps",
+ "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
}
@@ -2668,6 +2722,12 @@ group("v8_clusterfuzz") {
}
}
+group("v8_archive") {
+ deps = [
+ ":d8",
+ ]
+}
+
group("v8_fuzzers") {
testonly = true
deps = [
@@ -2677,6 +2737,7 @@ group("v8_fuzzers") {
":v8_simple_wasm_asmjs_fuzzer",
":v8_simple_wasm_call_fuzzer",
":v8_simple_wasm_code_fuzzer",
+ ":v8_simple_wasm_compile_fuzzer",
":v8_simple_wasm_data_section_fuzzer",
":v8_simple_wasm_function_sigs_section_fuzzer",
":v8_simple_wasm_fuzzer",
@@ -2707,6 +2768,31 @@ if (is_component_build) {
public_configs = [ ":external_config" ]
}
+
+ v8_component("v8_for_testing") {
+ testonly = true
+
+ sources = [
+ "src/v8dll-main.cc",
+ ]
+
+ deps = [
+ ":v8_dump_build_config",
+ ]
+
+ public_deps = [
+ ":v8_base",
+ ":v8_maybe_snapshot",
+ ]
+
+ if (v8_use_snapshot) {
+ public_deps += [ ":v8_builtins_generators" ]
+ }
+
+ configs = [ ":internal_config" ]
+
+ public_configs = [ ":external_config" ]
+ }
} else {
group("v8") {
deps = [
@@ -2720,11 +2806,32 @@ if (is_component_build) {
public_configs = [ ":external_config" ]
}
+
+ group("v8_for_testing") {
+ testonly = true
+
+ deps = [
+ ":v8_dump_build_config",
+ ]
+
+ public_deps = [
+ ":v8_base",
+ ":v8_maybe_snapshot",
+ ]
+
+ if (v8_use_snapshot) {
+ public_deps += [ ":v8_builtins_generators" ]
+ }
+
+ public_configs = [ ":external_config" ]
+ }
}
v8_executable("d8") {
sources = [
"$target_gen_dir/d8-js.cc",
+ "src/d8-console.cc",
+ "src/d8-console.h",
"src/d8.cc",
"src/d8.h",
]
@@ -2741,7 +2848,7 @@ v8_executable("d8") {
":v8",
":v8_libbase",
":v8_libplatform",
- "//build/config/sanitizers:deps",
+ "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
@@ -2751,10 +2858,6 @@ v8_executable("d8") {
sources += [ "src/d8-windows.cc" ]
}
- if (v8_enable_i18n_support) {
- deps += [ "//third_party/icu" ]
- }
-
if (v8_correctness_fuzzer) {
deps += [ "tools/foozzie:v8_correctness_fuzzer_resources" ]
}
@@ -2790,13 +2893,9 @@ v8_executable("v8_hello_world") {
":v8",
":v8_libbase",
":v8_libplatform",
- "//build/config/sanitizers:deps",
+ "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
-
- if (v8_enable_i18n_support) {
- deps += [ "//third_party/icu" ]
- }
}
v8_executable("v8_sample_process") {
@@ -2815,13 +2914,9 @@ v8_executable("v8_sample_process") {
":v8",
":v8_libbase",
":v8_libplatform",
- "//build/config/sanitizers:deps",
+ "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
-
- if (v8_enable_i18n_support) {
- deps += [ "//third_party/icu" ]
- }
}
v8_executable("v8_parser_shell") {
@@ -2839,13 +2934,9 @@ v8_executable("v8_parser_shell") {
":v8",
":v8_libbase",
":v8_libplatform",
- "//build/config/sanitizers:deps",
+ "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
-
- if (v8_enable_i18n_support) {
- deps += [ "//third_party/icu" ]
- }
}
if (want_v8_shell) {
@@ -2865,13 +2956,9 @@ if (want_v8_shell) {
":v8",
":v8_libbase",
":v8_libplatform",
- "//build/config/sanitizers:deps",
+ "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
-
- if (v8_enable_i18n_support) {
- deps += [ "//third_party/icu" ]
- }
}
}
@@ -2881,7 +2968,7 @@ template("v8_fuzzer") {
v8_executable("v8_simple_" + name) {
deps = [
":" + name,
- "//build/config/sanitizers:deps",
+ "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
@@ -2966,6 +3053,7 @@ v8_source_set("wasm_fuzzer") {
deps = [
":fuzzer_support",
+ ":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
@@ -2985,6 +3073,7 @@ v8_source_set("wasm_asmjs_fuzzer") {
deps = [
":fuzzer_support",
+ ":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
@@ -3005,6 +3094,7 @@ v8_source_set("wasm_code_fuzzer") {
deps = [
":fuzzer_support",
+ ":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
@@ -3025,6 +3115,7 @@ v8_source_set("wasm_call_fuzzer") {
deps = [
":fuzzer_support",
+ ":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
@@ -3037,10 +3128,10 @@ v8_source_set("wasm_call_fuzzer") {
v8_fuzzer("wasm_call_fuzzer") {
}
-v8_source_set("lib_wasm_section_fuzzer") {
+v8_source_set("lib_wasm_fuzzer_common") {
sources = [
- "test/fuzzer/wasm-section-fuzzers.cc",
- "test/fuzzer/wasm-section-fuzzers.h",
+ "test/fuzzer/wasm-fuzzer-common.cc",
+ "test/fuzzer/wasm-fuzzer-common.h",
]
configs = [
@@ -3056,7 +3147,7 @@ v8_source_set("wasm_types_section_fuzzer") {
deps = [
":fuzzer_support",
- ":lib_wasm_section_fuzzer",
+ ":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
@@ -3076,7 +3167,7 @@ v8_source_set("wasm_names_section_fuzzer") {
deps = [
":fuzzer_support",
- ":lib_wasm_section_fuzzer",
+ ":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
@@ -3096,7 +3187,7 @@ v8_source_set("wasm_globals_section_fuzzer") {
deps = [
":fuzzer_support",
- ":lib_wasm_section_fuzzer",
+ ":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
@@ -3116,7 +3207,7 @@ v8_source_set("wasm_imports_section_fuzzer") {
deps = [
":fuzzer_support",
- ":lib_wasm_section_fuzzer",
+ ":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
@@ -3136,7 +3227,7 @@ v8_source_set("wasm_function_sigs_section_fuzzer") {
deps = [
":fuzzer_support",
- ":lib_wasm_section_fuzzer",
+ ":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
@@ -3156,7 +3247,7 @@ v8_source_set("wasm_memory_section_fuzzer") {
deps = [
":fuzzer_support",
- ":lib_wasm_section_fuzzer",
+ ":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
@@ -3176,7 +3267,7 @@ v8_source_set("wasm_data_section_fuzzer") {
deps = [
":fuzzer_support",
- ":lib_wasm_section_fuzzer",
+ ":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
@@ -3197,6 +3288,7 @@ v8_source_set("wasm_compile_fuzzer") {
deps = [
":fuzzer_support",
+ ":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 515e5b830e..7ee1b37e79 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,1493 @@
+2017-05-20: Version 6.0.286
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.285
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.284
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.283
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.282
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.281
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.280
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.279
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.278
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.277
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.276
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.275
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.274
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.273
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.272
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-19: Version 6.0.271
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.270
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.269
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.268
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.267
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.266
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.265
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.264
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.263
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.262
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.261
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.260
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.259
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.258
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.257
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.256
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.255
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-18: Version 6.0.254
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.253
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.252
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.251
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.250
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.249
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.248
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.247
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.246
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.245
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.244
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.243
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.242
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.241
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-17: Version 6.0.240
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.239
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.238
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.237
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.236
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.235
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.234
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.233
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.232
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.231
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.230
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.229
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.228
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-16: Version 6.0.227
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.226
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.225
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.224
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.223
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.222
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.221
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.220
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.219
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.218
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.217
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.216
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.215
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.214
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.213
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.212
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.211
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.210
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.209
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.208
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-15: Version 6.0.207
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-14: Version 6.0.206
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-13: Version 6.0.205
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-12: Version 6.0.204
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-12: Version 6.0.203
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-12: Version 6.0.202
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-12: Version 6.0.201
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-12: Version 6.0.200
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-12: Version 6.0.199
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-12: Version 6.0.198
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-11: Version 6.0.197
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-11: Version 6.0.196
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-11: Version 6.0.195
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-11: Version 6.0.194
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-11: Version 6.0.193
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-10: Version 6.0.192
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-10: Version 6.0.191
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-10: Version 6.0.190
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-10: Version 6.0.189
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-10: Version 6.0.188
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-09: Version 6.0.187
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-08: Version 6.0.186
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-08: Version 6.0.185
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-08: Version 6.0.184
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-08: Version 6.0.183
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-08: Version 6.0.182
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-08: Version 6.0.181
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-08: Version 6.0.180
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-08: Version 6.0.179
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-08: Version 6.0.178
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-06: Version 6.0.177
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-06: Version 6.0.176
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-06: Version 6.0.175
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.174
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.173
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.172
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.171
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.170
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.169
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.168
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.167
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.166
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.165
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.164
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.163
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.162
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.161
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.160
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.159
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.158
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-05: Version 6.0.157
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-04: Version 6.0.156
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-04: Version 6.0.155
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-04: Version 6.0.154
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-04: Version 6.0.153
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-04: Version 6.0.152
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-04: Version 6.0.151
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-03: Version 6.0.150
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-03: Version 6.0.149
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-03: Version 6.0.148
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-03: Version 6.0.147
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-03: Version 6.0.146
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-03: Version 6.0.145
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-03: Version 6.0.144
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-03: Version 6.0.143
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-03: Version 6.0.142
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-02: Version 6.0.141
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-02: Version 6.0.140
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-02: Version 6.0.139
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-02: Version 6.0.138
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-02: Version 6.0.137
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-02: Version 6.0.136
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-02: Version 6.0.135
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-02: Version 6.0.134
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-02: Version 6.0.133
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-02: Version 6.0.132
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-02: Version 6.0.131
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-02: Version 6.0.130
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-01: Version 6.0.129
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-01: Version 6.0.128
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-01: Version 6.0.127
+
+ Performance and stability improvements on all platforms.
+
+
+2017-05-01: Version 6.0.126
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-30: Version 6.0.125
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-30: Version 6.0.124
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-29: Version 6.0.123
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-29: Version 6.0.122
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-29: Version 6.0.121
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-29: Version 6.0.120
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-29: Version 6.0.119
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-28: Version 6.0.118
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-28: Version 6.0.117
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-28: Version 6.0.116
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-28: Version 6.0.115
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-28: Version 6.0.114
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-28: Version 6.0.113
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-28: Version 6.0.112
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-28: Version 6.0.111
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-28: Version 6.0.110
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-28: Version 6.0.109
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-28: Version 6.0.108
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.107
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.106
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.105
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.104
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.103
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.102
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.101
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.100
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.99
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.98
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.97
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.96
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.95
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.94
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.93
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.92
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-27: Version 6.0.91
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-26: Version 6.0.90
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-26: Version 6.0.89
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-26: Version 6.0.88
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-26: Version 6.0.87
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-26: Version 6.0.86
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-25: Version 6.0.85
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-25: Version 6.0.84
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-25: Version 6.0.83
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-25: Version 6.0.82
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-25: Version 6.0.81
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-25: Version 6.0.80
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.79
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.78
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.77
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.76
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.75
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.74
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.73
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.72
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.71
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.70
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.69
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.68
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.67
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.66
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.65
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.64
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.63
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-24: Version 6.0.62
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-22: Version 6.0.61
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-21: Version 6.0.60
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-21: Version 6.0.59
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-21: Version 6.0.58
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-21: Version 6.0.57
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-21: Version 6.0.56
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-21: Version 6.0.55
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-21: Version 6.0.54
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-21: Version 6.0.53
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-21: Version 6.0.52
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-20: Version 6.0.51
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-20: Version 6.0.50
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-20: Version 6.0.49
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-20: Version 6.0.48
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-20: Version 6.0.47
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-20: Version 6.0.46
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-20: Version 6.0.45
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-20: Version 6.0.44
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-20: Version 6.0.43
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-20: Version 6.0.42
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.41
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.40
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.39
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.38
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.37
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.36
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.35
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.34
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.33
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.32
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.31
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.30
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.29
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.28
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.27
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.26
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.25
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-19: Version 6.0.24
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.23
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.22
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.21
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.20
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.19
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.18
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.17
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.16
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.15
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.14
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.13
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.12
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-18: Version 6.0.11
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-17: Version 6.0.10
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-17: Version 6.0.9
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-17: Version 6.0.8
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-17: Version 6.0.7
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-17: Version 6.0.6
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-17: Version 6.0.5
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-16: Version 6.0.4
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-15: Version 6.0.3
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-15: Version 6.0.2
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-14: Version 6.0.1
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-13: Version 5.9.223
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-13: Version 5.9.222
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-13: Version 5.9.221
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-12: Version 5.9.220
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-12: Version 5.9.219
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-12: Version 5.9.218
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-12: Version 5.9.217
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-12: Version 5.9.216
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-12: Version 5.9.215
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-12: Version 5.9.214
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-12: Version 5.9.213
+
+ Performance and stability improvements on all platforms.
+
+
+2017-04-11: Version 5.9.212
+
+ Performance and stability improvements on all platforms.
+
+
2017-04-11: Version 5.9.211
Performance and stability improvements on all platforms.
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index aeeb0e5911..1a55e663c6 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -8,23 +8,23 @@ vars = {
deps = {
"v8/build":
- Var("chromium_url") + "/chromium/src/build.git" + "@" + "94c06fe70f3f6429c59e3ec0f6acd4f6710050b2",
+ Var("chromium_url") + "/chromium/src/build.git" + "@" + "1caf3a69f3b0379c9fef2493aa1b3cda96e17d7b",
"v8/tools/gyp":
- Var("chromium_url") + "/external/gyp.git" + "@" + "e7079f0e0e14108ab0dba58728ff219637458563",
+ Var("chromium_url") + "/external/gyp.git" + "@" + "eb296f67da078ec01f5e3a9ea9cdc6d26d680161",
"v8/third_party/icu":
- Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "450be73c9ee8ae29d43d4fdc82febb2a5f62bfb5",
+ Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "c844075aa0f1758d04f9192825f1b1e7e607992e",
"v8/third_party/instrumented_libraries":
- Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "05d5695a73e78b9cae55b8579fd8bf22b85eb283",
+ Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "644afd349826cb68204226a16c38bde13abe9c3c",
"v8/buildtools":
- Var("chromium_url") + "/chromium/buildtools.git" + "@" + "d3074448541662f242bcee623049c13a231b5648",
+ Var("chromium_url") + "/chromium/buildtools.git" + "@" + "98f00fa10dbad2cdbb2e297a66c3d6d5bc3994f3",
"v8/base/trace_event/common":
- Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "06294c8a4a6f744ef284cd63cfe54dbf61eea290",
+ Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "39a3450531fc73432e963db8668695d2e8f13053",
"v8/third_party/jinja2":
Var("chromium_url") + "/chromium/src/third_party/jinja2.git" + "@" + "d34383206fa42d52faa10bb9931d6d538f3a57e0",
"v8/third_party/markupsafe":
Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "8f45f5cfa0009d2a70589bcda0349b8cb2b72783",
"v8/tools/swarming_client":
- Var('chromium_url') + '/external/swarming.client.git' + '@' + "11e31afa5d330756ff87aa12064bb5d032896cb5",
+ Var('chromium_url') + '/external/swarming.client.git' + '@' + "a941a089ff1000403078b74cb628eb430f07d271",
"v8/testing/gtest":
Var("chromium_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
@@ -38,7 +38,7 @@ deps = {
"v8/test/test262/harness":
Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
"v8/tools/clang":
- Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "49df471350a60efaec6951f321dd65475496ba17",
+ Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "05f306039aa5029fa88768690e5c512097419f9d",
"v8/test/wasm-js":
Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "07fd6430f879d36928d179a62d9bdeed82286065",
}
@@ -46,9 +46,9 @@ deps = {
deps_os = {
"android": {
"v8/third_party/android_tools":
- Var("chromium_url") + "/android_tools.git" + "@" + "b65c4776dac2cf1b80e969b3b2d4e081b9c84f29",
+ Var("chromium_url") + "/android_tools.git" + "@" + "cb6bc21107001e2f2eeee2707b482b2b755baf51",
"v8/third_party/catapult":
- Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "9a55abab029cb9ae94f5160ded11b09a4638a955",
+ Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "08a6e0ac161db7309d8f9cad0ccd38e0b1fd41e0",
},
}
@@ -202,25 +202,14 @@ hooks = [
],
},
{
- "name": "wasm_fuzzer",
+ "name": "wasm_spec_tests",
"pattern": ".",
"action": [ "download_from_google_storage",
"--no_resume",
"--no_auth",
"-u",
- "--bucket", "v8-wasm-fuzzer",
- "-s", "v8/test/fuzzer/wasm.tar.gz.sha1",
- ],
- },
- {
- "name": "wasm_asmjs_fuzzer",
- "pattern": ".",
- "action": [ "download_from_google_storage",
- "--no_resume",
- "--no_auth",
- "-u",
- "--bucket", "v8-wasm-asmjs-fuzzer",
- "-s", "v8/test/fuzzer/wasm_asmjs.tar.gz.sha1",
+ "--bucket", "v8-wasm-spec-tests",
+ "-s", "v8/test/wasm-spec-tests/tests.tar.gz.sha1",
],
},
{
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 299d4aa09c..ed5b3a7fab 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -240,7 +240,6 @@ ifdef android_ndk_root
endif
# ----------------- available targets: --------------------
-# - "grokdump": rebuilds heap constants lists used by grokdump
# - any arch listed in ARCHES (see below)
# - any mode listed in MODES
# - every combination <arch>.<mode>, e.g. "ia32.release"
@@ -467,12 +466,6 @@ $(ENVFILE).new:
$(eval CXX_TARGET_ARCH:=$(subst x86_64,x64,$(CXX_TARGET_ARCH)))
@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS) -Dtarget_arch=$(CXX_TARGET_ARCH)" > $(ENVFILE).new;
-# Heap constants for grokdump.
-DUMP_FILE = tools/v8heapconst.py
-grokdump: ia32.release
- @cat $(DUMP_FILE).tmpl > $(DUMP_FILE)
- @$(OUTDIR)/ia32.release/d8 --dump-heap-constants >> $(DUMP_FILE)
-
# Support for the GNU GLOBAL Source Code Tag System.
gtags.files: $(GYPFILES) $(ENVFILE)
@find include src test -name '*.h' -o -name '*.cc' -o -name '*.c' > $@
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 6aa94a01b3..7d7faec696 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -43,6 +43,12 @@ _EXCLUDED_PATHS = (
)
+# Regular expression that matches code which should not be run through cpplint.
+_NO_LINT_PATHS = (
+ r'src[\\\/]base[\\\/]export-template\.h',
+)
+
+
# Regular expression that matches code only used for test binaries
# (best effort).
_TEST_CODE_EXCLUDED_PATHS = (
@@ -70,9 +76,15 @@ def _V8PresubmitChecks(input_api, output_api):
from presubmit import SourceProcessor
from presubmit import StatusFilesProcessor
+ def FilterFile(affected_file):
+ return input_api.FilterSourceFile(
+ affected_file,
+ white_list=None,
+ black_list=_NO_LINT_PATHS)
+
results = []
if not CppLintProcessor().RunOnFiles(
- input_api.AffectedFiles(include_deletes=False)):
+ input_api.AffectedFiles(file_filter=FilterFile, include_deletes=False)):
results.append(output_api.PresubmitError("C++ lint check failed"))
if not SourceProcessor().RunOnFiles(
input_api.AffectedFiles(include_deletes=False)):
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index e87665b8cd..76d3039250 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -258,6 +258,12 @@
TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp, \
TRACE_EVENT_FLAG_NONE | scope)
+#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP1(category_group, name, scope, \
+ timestamp, arg_name, arg_val) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
+ TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp, \
+ TRACE_EVENT_FLAG_NONE | scope, arg_name, arg_val)
+
// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
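
As an illustrative sketch only (the category, event name, and the |timestamp_us| and |bytes_freed| variables are placeholders, not part of this patch), the new one-argument instant-event macro might be invoked like this:

    // Emit a thread-scoped instant event at an explicit timestamp,
    // attaching a single named argument.
    TRACE_EVENT_INSTANT_WITH_TIMESTAMP1(
        "v8", "ExampleInstantEvent", TRACE_EVENT_SCOPE_THREAD,
        timestamp_us, "bytes_freed", bytes_freed);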
diff --git a/deps/v8/codereview.settings b/deps/v8/codereview.settings
index 532e4b4d7b..bff4e38ba5 100644
--- a/deps/v8/codereview.settings
+++ b/deps/v8/codereview.settings
@@ -1,8 +1,6 @@
+PROJECT: v8
+GERRIT_HOST: True
CODE_REVIEW_SERVER: https://codereview.chromium.org
CC_LIST: v8-reviews@googlegroups.com
VIEW_VC: https://chromium.googlesource.com/v8/v8/+/
STATUS: http://v8-status.appspot.com/status
-TRY_ON_UPLOAD: False
-TRYSERVER_SVN_URL: svn://svn.chromium.org/chrome-try-v8
-PROJECT: v8
-PENDING_REF_PREFIX: refs/pending/
diff --git a/deps/v8/gypfiles/all.gyp b/deps/v8/gypfiles/all.gyp
index bbad66741c..bc9d9650eb 100644
--- a/deps/v8/gypfiles/all.gyp
+++ b/deps/v8/gypfiles/all.gyp
@@ -10,6 +10,7 @@
'dependencies': [
'../src/d8.gyp:d8',
'../test/inspector/inspector.gyp:*',
+ '../test/mkgrokdump/mkgrokdump.gyp:*',
],
'conditions': [
['component!="shared_library"', {
diff --git a/deps/v8/gypfiles/coverage_wrapper.py b/deps/v8/gypfiles/coverage_wrapper.py
index 5b365d8e63..d5fdee43cf 100755
--- a/deps/v8/gypfiles/coverage_wrapper.py
+++ b/deps/v8/gypfiles/coverage_wrapper.py
@@ -31,6 +31,8 @@ for exclusion in exclusions:
remove_if_exists(args, '-fsanitize-coverage=func')
remove_if_exists(args, '-fsanitize-coverage=bb')
remove_if_exists(args, '-fsanitize-coverage=edge')
+ remove_if_exists(args, '-fsanitize-coverage=trace-pc-guard')
+ remove_if_exists(args, '-fsanitize-coverage=bb,trace-pc-guard')
break
sys.exit(subprocess.check_call(args))
diff --git a/deps/v8/gypfiles/features.gypi b/deps/v8/gypfiles/features.gypi
index f6a442f663..b38735e162 100644
--- a/deps/v8/gypfiles/features.gypi
+++ b/deps/v8/gypfiles/features.gypi
@@ -95,7 +95,7 @@
'defines': ['VERIFY_HEAP',],
}],
['v8_trace_maps==1', {
- 'defines': ['TRACE_MAPS',],
+ 'defines': ['V8_TRACE_MAPS',],
}],
['v8_enable_verify_predictable==1', {
'defines': ['VERIFY_PREDICTABLE',],
@@ -110,7 +110,7 @@
'defines': ['V8_IMMINENT_DEPRECATION_WARNINGS',],
}],
['v8_enable_i18n_support==1', {
- 'defines': ['V8_I18N_SUPPORT',],
+ 'defines': ['V8_INTL_SUPPORT',],
}],
['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
'defines': ['V8_USE_EXTERNAL_STARTUP_DATA',],
@@ -123,7 +123,7 @@
'DebugBaseCommon': {
'abstract': 1,
'variables': {
- 'v8_enable_handle_zapping%': 1,
+ 'v8_enable_handle_zapping%': 0,
},
'conditions': [
['v8_enable_handle_zapping==1', {
@@ -133,7 +133,7 @@
}, # Debug
'Release': {
'variables': {
- 'v8_enable_handle_zapping%': 0,
+ 'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_handle_zapping==1', {
diff --git a/deps/v8/gypfiles/standalone.gypi b/deps/v8/gypfiles/standalone.gypi
index 986aaaaebb..4c805bf643 100644
--- a/deps/v8/gypfiles/standalone.gypi
+++ b/deps/v8/gypfiles/standalone.gypi
@@ -780,6 +780,12 @@
# Don't warn about unrecognized command line option.
'-Wno-gnu-zero-variadic-macro-arguments',
],
+ 'cflags' : [
+ # Disable gcc warnings for optimizations based on the assumption
+ # that signed overflow does not occur. Generates false positives
+ # (see http://crbug.com/v8/6341).
+ "-Wno-strict-overflow",
+ ],
}],
[ 'clang==1 and (v8_target_arch=="x64" or v8_target_arch=="arm64" \
or v8_target_arch=="mips64el")', {
@@ -855,7 +861,6 @@
}],
],
'msvs_cygwin_shell': 0,
- 'msvs_cygwin_dirs': ['<(DEPTH)/third_party/cygwin'],
'msvs_disabled_warnings': [
# C4091: 'typedef ': ignored on left of 'X' when no variable is
# declared.
diff --git a/deps/v8/gypfiles/toolchain.gypi b/deps/v8/gypfiles/toolchain.gypi
index 815070a508..6b82cfbd49 100644
--- a/deps/v8/gypfiles/toolchain.gypi
+++ b/deps/v8/gypfiles/toolchain.gypi
@@ -1245,7 +1245,7 @@
'OBJECT_PRINT',
'VERIFY_HEAP',
'DEBUG',
- 'TRACE_MAPS'
+ 'V8_TRACE_MAPS'
],
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h
index f77742f0f6..e945045629 100644
--- a/deps/v8/include/libplatform/libplatform.h
+++ b/deps/v8/include/libplatform/libplatform.h
@@ -15,6 +15,11 @@ namespace platform {
enum class IdleTaskSupport { kDisabled, kEnabled };
enum class InProcessStackDumping { kDisabled, kEnabled };
+enum class MessageLoopBehavior : bool {
+ kDoNotWait = false,
+ kWaitForWork = true
+};
+
/**
* Returns a new instance of the default v8::Platform implementation.
*
@@ -36,12 +41,16 @@ V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform(
* Pumps the message loop for the given isolate.
*
* The caller has to make sure that this is called from the right thread.
- * Returns true if a task was executed, and false otherwise. This call does
- * not block if no task is pending. The |platform| has to be created using
- * |CreateDefaultPlatform|.
+ * Returns true if a task was executed, and false otherwise. Unless requested
+ * through the |behavior| parameter, this call does not block if no task is
+ * pending. The |platform| has to be created using |CreateDefaultPlatform|.
*/
-V8_PLATFORM_EXPORT bool PumpMessageLoop(v8::Platform* platform,
- v8::Isolate* isolate);
+V8_PLATFORM_EXPORT bool PumpMessageLoop(
+ v8::Platform* platform, v8::Isolate* isolate,
+ MessageLoopBehavior behavior = MessageLoopBehavior::kDoNotWait);
+
+V8_PLATFORM_EXPORT void EnsureEventLoopInitialized(v8::Platform* platform,
+ v8::Isolate* isolate);
/**
* Runs pending idle tasks for at most |idle_time_in_seconds| seconds.
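
A minimal sketch of how an embedder might drive the new |behavior| parameter, assuming |platform| was created via CreateDefaultPlatform and |isolate| is a live isolate:

    // Drain all pending foreground tasks without blocking
    // (the pre-existing behavior, now spelled explicitly)...
    while (v8::platform::PumpMessageLoop(
        platform, isolate, v8::platform::MessageLoopBehavior::kDoNotWait)) {
    }

    // ...or block until the next task arrives, useful for an
    // event-driven embedder with nothing else to do.
    v8::platform::PumpMessageLoop(
        platform, isolate, v8::platform::MessageLoopBehavior::kWaitForWork);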
diff --git a/deps/v8/include/v8-value-serializer-version.h b/deps/v8/include/v8-value-serializer-version.h
new file mode 100644
index 0000000000..c72911c64d
--- /dev/null
+++ b/deps/v8/include/v8-value-serializer-version.h
@@ -0,0 +1,24 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * Compile-time constants.
+ *
+ * This header provides access to information about the value serializer at
+ * compile time, without declaring or defining any symbols that require linking
+ * to V8.
+ */
+
+#ifndef INCLUDE_V8_VALUE_SERIALIZER_VERSION_H_
+#define INCLUDE_V8_VALUE_SERIALIZER_VERSION_H_
+
+#include <stdint.h>
+
+namespace v8 {
+
+constexpr uint32_t CurrentValueSerializerFormatVersion() { return 13; }
+
+} // namespace v8
+
+#endif // INCLUDE_V8_VALUE_SERIALIZER_VERSION_H_
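
Because the constant is constexpr and header-only, an embedder can consume it without linking against V8, e.g. in a compile-time check (a sketch):

    #include "v8-value-serializer-version.h"

    // Fail the build if this V8 produces an older wire format than the
    // embedder's persisted data expects.
    static_assert(v8::CurrentValueSerializerFormatVersion() >= 13,
                  "value serializer format version regressed");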
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 0bfc4e8c5d..0889459ca7 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -8,10 +8,10 @@
// These macros define the version number for the current version.
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
-#define V8_MAJOR_VERSION 5
-#define V8_MINOR_VERSION 9
-#define V8_BUILD_NUMBER 211
-#define V8_PATCH_LEVEL 39
+#define V8_MAJOR_VERSION 6
+#define V8_MINOR_VERSION 0
+#define V8_BUILD_NUMBER 286
+#define V8_PATCH_LEVEL 52
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index ce7741a08c..da3cdfdcbd 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -150,6 +150,9 @@ class FunctionCallbackArguments;
class GlobalHandles;
} // namespace internal
+namespace debug {
+class ConsoleCallArguments;
+} // namespace debug
// --- Handles ---
@@ -1092,7 +1095,8 @@ class V8_EXPORT Module {
/**
* ModuleDeclarationInstantiation
*
- * Returns false if an exception occurred during instantiation.
+ * Returns false if an exception occurred during instantiation. (In the case
+ * where the callback throws an exception, that exception is propagated.)
*/
V8_WARN_UNUSED_RESULT bool Instantiate(Local<Context> context,
ResolveCallback callback);
@@ -1789,8 +1793,6 @@ class V8_EXPORT ValueSerializer {
virtual void FreeBufferMemory(void* buffer);
};
- static uint32_t GetCurrentDataFormatVersion();
-
explicit ValueSerializer(Isolate* isolate);
ValueSerializer(Isolate* isolate, Delegate* delegate);
~ValueSerializer();
@@ -2317,6 +2319,8 @@ class V8_EXPORT Value : public Data {
Local<String> TypeOf(Isolate*);
+ Maybe<bool> InstanceOf(Local<Context> context, Local<Object> object);
+
private:
V8_INLINE bool QuickIsUndefined() const;
V8_INLINE bool QuickIsNull() const;
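
A short sketch of the new API, mirroring the JS `instanceof` operator (|context|, |value|, and |constructor| are assumed handles; |constructor| is the Object form of a JS constructor function):

    v8::Maybe<bool> result = value->InstanceOf(context, constructor);
    if (result.IsJust() && result.FromJust()) {
      // |value| is an instance of |constructor|.
    }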
@@ -2791,11 +2795,16 @@ class V8_EXPORT Symbol : public Name {
static Local<Symbol> ForApi(Isolate *isolate, Local<String> name);
// Well-known symbols
+ static Local<Symbol> GetHasInstance(Isolate* isolate);
+ static Local<Symbol> GetIsConcatSpreadable(Isolate* isolate);
static Local<Symbol> GetIterator(Isolate* isolate);
- static Local<Symbol> GetUnscopables(Isolate* isolate);
+ static Local<Symbol> GetMatch(Isolate* isolate);
+ static Local<Symbol> GetReplace(Isolate* isolate);
+ static Local<Symbol> GetSearch(Isolate* isolate);
+ static Local<Symbol> GetSplit(Isolate* isolate);
static Local<Symbol> GetToPrimitive(Isolate* isolate);
static Local<Symbol> GetToStringTag(Isolate* isolate);
- static Local<Symbol> GetIsConcatSpreadable(Isolate* isolate);
+ static Local<Symbol> GetUnscopables(Isolate* isolate);
V8_INLINE static Symbol* Cast(Value* obj);
@@ -3070,12 +3079,12 @@ class V8_EXPORT Object : public Value {
Local<Context> context, Local<Value> key);
/**
- * Returns Object.getOwnPropertyDescriptor as per ES5 section 15.2.3.3.
+ * Returns Object.getOwnPropertyDescriptor as per ES2016 section 19.1.2.6.
*/
V8_DEPRECATED("Use maybe version",
- Local<Value> GetOwnPropertyDescriptor(Local<String> key));
+ Local<Value> GetOwnPropertyDescriptor(Local<Name> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetOwnPropertyDescriptor(
- Local<Context> context, Local<String> key);
+ Local<Context> context, Local<Name> key);
V8_DEPRECATE_SOON("Use maybe version", bool Has(Local<Value> key));
/**
@@ -3136,6 +3145,16 @@ class V8_EXPORT Object : public Value {
AccessControl settings = DEFAULT);
/**
+ * Sets a native data property like Template::SetNativeDataProperty, but
+ * this method sets it on this object directly.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> SetNativeDataProperty(
+ Local<Context> context, Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), PropertyAttribute attributes = None);
+
+ /**
* Functionality for private properties.
* This is an experimental feature, use at your own risk.
* Note: Private properties are not inherited. Do not rely on this, since it
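
A hedged example of the new object-level variant (the getter, property name, and surrounding |obj|, |context|, and |isolate| handles are illustrative, not part of this patch):

    void VersionGetter(v8::Local<v8::Name> name,
                       const v8::PropertyCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(42);
    }

    // Install a read-only native accessor directly on an existing object,
    // without going through an ObjectTemplate.
    obj->SetNativeDataProperty(
           context,
           v8::String::NewFromUtf8(isolate, "version",
                                   v8::NewStringType::kNormal)
               .ToLocalChecked(),
           VersionGetter, nullptr, v8::Local<v8::Value>(), v8::ReadOnly)
        .FromJust();  // Sketch only: FromJust() aborts on failure.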
@@ -3578,16 +3597,34 @@ class ReturnValue {
template<typename T>
class FunctionCallbackInfo {
public:
+ /** The number of available arguments. */
V8_INLINE int Length() const;
+ /** Accessor for the available arguments. */
V8_INLINE Local<Value> operator[](int i) const;
V8_INLINE V8_DEPRECATED("Use Data() to explicitly pass Callee instead",
Local<Function> Callee() const);
+ /** Returns the receiver. This corresponds to the "this" value. */
V8_INLINE Local<Object> This() const;
+ /**
+ * If the callback was created without a Signature, this is the same
+ * value as This(). If there is a signature, and the signature didn't match
+ * This() but one of its hidden prototypes, this will be the respective
+ * hidden prototype.
+ *
+ * Note that this is not the prototype of This() on which the accessor
+ * referencing this callback was found (which in V8 internally is often
+ * referred to as holder [sic]).
+ */
V8_INLINE Local<Object> Holder() const;
+ /** For construct calls, this returns the "new.target" value. */
V8_INLINE Local<Value> NewTarget() const;
+ /** Indicates whether this is a regular call or a construct call. */
V8_INLINE bool IsConstructCall() const;
+ /** The data argument specified when creating the callback. */
V8_INLINE Local<Value> Data() const;
+ /** The current Isolate. */
V8_INLINE Isolate* GetIsolate() const;
+ /** The ReturnValue for the call. */
V8_INLINE ReturnValue<T> GetReturnValue() const;
// This shouldn't be public, but the arm compiler needs it.
static const int kArgsLength = 8;
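
The newly documented accessors in action, in an illustrative callback sketch (assumes numeric arguments; not taken from this patch):

    void Add(const v8::FunctionCallbackInfo<v8::Value>& args) {
      v8::Isolate* isolate = args.GetIsolate();
      if (args.IsConstructCall() || args.Length() < 2) {
        args.GetReturnValue().SetUndefined();  // Not meant to be `new`ed.
        return;
      }
      // Unchecked casts; a real callback would validate with IsNumber().
      double sum = args[0].As<v8::Number>()->Value() +
                   args[1].As<v8::Number>()->Value();
      args.GetReturnValue().Set(v8::Number::New(isolate, sum));
    }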
@@ -3595,6 +3632,7 @@ class FunctionCallbackInfo {
protected:
friend class internal::FunctionCallbackArguments;
friend class internal::CustomArguments<FunctionCallbackInfo>;
+ friend class debug::ConsoleCallArguments;
static const int kHolderIndex = 0;
static const int kIsolateIndex = 1;
static const int kReturnValueDefaultValueIndex = 2;
@@ -4025,12 +4063,10 @@ class V8_EXPORT WasmCompiledModule : public Object {
// supports move semantics, and does not support copy semantics.
class TransferrableModule final {
public:
- TransferrableModule(TransferrableModule&& src)
- : compiled_code(std::move(src.compiled_code)),
- wire_bytes(std::move(src.wire_bytes)) {}
+ TransferrableModule(TransferrableModule&& src) = default;
TransferrableModule(const TransferrableModule& src) = delete;
- TransferrableModule& operator=(TransferrableModule&& src);
+ TransferrableModule& operator=(TransferrableModule&& src) = default;
TransferrableModule& operator=(const TransferrableModule& src) = delete;
private:
@@ -4103,11 +4139,9 @@ class V8_EXPORT WasmModuleObjectBuilder final {
// Disable copy semantics *in this implementation*. We can choose to
// relax this, albeit it's not clear why.
WasmModuleObjectBuilder(const WasmModuleObjectBuilder&) = delete;
- WasmModuleObjectBuilder(WasmModuleObjectBuilder&& src)
- : received_buffers_(std::move(src.received_buffers_)),
- total_size_(src.total_size_) {}
+ WasmModuleObjectBuilder(WasmModuleObjectBuilder&&) = default;
WasmModuleObjectBuilder& operator=(const WasmModuleObjectBuilder&) = delete;
- WasmModuleObjectBuilder& operator=(WasmModuleObjectBuilder&&);
+ WasmModuleObjectBuilder& operator=(WasmModuleObjectBuilder&&) = default;
std::vector<Buffer> received_buffers_;
size_t total_size_ = 0;
@@ -4159,11 +4193,41 @@ class V8_EXPORT ArrayBuffer : public Object {
virtual void* AllocateUninitialized(size_t length) = 0;
/**
+ * Reserve |length| bytes, but do not commit the memory. Must call
+ * |SetProtection| to make memory accessible.
+ */
+ // TODO(eholk): make this pure virtual once blink implements this.
+ virtual void* Reserve(size_t length);
+
+ /**
* Free the memory block of size |length|, pointed to by |data|.
* That memory is guaranteed to be previously allocated by |Allocate|.
*/
virtual void Free(void* data, size_t length) = 0;
+ enum class AllocationMode { kNormal, kReservation };
+
+ /**
+ * Free the memory block of size |length|, pointed to by |data|.
+ * That memory is guaranteed to be previously allocated by |Allocate| or
+ * |Reserve|, depending on |mode|.
+ */
+ // TODO(eholk): make this pure virtual once blink implements this.
+ virtual void Free(void* data, size_t length, AllocationMode mode);
+
+ enum class Protection { kNoAccess, kReadWrite };
+
+ /**
+ * Change the protection on a region of memory.
+ *
+ * On platforms that make a distinction between reserving and committing
+ * memory, changing the protection to kReadWrite must also ensure the memory
+ * is committed.
+ */
+ // TODO(eholk): make this pure virtual once blink implements this.
+ virtual void SetProtection(void* data, size_t length,
+ Protection protection);
+
/**
* malloc/free based convenience allocator.
*
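
One possible implementation of the new virtuals, as a POSIX-only sketch in terms of mmap/mprotect; real embedders (e.g. Blink, per the TODOs above) will have their own strategy:

    #include <cstdlib>
    #include <sys/mman.h>
    #include "v8.h"

    class ReservingAllocator : public v8::ArrayBuffer::Allocator {
     public:
      void* Allocate(size_t length) override { return calloc(length, 1); }
      void* AllocateUninitialized(size_t length) override {
        return malloc(length);
      }
      void Free(void* data, size_t) override { free(data); }

      // Reserve address space only; pages stay inaccessible until
      // SetProtection commits them as kReadWrite.
      void* Reserve(size_t length) override {
        void* region = mmap(nullptr, length, PROT_NONE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return region == MAP_FAILED ? nullptr : region;
      }
      void Free(void* data, size_t length, AllocationMode mode) override {
        if (mode == AllocationMode::kReservation) {
          munmap(data, length);
        } else {
          Free(data, length);  // malloc-backed block from Allocate().
        }
      }
      void SetProtection(void* data, size_t length,
                         Protection protection) override {
        mprotect(data, length, protection == Protection::kReadWrite
                                   ? PROT_READ | PROT_WRITE
                                   : PROT_NONE);
      }
    };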
@@ -5745,9 +5809,13 @@ class V8_EXPORT ObjectTemplate : public Template {
friend class FunctionTemplate;
};
-
/**
* A Signature specifies which receiver is valid for a function.
+ *
+ * A receiver matches a given signature if the receiver (or any of its
+ * hidden prototypes) was created from the signature's FunctionTemplate, or
+ * from a FunctionTemplate that inherits directly or indirectly from the
+ * signature's FunctionTemplate.
*/
class V8_EXPORT Signature : public Data {
public:
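
To make the expanded comment concrete, a brief sketch of tying a callback to receivers created from a particular template (|MethodCallback| and the handle names are illustrative):

    v8::Local<v8::FunctionTemplate> receiver_tmpl =
        v8::FunctionTemplate::New(isolate);
    v8::Local<v8::Signature> signature =
        v8::Signature::New(isolate, receiver_tmpl);

    // |MethodCallback| only runs when the receiver (or one of its hidden
    // prototypes) was created from |receiver_tmpl| or a template that
    // inherits from it; other receivers fail the signature check.
    v8::Local<v8::FunctionTemplate> method_tmpl = v8::FunctionTemplate::New(
        isolate, MethodCallback, v8::Local<v8::Value>(), signature);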
@@ -5878,8 +5946,12 @@ class V8_EXPORT ResourceConstraints {
void set_max_old_space_size(int limit_in_mb) {
max_old_space_size_ = limit_in_mb;
}
- int max_executable_size() const { return max_executable_size_; }
- void set_max_executable_size(int limit_in_mb) {
+ V8_DEPRECATE_SOON("max_executable_size_ is subsumed by max_old_space_size_",
+ int max_executable_size() const) {
+ return max_executable_size_;
+ }
+ V8_DEPRECATE_SOON("max_executable_size_ is subsumed by max_old_space_size_",
+ void set_max_executable_size(int limit_in_mb)) {
max_executable_size_ = limit_in_mb;
}
uint32_t* stack_limit() const { return stack_limit_; }
@@ -6154,6 +6226,8 @@ enum GCType {
* - kGCCallbackFlagCollectAllAvailableGarbage: The GC callback is called
* in a phase where V8 is trying to collect all available garbage
* (e.g., handling a low memory notification).
+ * - kGCCallbackScheduleIdleGarbageCollection: The GC callback is called to
+ * trigger an idle garbage collection.
*/
enum GCCallbackFlags {
kNoGCCallbackFlags = 0,
@@ -6162,6 +6236,7 @@ enum GCCallbackFlags {
kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3,
kGCCallbackFlagCollectAllAvailableGarbage = 1 << 4,
kGCCallbackFlagCollectAllExternalMemory = 1 << 5,
+ kGCCallbackScheduleIdleGarbageCollection = 1 << 6,
};
typedef void (*GCCallback)(GCType type, GCCallbackFlags flags);
@@ -6188,9 +6263,8 @@ class V8_EXPORT HeapStatistics {
size_t peak_malloced_memory() { return peak_malloced_memory_; }
/**
- * Returns a 0/1 boolean, which signifies whether the |--zap_code_space|
- * option is enabled or not, which makes V8 overwrite heap garbage with a bit
- * pattern.
+ * Returns a 0/1 boolean, which signifies whether V8 overwrites heap
+ * garbage with a bit pattern.
*/
size_t does_zap_garbage() { return does_zap_garbage_; }
@@ -6607,7 +6681,7 @@ class V8_EXPORT Isolate {
/**
* Whether calling Atomics.wait (a function that may block) is allowed in
- * this isolate.
+ * this isolate. This can also be configured via SetAllowAtomicsWait.
*/
bool allow_atomics_wait;
@@ -7467,6 +7541,13 @@ class V8_EXPORT Isolate {
*/
bool IsInUse();
+ /**
+ * Set whether calling Atomics.wait (a function that may block) is allowed in
+ * this isolate. This can also be configured via
+ * CreateParams::allow_atomics_wait.
+ */
+ void SetAllowAtomicsWait(bool allow);
+
Isolate() = delete;
~Isolate() = delete;
Isolate(const Isolate&) = delete;
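
Usage is a single call; unlike CreateParams::allow_atomics_wait it can be toggled after the isolate exists (sketch):

    // Forbid blocking Atomics.wait from now on; scripts that call it
    // will fail rather than park the thread.
    isolate->SetAllowAtomicsWait(false);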
@@ -8365,16 +8446,14 @@ class V8_EXPORT Context {
Isolate* GetIsolate();
/**
- * The field at kDebugIdIndex is reserved for V8 debugger implementation.
- * The value is propagated to the scripts compiled in given Context and
- * can be used for filtering scripts.
+ * The field at kDebugIdIndex used to be reserved for the inspector.
+ * It now serves no purpose.
*/
enum EmbedderDataFields { kDebugIdIndex = 0 };
/**
* Gets the embedder data with the given index, which must have been set by a
- * previous call to SetEmbedderData with the same index. Note that index 0
- * currently has a special meaning for Chrome's debugger.
+ * previous call to SetEmbedderData with the same index.
*/
V8_INLINE Local<Value> GetEmbedderData(int index);
diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h
index c34cb69a89..964949c24c 100644
--- a/deps/v8/include/v8config.h
+++ b/deps/v8/include/v8config.h
@@ -316,7 +316,7 @@
// V8_NORETURN void MyAbort() { abort(); }
#if V8_HAS_ATTRIBUTE_NORETURN
# define V8_NORETURN __attribute__((noreturn))
-#elif HAS_DECLSPEC_NORETURN
+#elif V8_HAS_DECLSPEC_NORETURN
# define V8_NORETURN __declspec(noreturn)
#else
# define V8_NORETURN /* NOT SUPPORTED */
diff --git a/deps/v8/infra/config/cq.cfg b/deps/v8/infra/config/cq.cfg
index e9bc8fdd3c..dcf8e5c0b7 100644
--- a/deps/v8/infra/config/cq.cfg
+++ b/deps/v8/infra/config/cq.cfg
@@ -32,10 +32,7 @@ verifiers {
buckets {
name: "master.tryserver.v8"
builders { name: "v8_android_arm_compile_rel" }
- builders {
- name: "v8_node_linux64_rel"
- experiment_percentage: 100
- }
+ builders { name: "v8_node_linux64_rel" }
builders { name: "v8_linux64_asan_rel_ng" }
builders {
name: "v8_linux64_asan_rel_ng_triggered"
@@ -46,6 +43,7 @@ verifiers {
name: "v8_linux64_avx2_rel_ng_triggered"
triggered_by: "v8_linux64_avx2_rel_ng"
}
+ builders { name: "v8_linux64_gcc_compile_dbg" }
builders { name: "v8_linux64_gyp_rel_ng" }
builders {
name: "v8_linux64_gyp_rel_ng_triggered"
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 0df30d4571..1a8247fc2b 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -84,6 +84,8 @@
# Sanitizers.
'V8 Linux64 ASAN': 'gn_release_x64_asan',
'V8 Linux64 TSAN': 'gn_release_x64_tsan',
+ 'V8 Linux64 TSAN - concurrent marking':
+ 'gn_release_x64_tsan_concurrent_marking',
'V8 Linux - arm64 - sim - MSAN': 'gn_release_simulate_arm64_msan',
# Clusterfuzz.
'V8 Linux64 - release builder': 'gn_release_x64_correctness_fuzzer',
@@ -98,6 +100,7 @@
'gn_debug_simulate_mipsel_asan_edge',
# Misc.
'V8 Linux gcc 4.8': 'gn_release_x86_gcc',
+ 'V8 Linux64 gcc 4.8 - debug': 'gn_debug_x64_gcc',
# FYI.
'V8 Linux - swarming staging': 'gn_release_x64',
# TODO(machenbach): Figure out if symbolized is still needed. The
@@ -177,6 +180,7 @@
'v8_linux_nosnap_dbg': 'gn_debug_x86_no_snap_trybot',
'v8_linux_gcc_compile_rel': 'gn_release_x86_gcc_minimal_symbols',
'v8_linux_gcc_rel': 'gn_release_x86_gcc_minimal_symbols',
+ 'v8_linux64_gcc_compile_dbg': 'gn_debug_x64_gcc',
'v8_linux64_rel_ng': 'gn_release_x64_valgrind_trybot',
'v8_linux64_verify_csa_rel_ng': 'gn_release_x64_verify_csa',
'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
@@ -187,6 +191,8 @@
'v8_linux64_sanitizer_coverage_rel':
'gyp_release_x64_asan_minimal_symbols_coverage',
'v8_linux64_tsan_rel': 'gn_release_x64_tsan_minimal_symbols',
+ 'v8_linux64_tsan_concurrent_marking_rel_ng':
+ 'gn_release_x64_tsan_concurrent_marking_minimal_symbols',
'v8_win_dbg': 'gn_debug_x86_trybot',
'v8_win_compile_dbg': 'gn_debug_x86_trybot',
'v8_win_rel_ng': 'gn_release_x86_trybot',
@@ -298,7 +304,8 @@
'gn_debug_simulate_arm64_asan_edge': [
'gn', 'debug_bot', 'simulate_arm64', 'asan', 'lsan', 'edge'],
'gn_debug_simulate_arm64_no_snap': [
- 'gn', 'debug_bot', 'simulate_arm64', 'swarming', 'v8_snapshot_none'],
+ 'gn', 'debug', 'simulate_arm64', 'shared', 'goma',
+ 'v8_optimized_debug', 'swarming', 'v8_snapshot_none'],
'gn_debug_simulate_mipsel_asan_edge': [
'gn', 'debug_bot', 'simulate_mipsel', 'asan', 'edge'],
@@ -360,6 +367,12 @@
'gn', 'release_trybot', 'x64', 'swarming'],
'gn_release_x64_tsan': [
'gn', 'release_bot', 'x64', 'tsan', 'swarming'],
+ 'gn_release_x64_tsan_concurrent_marking': [
+ 'gn', 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'tsan',
+ 'swarming'],
+ 'gn_release_x64_tsan_concurrent_marking_minimal_symbols': [
+ 'gn', 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'tsan',
+ 'minimal_symbols', 'swarming'],
'gn_release_x64_tsan_minimal_symbols': [
'gn', 'release_bot', 'x64', 'tsan', 'minimal_symbols', 'swarming'],
'gn_release_x64_valgrind': [
@@ -377,6 +390,8 @@
'gn', 'debug_bot', 'x64', 'asan', 'lsan', 'edge'],
'gn_debug_x64_custom': [
'gn', 'debug_bot', 'x64', 'swarming', 'v8_snapshot_custom'],
+ 'gn_debug_x64_gcc': [
+ 'gn', 'debug_bot', 'x64', 'gcc'],
'gn_debug_x64_minimal_symbols': [
'gn', 'debug_bot', 'x64', 'minimal_symbols', 'swarming'],
'gn_debug_x64_trybot': [
@@ -498,8 +513,8 @@
},
'bb': {
- 'gn_args': 'sanitizer_coverage_flags="bb"',
- 'gyp_defines': 'sanitizer_coverage=bb',
+ 'gn_args': 'sanitizer_coverage_flags="bb,trace-pc-guard"',
+ 'gyp_defines': 'sanitizer_coverage=bb,trace-pc-guard',
},
'cfi': {
@@ -548,8 +563,8 @@
},
'edge': {
- 'gn_args': 'sanitizer_coverage_flags="edge"',
- 'gyp_defines': 'sanitizer_coverage=edge',
+ 'gn_args': 'sanitizer_coverage_flags="trace-pc-guard"',
+ 'gyp_defines': 'sanitizer_coverage=trace-pc-guard',
},
'gcc': {
@@ -695,6 +710,10 @@
'gyp_defines': 'v8_enable_i18n_support=0 icu_use_data_file_flag=0',
},
+ 'v8_enable_concurrent_marking': {
+ 'gn_args': 'v8_enable_concurrent_marking=true',
+ },
+
'v8_correctness_fuzzer': {
'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
},
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index bfeca93241..bbf47e6107 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -1,6 +1,8 @@
include_rules = [
"+base/trace_event/common/trace_event_common.h",
"+src",
+ "-src/asmjs",
+ "+src/asmjs/asm-js.h",
"-src/compiler",
"+src/compiler/pipeline.h",
"+src/compiler/code-assembler.h",
diff --git a/deps/v8/src/OWNERS b/deps/v8/src/OWNERS
index 3a05c3a95b..8bbbab6ecb 100644
--- a/deps/v8/src/OWNERS
+++ b/deps/v8/src/OWNERS
@@ -1,5 +1,5 @@
-per-file i18n.*=cira@chromium.org
-per-file i18n.*=mnita@google.com
-per-file i18n.*=jshin@chromium.org
+per-file intl.*=cira@chromium.org
+per-file intl.*=mnita@google.com
+per-file intl.*=jshin@chromium.org
per-file typing-asm.*=aseemgarg@chromium.org
per-file typing-asm.*=bradnelson@chromium.org
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 1f2ce97240..98f780d589 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -681,23 +681,6 @@ static Handle<Object> GetFunctionPrototype(Isolate* isolate,
return Handle<Object>(function->prototype(), isolate);
}
-
-MUST_USE_RESULT static MaybeHandle<Object> SetFunctionPrototype(
- Isolate* isolate, Handle<JSFunction> function, Handle<Object> value) {
- JSFunction::SetPrototype(function, value);
- DCHECK(function->prototype() == *value);
- return function;
-}
-
-
-MaybeHandle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
- Handle<Object> prototype) {
- DCHECK(function->IsConstructor());
- Isolate* isolate = function->GetIsolate();
- return SetFunctionPrototype(isolate, function, prototype);
-}
-
-
void Accessors::FunctionPrototypeGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
@@ -719,11 +702,8 @@ void Accessors::FunctionPrototypeSetter(
Handle<Object> value = Utils::OpenHandle(*val);
Handle<JSFunction> object =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
- if (SetFunctionPrototype(isolate, object, value).is_null()) {
- isolate->OptionalRescheduleException(false);
- } else {
- info.GetReturnValue().Set(true);
- }
+ JSFunction::SetPrototype(object, value);
+ info.GetReturnValue().Set(true);
}
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index a4d51fd18a..52420d91de 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -90,8 +90,6 @@ class Accessors : public AllStatic {
};
// Accessor functions called directly from the runtime system.
- MUST_USE_RESULT static MaybeHandle<Object> FunctionSetPrototype(
- Handle<JSFunction> object, Handle<Object> value);
static Handle<JSObject> FunctionGetArguments(Handle<JSFunction> object);
// Returns true for properties that are accessors to object fields.
diff --git a/deps/v8/src/api-arguments.h b/deps/v8/src/api-arguments.h
index 6c9ad7ad6b..ca7b4833e9 100644
--- a/deps/v8/src/api-arguments.h
+++ b/deps/v8/src/api-arguments.h
@@ -7,6 +7,7 @@
#include "src/api.h"
#include "src/isolate.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
@@ -17,8 +18,8 @@ namespace internal {
template <int kArrayLength>
class CustomArgumentsBase : public Relocatable {
public:
- virtual inline void IterateInstance(ObjectVisitor* v) {
- v->VisitPointers(values_, values_ + kArrayLength);
+ virtual inline void IterateInstance(RootVisitor* v) {
+ v->VisitRootPointers(Root::kRelocatable, values_, values_ + kArrayLength);
}
protected:
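This hunk is part of the broader ObjectVisitor-to-RootVisitor split in this update: root iteration now carries a Root tag naming the source of each pointer. Below is a minimal sketch of a visitor against the new interface, assuming the signature used throughout this patch; the RootCounter name and the tag-array size are hypothetical:

    // Sketch only: counts visited root pointers per Root tag (internal API).
    class RootCounter : public i::RootVisitor {
     public:
      void VisitRootPointers(i::Root root, i::Object** start,
                             i::Object** end) override {
        counts_[static_cast<int>(root)] += static_cast<int>(end - start);
      }

     private:
      int counts_[32] = {0};  // Assumes fewer than 32 Root tags.
    };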
diff --git a/deps/v8/src/api-natives.cc b/deps/v8/src/api-natives.cc
index 0957c47823..ef51f950a5 100644
--- a/deps/v8/src/api-natives.cc
+++ b/deps/v8/src/api-natives.cc
@@ -261,14 +261,25 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
return obj;
}
+// Whether or not to cache every instance: when we materialize a getter or
+// setter from a lazy AccessorPair, we rely on this cache to always return
+// the same getter or setter. However, objects will be cloned anyway, so it
+// is not observable whether an instance was cached. Furthermore, a badly
+// behaved embedder might create an unlimited number of objects, so we limit
+// the cache for those cases.
+enum class CachingMode { kLimited, kUnlimited };
+
MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
- int serial_number) {
+ int serial_number,
+ CachingMode caching_mode) {
DCHECK_LE(1, serial_number);
if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
Handle<FixedArray> fast_cache =
isolate->fast_template_instantiations_cache();
return fast_cache->GetValue<JSObject>(isolate, serial_number - 1);
- } else {
+ } else if (caching_mode == CachingMode::kUnlimited ||
+ (serial_number <=
+ TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<UnseededNumberDictionary> slow_cache =
isolate->slow_template_instantiations_cache();
int entry = slow_cache->FindEntry(serial_number);
@@ -276,10 +287,13 @@ MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
return MaybeHandle<JSObject>();
}
return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate);
+ } else {
+ return MaybeHandle<JSObject>();
}
}
void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
+ CachingMode caching_mode,
Handle<JSObject> object) {
DCHECK_LE(1, serial_number);
if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
@@ -291,7 +305,9 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
isolate->native_context()->set_fast_template_instantiations_cache(
*new_cache);
}
- } else {
+ } else if (caching_mode == CachingMode::kUnlimited ||
+ (serial_number <=
+ TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<UnseededNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
auto new_cache =
@@ -303,14 +319,17 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
}
}
-void UncacheTemplateInstantiation(Isolate* isolate, int serial_number) {
+void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
+ CachingMode caching_mode) {
DCHECK_LE(1, serial_number);
if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) {
Handle<FixedArray> fast_cache =
isolate->fast_template_instantiations_cache();
DCHECK(!fast_cache->get(serial_number - 1)->IsUndefined(isolate));
fast_cache->set_undefined(serial_number - 1);
- } else {
+ } else if (caching_mode == CachingMode::kUnlimited ||
+ (serial_number <=
+ TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<UnseededNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
@@ -354,7 +373,8 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
// Fast path.
Handle<JSObject> result;
if (serial_number) {
- if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
+ if (ProbeInstantiationsCache(isolate, serial_number, CachingMode::kLimited)
+ .ToHandle(&result)) {
return isolate->factory()->CopyJSObject(result);
}
}
@@ -396,7 +416,8 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject");
// Don't cache prototypes.
if (serial_number) {
- CacheTemplateInstantiation(isolate, serial_number, result);
+ CacheTemplateInstantiation(isolate, serial_number, CachingMode::kLimited,
+ result);
result = isolate->factory()->CopyJSObject(result);
}
}
@@ -433,7 +454,9 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
int serial_number = Smi::cast(data->serial_number())->value();
if (serial_number) {
Handle<JSObject> result;
- if (ProbeInstantiationsCache(isolate, serial_number).ToHandle(&result)) {
+ if (ProbeInstantiationsCache(isolate, serial_number,
+ CachingMode::kUnlimited)
+ .ToHandle(&result)) {
return Handle<JSFunction>::cast(result);
}
}
@@ -475,14 +498,16 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
}
if (serial_number) {
// Cache the function.
- CacheTemplateInstantiation(isolate, serial_number, function);
+ CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited,
+ function);
}
MaybeHandle<JSObject> result =
ConfigureInstance(isolate, function, data, data->hidden_prototype());
if (result.is_null()) {
// Uncache on error.
if (serial_number) {
- UncacheTemplateInstantiation(isolate, serial_number);
+ UncacheTemplateInstantiation(isolate, serial_number,
+ CachingMode::kUnlimited);
}
return MaybeHandle<JSFunction>();
}
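The net effect of CachingMode in the hunks above: object-template instantiations (always cloned before being handed out) use kLimited and fall out of the cache once serial numbers exceed the slow-cache bound, while function-template instantiations use kUnlimited because the materialized function's identity must stay stable. A condensed restatement of the probe condition, not the exact source:

    // Whether the slow dictionary cache applies for this serial number.
    bool UseSlowCache(CachingMode mode, int serial_number) {
      return mode == CachingMode::kUnlimited ||
             serial_number <=
                 TemplateInfo::kSlowTemplateInstantiationsCacheSize;
    }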
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index e725ef50fe..7699bc87ec 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -25,6 +25,7 @@
#include "src/base/safe_conversions.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
+#include "src/builtins/builtins-utils.h"
#include "src/char-predicates-inl.h"
#include "src/code-stubs.h"
#include "src/compiler-dispatcher/compiler-dispatcher.h"
@@ -84,7 +85,7 @@ namespace v8 {
isolate, &i::RuntimeCallStats::API_##class_name##_##function_name); \
LOG(isolate, ApiEntryCall("v8::" #class_name "::" #function_name))
-#define ENTER_V8(isolate) i::VMState<v8::OTHER> __state__((isolate))
+#define ENTER_V8_DO_NOT_USE(isolate) i::VMState<v8::OTHER> __state__((isolate))
#define PREPARE_FOR_EXECUTION_GENERIC(isolate, context, class_name, \
function_name, bailout_value, \
@@ -95,7 +96,7 @@ namespace v8 {
HandleScopeClass handle_scope(isolate); \
CallDepthScope<do_callback> call_depth_scope(isolate, context); \
LOG_API(isolate, class_name, function_name); \
- ENTER_V8(isolate); \
+ ENTER_V8_DO_NOT_USE(isolate); \
bool has_pending_exception = false
#define PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(isolate, T) \
@@ -104,7 +105,7 @@ namespace v8 {
} \
InternalEscapableScope handle_scope(isolate); \
CallDepthScope<false> call_depth_scope(isolate, v8::Local<v8::Context>()); \
- ENTER_V8(isolate); \
+ ENTER_V8_DO_NOT_USE(isolate); \
bool has_pending_exception = false
#define PREPARE_FOR_EXECUTION_WITH_CONTEXT(context, class_name, function_name, \
@@ -231,17 +232,20 @@ class CallDepthScope {
i::Handle<i::Context> env = Utils::OpenHandle(*context);
i::HandleScopeImplementer* impl = isolate->handle_scope_implementer();
if (isolate->context() != nullptr &&
- isolate->context()->native_context() == env->native_context() &&
- impl->LastEnteredContextWas(env)) {
+ isolate->context()->native_context() == env->native_context()) {
context_ = Local<Context>();
} else {
- context_->Enter();
+ impl->SaveContext(isolate->context());
+ isolate->set_context(*env);
}
}
if (do_callback) isolate_->FireBeforeCallEnteredCallback();
}
~CallDepthScope() {
- if (!context_.IsEmpty()) context_->Exit();
+ if (!context_.IsEmpty()) {
+ i::HandleScopeImplementer* impl = isolate_->handle_scope_implementer();
+ isolate_->set_context(impl->RestoreContext());
+ }
if (!escaped_) isolate_->handle_scope_implementer()->DecrementCallDepth();
if (do_callback) isolate_->FireCallCompletedCallback();
#ifdef DEBUG
@@ -437,6 +441,28 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
i::V8::SetSnapshotBlob(snapshot_blob);
}
+void* v8::ArrayBuffer::Allocator::Reserve(size_t length) { UNIMPLEMENTED(); }
+
+void v8::ArrayBuffer::Allocator::Free(void* data, size_t length,
+ AllocationMode mode) {
+ switch (mode) {
+ case AllocationMode::kNormal: {
+ Free(data, length);
+ return;
+ }
+ case AllocationMode::kReservation: {
+ UNIMPLEMENTED();
+ return;
+ }
+ }
+}
+
+void v8::ArrayBuffer::Allocator::SetProtection(
+ void* data, size_t length,
+ v8::ArrayBuffer::Allocator::Protection protection) {
+ UNIMPLEMENTED();
+}
+
namespace {
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
@@ -447,6 +473,39 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
}
virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
virtual void Free(void* data, size_t) { free(data); }
+
+ virtual void* Reserve(size_t length) {
+ return base::VirtualMemory::ReserveRegion(length);
+ }
+
+ virtual void Free(void* data, size_t length,
+ v8::ArrayBuffer::Allocator::AllocationMode mode) {
+ switch (mode) {
+ case v8::ArrayBuffer::Allocator::AllocationMode::kNormal: {
+ return Free(data, length);
+ }
+ case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
+ base::VirtualMemory::ReleaseRegion(data, length);
+ return;
+ }
+ }
+ }
+
+ virtual void SetProtection(
+ void* data, size_t length,
+ v8::ArrayBuffer::Allocator::Protection protection) {
+ switch (protection) {
+ case v8::ArrayBuffer::Allocator::Protection::kNoAccess: {
+ base::VirtualMemory::UncommitRegion(data, length);
+ return;
+ }
+ case v8::ArrayBuffer::Allocator::Protection::kReadWrite: {
+ const bool is_executable = false;
+ base::VirtualMemory::CommitRegion(data, length, is_executable);
+ return;
+ }
+ }
+ }
};
bool RunExtraCode(Isolate* isolate, Local<Context> context,
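For embedders, the important part of the two hunks above is that v8::ArrayBuffer::Allocator gained reservation-aware virtuals with base-class defaults: kNormal frees forward to the old two-argument Free, while the reservation paths hit UNIMPLEMENTED. Existing allocators therefore keep compiling. A minimal embedder-side sketch that relies on those defaults; MyAllocator is a hypothetical name:

    #include <cstdlib>
    // Overrides only the classic trio; the new Reserve / Free(mode) /
    // SetProtection virtuals keep their base-class defaults.
    class MyAllocator : public v8::ArrayBuffer::Allocator {
     public:
      void* Allocate(size_t length) override { return calloc(length, 1); }
      void* AllocateUninitialized(size_t length) override {
        return malloc(length);
      }
      void Free(void* data, size_t) override { free(data); }
    };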
@@ -601,9 +660,6 @@ StartupData SnapshotCreator::CreateBlob(
isolate->heap()->SetSerializedGlobalProxySizes(*global_proxy_sizes);
}
- // We might rehash strings and re-sort descriptors. Clear the lookup cache.
- isolate->descriptor_lookup_cache()->Clear();
-
// If we don't do this then we end up with a stray root pointing at the
// context even after we have disposed of the context.
isolate->heap()->CollectAllAvailableGarbage(
@@ -645,15 +701,11 @@ StartupData SnapshotCreator::CreateBlob(
// Serialize each context with a new partial serializer.
i::List<i::SnapshotData*> context_snapshots(num_additional_contexts + 1);
- // TODO(6593): generalize rehashing, and remove this flag.
- bool can_be_rehashed = true;
-
{
// The default snapshot does not support embedder fields.
i::PartialSerializer partial_serializer(
isolate, &startup_serializer, v8::SerializeInternalFieldsCallback());
partial_serializer.Serialize(&default_context, false);
- can_be_rehashed = can_be_rehashed && partial_serializer.can_be_rehashed();
context_snapshots.Add(new i::SnapshotData(&partial_serializer));
}
@@ -661,12 +713,10 @@ StartupData SnapshotCreator::CreateBlob(
i::PartialSerializer partial_serializer(
isolate, &startup_serializer, data->embedder_fields_serializers_[i]);
partial_serializer.Serialize(&contexts[i], true);
- can_be_rehashed = can_be_rehashed && partial_serializer.can_be_rehashed();
context_snapshots.Add(new i::SnapshotData(&partial_serializer));
}
startup_serializer.SerializeWeakReferencesAndDeferred();
- can_be_rehashed = can_be_rehashed && startup_serializer.can_be_rehashed();
#ifdef DEBUG
if (i::FLAG_external_reference_stats) {
@@ -675,8 +725,8 @@ StartupData SnapshotCreator::CreateBlob(
#endif // DEBUG
i::SnapshotData startup_snapshot(&startup_serializer);
- StartupData result = i::Snapshot::CreateSnapshotBlob(
- &startup_snapshot, &context_snapshots, can_be_rehashed);
+ StartupData result =
+ i::Snapshot::CreateSnapshotBlob(&startup_snapshot, &context_snapshots);
// Delete heap-allocated context snapshot instances.
for (const auto& context_snapshot : context_snapshots) {
@@ -818,7 +868,6 @@ Extension::Extension(const char* name,
ResourceConstraints::ResourceConstraints()
: max_semi_space_size_(0),
max_old_space_size_(0),
- max_executable_size_(0),
stack_limit_(NULL),
code_range_size_(0),
max_zone_pool_size_(0) {}
@@ -840,24 +889,20 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
if (physical_memory <= low_limit) {
set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeLowMemoryDevice);
set_max_old_space_size(i::Heap::kMaxOldSpaceSizeLowMemoryDevice);
- set_max_executable_size(i::Heap::kMaxExecutableSizeLowMemoryDevice);
set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSizeLowMemoryDevice);
} else if (physical_memory <= medium_limit) {
set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeMediumMemoryDevice);
set_max_old_space_size(i::Heap::kMaxOldSpaceSizeMediumMemoryDevice);
- set_max_executable_size(i::Heap::kMaxExecutableSizeMediumMemoryDevice);
set_max_zone_pool_size(
i::AccountingAllocator::kMaxPoolSizeMediumMemoryDevice);
} else if (physical_memory <= high_limit) {
set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHighMemoryDevice);
set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHighMemoryDevice);
- set_max_executable_size(i::Heap::kMaxExecutableSizeHighMemoryDevice);
set_max_zone_pool_size(
i::AccountingAllocator::kMaxPoolSizeHighMemoryDevice);
} else {
set_max_semi_space_size(i::Heap::kMaxSemiSpaceSizeHugeMemoryDevice);
set_max_old_space_size(i::Heap::kMaxOldSpaceSizeHugeMemoryDevice);
- set_max_executable_size(i::Heap::kMaxExecutableSizeHugeMemoryDevice);
set_max_zone_pool_size(
i::AccountingAllocator::kMaxPoolSizeHugeMemoryDevice);
}
@@ -876,13 +921,11 @@ void SetResourceConstraints(i::Isolate* isolate,
const ResourceConstraints& constraints) {
int semi_space_size = constraints.max_semi_space_size();
int old_space_size = constraints.max_old_space_size();
- int max_executable_size = constraints.max_executable_size();
size_t code_range_size = constraints.code_range_size();
size_t max_pool_size = constraints.max_zone_pool_size();
- if (semi_space_size != 0 || old_space_size != 0 ||
- max_executable_size != 0 || code_range_size != 0) {
+ if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) {
isolate->heap()->ConfigureHeap(semi_space_size, old_space_size,
- max_executable_size, code_range_size);
+ code_range_size);
}
isolate->allocator()->ConfigureSegmentPool(max_pool_size);
@@ -1015,10 +1058,7 @@ HandleScope::~HandleScope() {
i::HandleScope::CloseScope(isolate_, prev_next_, prev_limit_);
}
-V8_NORETURN void* HandleScope::operator new(size_t) {
- base::OS::Abort();
- abort();
-}
+void* HandleScope::operator new(size_t) { base::OS::Abort(); }
void HandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
@@ -1059,10 +1099,7 @@ i::Object** EscapableHandleScope::Escape(i::Object** escape_value) {
return escape_slot_;
}
-V8_NORETURN void* EscapableHandleScope::operator new(size_t) {
- base::OS::Abort();
- abort();
-}
+void* EscapableHandleScope::operator new(size_t) { base::OS::Abort(); }
void EscapableHandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
@@ -1084,10 +1121,7 @@ SealHandleScope::~SealHandleScope() {
current->sealed_level = prev_sealed_level_;
}
-V8_NORETURN void* SealHandleScope::operator new(size_t) {
- base::OS::Abort();
- abort();
-}
+void* SealHandleScope::operator new(size_t) { base::OS::Abort(); }
void SealHandleScope::operator delete(void*, size_t) { base::OS::Abort(); }
@@ -1396,7 +1430,7 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback,
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
+ isolate->factory()->NewStruct(i::TUPLE2_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
@@ -1853,7 +1887,7 @@ void ObjectTemplate::SetCallAsFunctionHandler(FunctionCallback callback,
auto cons = EnsureConstructor(isolate, this);
EnsureNotInstantiated(cons, "v8::ObjectTemplate::SetCallAsFunctionHandler");
i::Handle<i::Struct> struct_obj =
- isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE);
+ isolate->factory()->NewStruct(i::TUPLE2_TYPE);
i::Handle<i::CallHandlerInfo> obj =
i::Handle<i::CallHandlerInfo>::cast(struct_obj);
SET_FIELD_WRAPPED(obj, set_callback, callback);
@@ -2478,6 +2512,7 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
source->parser->ReportErrors(isolate, script);
}
source->parser->UpdateStatistics(isolate, script);
+ source->info->UpdateStatisticsAfterBackgroundParse(isolate);
i::DeferredHandleScope deferred_handle_scope(isolate);
{
@@ -2620,10 +2655,7 @@ v8::TryCatch::~TryCatch() {
}
}
-V8_NORETURN void* v8::TryCatch::operator new(size_t) {
- base::OS::Abort();
- abort();
-}
+void* v8::TryCatch::operator new(size_t) { base::OS::Abort(); }
void v8::TryCatch::operator delete(void*, size_t) { base::OS::Abort(); }
@@ -2718,10 +2750,7 @@ void v8::TryCatch::SetVerbose(bool value) {
is_verbose_ = value;
}
-bool v8::TryCatch::IsVerbose() const {
- return is_verbose_;
-}
-
+bool v8::TryCatch::IsVerbose() const { return is_verbose_; }
void v8::TryCatch::SetCaptureMessage(bool value) {
capture_message_ = value;
@@ -2764,8 +2793,8 @@ v8::Local<v8::StackTrace> Message::GetStackTrace() const {
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
auto message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
i::Handle<i::Object> stackFramesObj(message->stack_frames(), isolate);
- if (!stackFramesObj->IsJSArray()) return v8::Local<v8::StackTrace>();
- auto stackTrace = i::Handle<i::JSArray>::cast(stackFramesObj);
+ if (!stackFramesObj->IsFixedArray()) return v8::Local<v8::StackTrace>();
+ auto stackTrace = i::Handle<i::FixedArray>::cast(stackFramesObj);
return scope.Escape(Utils::StackTraceToLocal(stackTrace));
}
@@ -2890,15 +2919,14 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
- auto self = Utils::OpenHandle(this);
- auto obj = i::JSReceiver::GetElement(isolate, self, index).ToHandleChecked();
+ auto obj = handle(Utils::OpenHandle(this)->get(index), isolate);
auto info = i::Handle<i::StackFrameInfo>::cast(obj);
return scope.Escape(Utils::StackFrameToLocal(info));
}
int StackTrace::GetFrameCount() const {
- return i::Smi::cast(Utils::OpenHandle(this)->length())->value();
+ return Utils::OpenHandle(this)->length();
}
namespace {
@@ -2946,12 +2974,12 @@ i::Handle<i::JSObject> NewFrameObject(i::Isolate* isolate,
Local<Array> StackTrace::AsArray() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- i::Handle<i::JSArray> self = Utils::OpenHandle(this);
- int frame_count = GetFrameCount();
+ i::Handle<i::FixedArray> self = Utils::OpenHandle(this);
+ int frame_count = self->length();
i::Handle<i::FixedArray> frames =
isolate->factory()->NewFixedArray(frame_count);
for (int i = 0; i < frame_count; ++i) {
- auto obj = i::JSReceiver::GetElement(isolate, self, i).ToHandleChecked();
+ auto obj = handle(self->get(i), isolate);
auto frame = i::Handle<i::StackFrameInfo>::cast(obj);
i::Handle<i::JSObject> frame_obj = NewFrameObject(isolate, frame);
frames->set(i, *frame_obj);
@@ -2967,7 +2995,7 @@ Local<StackTrace> StackTrace::CurrentStackTrace(
StackTraceOptions options) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
- i::Handle<i::JSArray> stackTrace =
+ i::Handle<i::FixedArray> stackTrace =
i_isolate->CaptureCurrentStackTrace(frame_limit, options);
return Utils::StackTraceToLocal(stackTrace);
}
@@ -3046,7 +3074,7 @@ Local<NativeWeakMap> NativeWeakMap::New(Isolate* v8_isolate) {
void NativeWeakMap::Set(Local<Value> v8_key, Local<Value> v8_value) {
i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
i::Isolate* isolate = weak_collection->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
i::Handle<i::Object> value = Utils::OpenHandle(*v8_value);
@@ -3067,7 +3095,7 @@ void NativeWeakMap::Set(Local<Value> v8_key, Local<Value> v8_value) {
Local<Value> NativeWeakMap::Get(Local<Value> v8_key) const {
i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
i::Isolate* isolate = weak_collection->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
if (!key->IsJSReceiver() && !key->IsSymbol()) {
DCHECK(false);
@@ -3228,11 +3256,6 @@ struct ValueSerializer::PrivateData {
i::ValueSerializer serializer;
};
-// static
-uint32_t ValueSerializer::GetCurrentDataFormatVersion() {
- return i::ValueSerializer::GetCurrentDataFormatVersion();
-}
-
ValueSerializer::ValueSerializer(Isolate* isolate)
: ValueSerializer(isolate, nullptr) {}
@@ -4194,6 +4217,18 @@ Local<String> Value::TypeOf(v8::Isolate* external_isolate) {
return Utils::ToLocal(i::Object::TypeOf(isolate, Utils::OpenHandle(this)));
}
+Maybe<bool> Value::InstanceOf(v8::Local<v8::Context> context,
+ v8::Local<v8::Object> object) {
+ PREPARE_FOR_EXECUTION_PRIMITIVE(context, Value, InstanceOf, bool);
+ auto left = Utils::OpenHandle(this);
+ auto right = Utils::OpenHandle(*object);
+ i::Handle<i::Object> result;
+ has_pending_exception =
+ !i::Object::InstanceOf(isolate, left, right).ToHandle(&result);
+ RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
+ return Just(result->IsTrue(isolate));
+}
+
Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
v8::Local<Value> key, v8::Local<Value> value) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, Set, bool);
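Value::InstanceOf is new public API running full JS instanceof semantics (the ordinary prototype walk plus Symbol.hasInstance), with exceptions surfaced as Nothing. Usage sketch; `value`, `ctor`, and `context` are assumed to be in scope:

    v8::Maybe<bool> hit = value->InstanceOf(context, ctor);
    if (hit.IsNothing()) {
      // instanceof threw, e.g. from a Symbol.hasInstance handler.
    } else if (hit.FromJust()) {
      // `value instanceof ctor` evaluated to true.
    }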
@@ -4538,12 +4573,11 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Local<Value> key) {
.FromMaybe(static_cast<PropertyAttribute>(i::NONE));
}
-
MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
- Local<String> key) {
+ Local<Name> key) {
PREPARE_FOR_EXECUTION(context, Object, GetOwnPropertyDescriptor, Value);
i::Handle<i::JSReceiver> obj = Utils::OpenHandle(this);
- i::Handle<i::String> key_name = Utils::OpenHandle(*key);
+ i::Handle<i::Name> key_name = Utils::OpenHandle(*key);
i::PropertyDescriptor desc;
Maybe<bool> found =
@@ -4556,8 +4590,7 @@ MaybeLocal<Value> v8::Object::GetOwnPropertyDescriptor(Local<Context> context,
RETURN_ESCAPED(Utils::ToLocal(desc.ToObject(isolate)));
}
-
-Local<Value> v8::Object::GetOwnPropertyDescriptor(Local<String> key) {
+Local<Value> v8::Object::GetOwnPropertyDescriptor(Local<Name> key) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyDescriptor(context, key), Value);
}
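Widening the key parameter from Local<String> to Local<Name> makes symbol-keyed properties reachable through GetOwnPropertyDescriptor. Sketch, assuming `isolate`, `context`, and `obj` are in scope:

    v8::Local<v8::Name> key = v8::Symbol::GetToStringTag(isolate);
    v8::MaybeLocal<v8::Value> desc =
        obj->GetOwnPropertyDescriptor(context, key);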
@@ -4783,21 +4816,20 @@ bool v8::Object::Has(uint32_t index) {
return Has(context, index).FromMaybe(false);
}
-
template <typename Getter, typename Setter, typename Data>
static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
Local<Name> name, Getter getter,
Setter setter, Data data,
AccessControl settings,
- PropertyAttribute attributes) {
+ PropertyAttribute attributes,
+ bool is_special_data_property) {
PREPARE_FOR_EXECUTION_PRIMITIVE(context, Object, SetAccessor, bool);
if (!Utils::OpenHandle(self)->IsJSObject()) return Just(false);
i::Handle<i::JSObject> obj =
i::Handle<i::JSObject>::cast(Utils::OpenHandle(self));
v8::Local<AccessorSignature> signature;
- auto info =
- MakeAccessorInfo(name, getter, setter, data, settings, attributes,
- signature, i::FLAG_disable_old_api_accessors, false);
+ auto info = MakeAccessorInfo(name, getter, setter, data, settings, attributes,
+ signature, is_special_data_property, false);
if (info.is_null()) return Nothing<bool>();
bool fast = obj->HasFastProperties();
i::Handle<i::Object> result;
@@ -4818,7 +4850,8 @@ Maybe<bool> Object::SetAccessor(Local<Context> context, Local<Name> name,
MaybeLocal<Value> data, AccessControl settings,
PropertyAttribute attribute) {
return ObjectSetAccessor(context, this, name, getter, setter,
- data.FromMaybe(Local<Value>()), settings, attribute);
+ data.FromMaybe(Local<Value>()), settings, attribute,
+ i::FLAG_disable_old_api_accessors);
}
@@ -4827,7 +4860,8 @@ bool Object::SetAccessor(Local<String> name, AccessorGetterCallback getter,
AccessControl settings, PropertyAttribute attributes) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return ObjectSetAccessor(context, this, name, getter, setter, data, settings,
- attributes).FromMaybe(false);
+ attributes, i::FLAG_disable_old_api_accessors)
+ .FromMaybe(false);
}
@@ -4837,7 +4871,8 @@ bool Object::SetAccessor(Local<Name> name, AccessorNameGetterCallback getter,
PropertyAttribute attributes) {
auto context = ContextFromHeapObject(Utils::OpenHandle(this));
return ObjectSetAccessor(context, this, name, getter, setter, data, settings,
- attributes).FromMaybe(false);
+ attributes, i::FLAG_disable_old_api_accessors)
+ .FromMaybe(false);
}
@@ -4848,7 +4883,7 @@ void Object::SetAccessorProperty(Local<Name> name, Local<Function> getter,
// TODO(verwaest): Remove |settings|.
DCHECK_EQ(v8::DEFAULT, settings);
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
auto self = Utils::OpenHandle(this);
if (!self->IsJSObject()) return;
@@ -4860,6 +4895,15 @@ void Object::SetAccessorProperty(Local<Name> name, Local<Function> getter,
static_cast<i::PropertyAttributes>(attribute));
}
+Maybe<bool> Object::SetNativeDataProperty(v8::Local<v8::Context> context,
+ v8::Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter,
+ v8::Local<Value> data,
+ PropertyAttribute attributes) {
+ return ObjectSetAccessor(context, this, name, getter, setter, data, DEFAULT,
+ attributes, true);
+}
Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
Local<Name> key) {
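SetNativeDataProperty, added above, reuses ObjectSetAccessor but pins is_special_data_property to true, so the property always behaves as a data property backed by native callbacks rather than as an accessor pair. Usage sketch; the getter and its return value are illustrative only:

    void AnswerGetter(v8::Local<v8::Name> name,
                      const v8::PropertyCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(42);
    }
    // Elsewhere, with `context`, `obj`, and `key` in scope:
    //   obj->SetNativeDataProperty(context, key, AnswerGetter, nullptr,
    //                              v8::Local<v8::Value>(), v8::None);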
@@ -5289,7 +5333,7 @@ Local<Value> Function::GetDebugName() const {
Local<Value> Function::GetDisplayName() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
auto self = Utils::OpenHandle(this);
if (!self->IsJSFunction()) {
return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
@@ -5387,7 +5431,6 @@ Local<v8::Value> Function::GetBoundFunction() const {
return v8::Undefined(reinterpret_cast<v8::Isolate*>(self->GetIsolate()));
}
-
int Name::GetIdentityHash() {
auto self = Utils::OpenHandle(this);
return static_cast<int>(self->Hash());
@@ -6322,11 +6365,11 @@ template <>
struct InvokeBootstrapper<i::Context> {
i::Handle<i::Context> Invoke(
i::Isolate* isolate, i::MaybeHandle<i::JSGlobalProxy> maybe_global_proxy,
- v8::Local<v8::ObjectTemplate> global_object_template,
+ v8::Local<v8::ObjectTemplate> global_proxy_template,
v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
v8::DeserializeInternalFieldsCallback embedder_fields_deserializer) {
return isolate->bootstrapper()->CreateEnvironment(
- maybe_global_proxy, global_object_template, extensions,
+ maybe_global_proxy, global_proxy_template, extensions,
context_snapshot_index, embedder_fields_deserializer);
}
};
@@ -6335,13 +6378,13 @@ template <>
struct InvokeBootstrapper<i::JSGlobalProxy> {
i::Handle<i::JSGlobalProxy> Invoke(
i::Isolate* isolate, i::MaybeHandle<i::JSGlobalProxy> maybe_global_proxy,
- v8::Local<v8::ObjectTemplate> global_object_template,
+ v8::Local<v8::ObjectTemplate> global_proxy_template,
v8::ExtensionConfiguration* extensions, size_t context_snapshot_index,
v8::DeserializeInternalFieldsCallback embedder_fields_deserializer) {
USE(extensions);
USE(context_snapshot_index);
return isolate->bootstrapper()->NewRemoteContext(maybe_global_proxy,
- global_object_template);
+ global_proxy_template);
}
};
@@ -6823,7 +6866,7 @@ MaybeLocal<String> String::NewFromTwoByte(Isolate* isolate,
Local<String> v8::String::Concat(Local<String> left, Local<String> right) {
i::Handle<i::String> left_string = Utils::OpenHandle(*left);
i::Isolate* isolate = left_string->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
LOG_API(isolate, String, Concat);
i::Handle<i::String> right_string = Utils::OpenHandle(*right);
// If we are steering towards a range error, do not wait for the error to be
@@ -7604,14 +7647,6 @@ Local<String> WasmCompiledModule::GetWasmWireBytes() {
return Local<String>::Cast(Utils::ToLocal(wire_bytes));
}
-WasmCompiledModule::TransferrableModule&
-WasmCompiledModule::TransferrableModule::operator=(
- TransferrableModule&& src) {
- compiled_code = std::move(src.compiled_code);
- wire_bytes = std::move(src.wire_bytes);
- return *this;
-}
-
// Currently, wasm modules are bound, both to Isolate and to
// the Context they were created in. The currently-supported means to
// decontextualize and then re-contextualize a module is via
@@ -7666,8 +7701,7 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Deserialize(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::MaybeHandle<i::FixedArray> maybe_compiled_part =
i::WasmCompiledModuleSerializer::DeserializeWasmModule(
- i_isolate, &sc,
- {wire_bytes.first, static_cast<int>(wire_bytes.second)});
+ i_isolate, &sc, {wire_bytes.first, wire_bytes.second});
i::Handle<i::FixedArray> compiled_part;
if (!maybe_compiled_part.ToHandle(&compiled_part)) {
return MaybeLocal<WasmCompiledModule>();
@@ -7726,13 +7760,6 @@ MaybeLocal<WasmCompiledModule> WasmModuleObjectBuilder::Finish() {
return WasmCompiledModule::Compile(isolate_, wire_bytes.get(), total_size_);
}
-WasmModuleObjectBuilder&
-WasmModuleObjectBuilder::operator=(WasmModuleObjectBuilder&& src) {
- received_buffers_ = std::move(src.received_buffers_);
- total_size_ = src.total_size_;
- return *this;
-}
-
// static
v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() {
return new ArrayBufferAllocator();
@@ -8043,34 +8070,28 @@ Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
i_isolate->SymbolFor(i::Heap::kApiSymbolTableRootIndex, i_name, false));
}
+#define WELL_KNOWN_SYMBOLS(V) \
+ V(HasInstance, has_instance) \
+ V(IsConcatSpreadable, is_concat_spreadable) \
+ V(Iterator, iterator) \
+ V(Match, match) \
+ V(Replace, replace) \
+ V(Search, search) \
+ V(Split, split) \
+ V(ToPrimitive, to_primitive) \
+ V(ToStringTag, to_string_tag) \
+ V(Unscopables, unscopables)
-Local<Symbol> v8::Symbol::GetIterator(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- return Utils::ToLocal(i_isolate->factory()->iterator_symbol());
-}
-
-
-Local<Symbol> v8::Symbol::GetUnscopables(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- return Utils::ToLocal(i_isolate->factory()->unscopables_symbol());
-}
-
-Local<Symbol> v8::Symbol::GetToPrimitive(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- return Utils::ToLocal(i_isolate->factory()->to_primitive_symbol());
-}
-
-Local<Symbol> v8::Symbol::GetToStringTag(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- return Utils::ToLocal(i_isolate->factory()->to_string_tag_symbol());
-}
+#define SYMBOL_GETTER(Name, name) \
+ Local<Symbol> v8::Symbol::Get##Name(Isolate* isolate) { \
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate); \
+ return Utils::ToLocal(i_isolate->factory()->name##_symbol()); \
+ }
+WELL_KNOWN_SYMBOLS(SYMBOL_GETTER)
-Local<Symbol> v8::Symbol::GetIsConcatSpreadable(Isolate* isolate) {
- i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- return Utils::ToLocal(i_isolate->factory()->is_concat_spreadable_symbol());
-}
-
+#undef SYMBOL_GETTER
+#undef WELL_KNOWN_SYMBOLS
Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
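Expanding the X-macro for one entry shows that the generated getter is identical to the hand-written ones it replaces; for V(Iterator, iterator) it produces:

    Local<Symbol> v8::Symbol::GetIterator(Isolate* isolate) {
      i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
      return Utils::ToLocal(i_isolate->factory()->iterator_symbol());
    }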
@@ -8196,7 +8217,7 @@ v8::Local<v8::Context> Isolate::GetEnteredOrMicrotaskContext() {
v8::Local<Value> Isolate::ThrowException(v8::Local<v8::Value> value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
- ENTER_V8(isolate);
+ ENTER_V8_DO_NOT_USE(isolate);
// If we're passed an empty handle, we throw an undefined exception
// to deal more gracefully with out of memory situations.
if (value.IsEmpty()) {
@@ -8641,7 +8662,7 @@ void Isolate::EnqueueMicrotask(MicrotaskCallback microtask, void* data) {
i::HandleScope scope(isolate);
i::Handle<i::CallHandlerInfo> callback_info =
i::Handle<i::CallHandlerInfo>::cast(
- isolate->factory()->NewStruct(i::CALL_HANDLER_INFO_TYPE));
+ isolate->factory()->NewStruct(i::TUPLE2_TYPE));
SET_FIELD_WRAPPED(callback_info, set_callback, microtask);
SET_FIELD_WRAPPED(callback_info, set_data, data);
isolate->EnqueueMicrotask(callback_info);
@@ -8734,7 +8755,6 @@ bool Isolate::IdleNotificationDeadline(double deadline_in_seconds) {
return isolate->heap()->IdleNotification(deadline_in_seconds);
}
-
void Isolate::LowMemoryNotification() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
{
@@ -8744,6 +8764,15 @@ void Isolate::LowMemoryNotification() {
isolate->heap()->CollectAllAvailableGarbage(
i::GarbageCollectionReason::kLowMemoryNotification);
}
+ {
+ i::HeapIterator iterator(isolate->heap());
+ i::HeapObject* obj;
+ while ((obj = iterator.next()) != nullptr) {
+ if (obj->IsAbstractCode()) {
+ i::AbstractCode::cast(obj)->DropStackFrameCache();
+ }
+ }
+ }
}
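LowMemoryNotification now makes a second pass after the full GC: it walks the heap and drops the stack-frame cache attached to every AbstractCode object, reclaiming debugger-related memory as well. Embedder-side use is unchanged:

    // e.g. from a platform memory-pressure signal handler:
    isolate->LowMemoryNotification();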
@@ -8946,6 +8975,10 @@ void Isolate::VisitWeakHandles(PersistentHandleVisitor* visitor) {
isolate->global_handles()->IterateWeakRootsInNewSpaceWithClassIds(visitor);
}
+void Isolate::SetAllowAtomicsWait(bool allow) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+ isolate->set_allow_atomics_wait(allow);
+}
MicrotasksScope::MicrotasksScope(Isolate* isolate, MicrotasksScope::Type type)
: isolate_(reinterpret_cast<i::Isolate*>(isolate)),
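SetAllowAtomicsWait, added above, lets an embedder forbid blocking Atomics.wait on a given isolate; this maps to the agent's can-block capability, so with the flag false, Atomics.wait is expected to throw rather than block. It is worth setting on a main thread that must not stall:

    // On the embedder's UI/main-thread isolate:
    isolate->SetAllowAtomicsWait(false);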
@@ -8999,7 +9032,7 @@ String::Utf8Value::Utf8Value(v8::Local<v8::Value> obj)
if (obj.IsEmpty()) return;
i::Isolate* isolate = i::Isolate::Current();
Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
- ENTER_V8(isolate);
+ ENTER_V8_DO_NOT_USE(isolate);
i::HandleScope scope(isolate);
Local<Context> context = v8_isolate->GetCurrentContext();
TryCatch try_catch(v8_isolate);
@@ -9021,7 +9054,7 @@ String::Value::Value(v8::Local<v8::Value> obj) : str_(NULL), length_(0) {
if (obj.IsEmpty()) return;
i::Isolate* isolate = i::Isolate::Current();
Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
- ENTER_V8(isolate);
+ ENTER_V8_DO_NOT_USE(isolate);
i::HandleScope scope(isolate);
Local<Context> context = v8_isolate->GetCurrentContext();
TryCatch try_catch(v8_isolate);
@@ -9066,7 +9099,7 @@ Local<Message> Exception::CreateMessage(Isolate* isolate,
Local<Value> exception) {
i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::HandleScope scope(i_isolate);
return Utils::MessageToLocal(
scope.CloseAndEscape(i_isolate->CreateMessage(obj, NULL)));
@@ -9086,7 +9119,7 @@ Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
if (!obj->IsJSObject()) return Local<StackTrace>();
i::Handle<i::JSObject> js_obj = i::Handle<i::JSObject>::cast(obj);
i::Isolate* isolate = js_obj->GetIsolate();
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
return Utils::StackTraceToLocal(isolate->GetDetailedStackTrace(js_obj));
}
@@ -9096,11 +9129,14 @@ Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
bool Debug::SetDebugEventListener(Isolate* isolate, EventCallback that,
Local<Value> data) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8(i_isolate);
- i::HandleScope scope(i_isolate);
if (that == nullptr) {
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
+ i::HandleScope scope(i_isolate);
i_isolate->debug()->SetDebugDelegate(nullptr, false);
} else {
+ // Might create the Debug context.
+ ENTER_V8_FOR_NEW_CONTEXT(i_isolate);
+ i::HandleScope scope(i_isolate);
i::Handle<i::Object> i_data = i_isolate->factory()->undefined_value();
if (!data.IsEmpty()) i_data = Utils::OpenHandle(*data);
i::NativeDebugDelegate* delegate =
@@ -9141,7 +9177,7 @@ Local<Context> Debug::GetDebugContext(Isolate* isolate) {
MaybeLocal<Context> Debug::GetDebuggedContext(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
if (!i_isolate->debug()->in_debug_scope()) return MaybeLocal<Context>();
i::Handle<i::Object> calling = i_isolate->GetCallingNativeContext();
if (calling.is_null()) return MaybeLocal<Context>();
@@ -9167,9 +9203,18 @@ MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
return debug::GetInternalProperties(v8_isolate, value);
}
+void debug::SetContextId(Local<Context> context, int id) {
+ Utils::OpenHandle(*context)->set_debug_context_id(i::Smi::FromInt(id));
+}
+
+int debug::GetContextId(Local<Context> context) {
+ i::Object* value = Utils::OpenHandle(*context)->debug_context_id();
+ return (value->IsSmi()) ? i::Smi::cast(value)->value() : 0;
+}
+
Local<Context> debug::GetDebugContext(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
return Utils::ToLocal(i_isolate->debug()->GetDebugContext());
}
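The context-id pair gives inspectors a stable integer handle stored on the context itself as a Smi; GetContextId reports 0 for contexts that never had an id assigned. Sketch:

    v8::debug::SetContextId(context, 7);
    int id = v8::debug::GetContextId(context);  // 7, or 0 if never set.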
@@ -9207,7 +9252,7 @@ void debug::CancelDebugBreak(Isolate* isolate) {
MaybeLocal<Array> debug::GetInternalProperties(Isolate* v8_isolate,
Local<Value> value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::Object> val = Utils::OpenHandle(*value);
i::Handle<i::JSArray> result;
if (!i::Runtime::GetInternalProperties(isolate, val).ToHandle(&result))
@@ -9225,20 +9270,20 @@ void debug::ChangeBreakOnException(Isolate* isolate, ExceptionBreakState type) {
void debug::SetBreakPointsActive(Isolate* v8_isolate, bool is_active) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
isolate->debug()->set_break_points_active(is_active);
}
void debug::SetOutOfMemoryCallback(Isolate* isolate,
OutOfMemoryCallback callback, void* data) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- ENTER_V8(i_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i_isolate->heap()->SetOutOfMemoryCallback(callback, data);
}
void debug::PrepareStep(Isolate* v8_isolate, StepAction action) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_DO_NOT_USE(isolate);
CHECK(isolate->debug()->CheckExecutionState());
// Clear all current stepping setup.
isolate->debug()->ClearStepping();
@@ -9248,20 +9293,20 @@ void debug::PrepareStep(Isolate* v8_isolate, StepAction action) {
void debug::ClearStepping(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
// Clear all current stepping setup.
isolate->debug()->ClearStepping();
}
void debug::BreakRightNow(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_DO_NOT_USE(isolate);
isolate->debug()->HandleDebugBreak(i::kIgnoreIfAllFramesBlackboxed);
}
bool debug::AllFramesOnStackAreBlackboxed(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_DO_NOT_USE(isolate);
return isolate->debug()->AllFramesOnStackAreBlackboxed();
}
@@ -9278,6 +9323,11 @@ bool debug::Script::WasCompiled() const {
i::Script::COMPILATION_STATE_COMPILED;
}
+bool debug::Script::IsEmbedded() const {
+ i::Handle<i::Script> script = Utils::OpenHandle(this);
+ return script->context_data() == script->GetHeap()->uninitialized_symbol();
+}
+
int debug::Script::Id() const { return Utils::OpenHandle(this)->id(); }
int debug::Script::LineOffset() const {
@@ -9334,12 +9384,13 @@ MaybeLocal<String> debug::Script::SourceMappingURL() const {
handle_scope.CloseAndEscape(i::Handle<i::String>::cast(value)));
}
-MaybeLocal<Value> debug::Script::ContextData() const {
+Maybe<int> debug::Script::ContextId() const {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
i::HandleScope handle_scope(isolate);
i::Handle<i::Script> script = Utils::OpenHandle(this);
- i::Handle<i::Object> value(script->context_data(), isolate);
- return Utils::ToLocal(handle_scope.CloseAndEscape(value));
+ i::Object* value = script->context_data();
+ if (value->IsSmi()) return Just(i::Smi::cast(value)->value());
+ return Nothing<int>();
}
MaybeLocal<String> debug::Script::Source() const {
@@ -9541,7 +9592,7 @@ bool debug::Location::IsEmpty() const {
void debug::GetLoadedScripts(v8::Isolate* v8_isolate,
PersistentValueVector<debug::Script>& scripts) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
// TODO(kozyatinskiy): remove this GC once tests are dealt with.
isolate->heap()->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
i::GarbageCollectionReason::kDebugger);
@@ -9584,14 +9635,15 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
void debug::SetDebugDelegate(Isolate* v8_isolate,
debug::DebugDelegate* delegate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ // Might create the Debug context.
+ ENTER_V8_FOR_NEW_CONTEXT(isolate);
isolate->debug()->SetDebugDelegate(delegate, false);
}
void debug::ResetBlackboxedStateCache(Isolate* v8_isolate,
v8::Local<debug::Script> script) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::DisallowHeapAllocation no_gc;
i::SharedFunctionInfo::ScriptIterator iter(Utils::OpenHandle(*script));
while (i::SharedFunctionInfo* info = iter.Next()) {
@@ -9601,7 +9653,7 @@ void debug::ResetBlackboxedStateCache(Isolate* v8_isolate,
int debug::EstimatedValueSize(Isolate* v8_isolate, v8::Local<v8::Value> value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::Handle<i::Object> object = Utils::OpenHandle(*value);
if (object->IsSmi()) return i::kPointerSize;
CHECK(object->IsHeapObject());
@@ -9612,7 +9664,7 @@ v8::MaybeLocal<v8::Array> debug::EntriesPreview(Isolate* v8_isolate,
v8::Local<v8::Value> value,
bool* is_key_value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (value->IsMap()) {
*is_key_value = true;
return value.As<Map>()->AsArray();
@@ -9650,7 +9702,7 @@ v8::MaybeLocal<v8::Array> debug::EntriesPreview(Isolate* v8_isolate,
Local<Function> debug::GetBuiltin(Isolate* v8_isolate, Builtin builtin) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- ENTER_V8(isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope handle_scope(isolate);
i::Builtins::Name name;
switch (builtin) {
@@ -9678,6 +9730,26 @@ Local<Function> debug::GetBuiltin(Isolate* v8_isolate, Builtin builtin) {
return Utils::ToLocal(handle_scope.CloseAndEscape(fun));
}
+void debug::SetConsoleDelegate(Isolate* v8_isolate, ConsoleDelegate* delegate) {
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+ ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
+ isolate->set_console_delegate(delegate);
+}
+
+debug::ConsoleCallArguments::ConsoleCallArguments(
+ const v8::FunctionCallbackInfo<v8::Value>& info)
+ : v8::FunctionCallbackInfo<v8::Value>(nullptr, info.values_, info.length_) {
+}
+
+debug::ConsoleCallArguments::ConsoleCallArguments(
+ internal::BuiltinArguments& args)
+ : v8::FunctionCallbackInfo<v8::Value>(nullptr, &args[0] - 1,
+ args.length() - 1) {}
+
+int debug::GetStackFrameId(v8::Local<v8::StackFrame> frame) {
+ return Utils::OpenHandle(*frame)->id();
+}
+
MaybeLocal<debug::Script> debug::GeneratorObject::Script() {
i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
i::Object* maybe_script = obj->function()->shared()->script();
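With SetConsoleDelegate, console.* calls can be routed to the embedder; ConsoleCallArguments adapts both API-level FunctionCallbackInfo and internal BuiltinArguments (skipping the receiver) into one read-only view. A hedged sketch, assuming ConsoleDelegate exposes per-method virtuals such as Log(const ConsoleCallArguments&); see debug-interface.h in this update for the authoritative set:

    #include <cstdio>
    class StderrConsole : public v8::debug::ConsoleDelegate {
      void Log(const v8::debug::ConsoleCallArguments& args) override {
        std::fprintf(stderr, "console.log: %d arg(s)\n", args.Length());
      }
    };
    // Registration: v8::debug::SetConsoleDelegate(isolate, &delegate);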
@@ -10297,8 +10369,7 @@ char* HandleScopeImplementer::RestoreThread(char* storage) {
return storage + ArchiveSpacePerThread();
}
-
-void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
+void HandleScopeImplementer::IterateThis(RootVisitor* v) {
#ifdef DEBUG
bool found_block_before_deferred = false;
#endif
@@ -10308,13 +10379,14 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
if (last_handle_before_deferred_block_ != NULL &&
(last_handle_before_deferred_block_ <= &block[kHandleBlockSize]) &&
(last_handle_before_deferred_block_ >= block)) {
- v->VisitPointers(block, last_handle_before_deferred_block_);
+ v->VisitRootPointers(Root::kHandleScope, block,
+ last_handle_before_deferred_block_);
DCHECK(!found_block_before_deferred);
#ifdef DEBUG
found_block_before_deferred = true;
#endif
} else {
- v->VisitPointers(block, &block[kHandleBlockSize]);
+ v->VisitRootPointers(Root::kHandleScope, block, &block[kHandleBlockSize]);
}
}
@@ -10323,30 +10395,30 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) {
// Iterate over live handles in the last block (if any).
if (!blocks()->is_empty()) {
- v->VisitPointers(blocks()->last(), handle_scope_data_.next);
+ v->VisitRootPointers(Root::kHandleScope, blocks()->last(),
+ handle_scope_data_.next);
}
List<Context*>* context_lists[2] = { &saved_contexts_, &entered_contexts_};
for (unsigned i = 0; i < arraysize(context_lists); i++) {
if (context_lists[i]->is_empty()) continue;
Object** start = reinterpret_cast<Object**>(&context_lists[i]->first());
- v->VisitPointers(start, start + context_lists[i]->length());
+ v->VisitRootPointers(Root::kHandleScope, start,
+ start + context_lists[i]->length());
}
if (microtask_context_) {
- Object** start = reinterpret_cast<Object**>(&microtask_context_);
- v->VisitPointers(start, start + 1);
+ v->VisitRootPointer(Root::kHandleScope,
+ reinterpret_cast<Object**>(&microtask_context_));
}
}
-
-void HandleScopeImplementer::Iterate(ObjectVisitor* v) {
+void HandleScopeImplementer::Iterate(RootVisitor* v) {
HandleScopeData* current = isolate_->handle_scope_data();
handle_scope_data_ = *current;
IterateThis(v);
}
-
-char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
+char* HandleScopeImplementer::Iterate(RootVisitor* v, char* storage) {
HandleScopeImplementer* scope_implementer =
reinterpret_cast<HandleScopeImplementer*>(storage);
scope_implementer->IterateThis(v);
@@ -10399,17 +10471,17 @@ DeferredHandles::~DeferredHandles() {
}
}
-
-void DeferredHandles::Iterate(ObjectVisitor* v) {
+void DeferredHandles::Iterate(RootVisitor* v) {
DCHECK(!blocks_.is_empty());
DCHECK((first_block_limit_ >= blocks_.first()) &&
(first_block_limit_ <= &(blocks_.first())[kHandleBlockSize]));
- v->VisitPointers(blocks_.first(), first_block_limit_);
+ v->VisitRootPointers(Root::kHandleScope, blocks_.first(), first_block_limit_);
for (int i = 1; i < blocks_.length(); i++) {
- v->VisitPointers(blocks_[i], &blocks_[i][kHandleBlockSize]);
+ v->VisitRootPointers(Root::kHandleScope, blocks_[i],
+ &blocks_[i][kHandleBlockSize]);
}
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index dcb51c8833..3b97e04fb2 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -105,7 +105,7 @@ class RegisteredExtension {
V(Message, JSMessageObject) \
V(Context, Context) \
V(External, Object) \
- V(StackTrace, JSArray) \
+ V(StackTrace, FixedArray) \
V(StackFrame, StackFrameInfo) \
V(Proxy, JSProxy) \
V(NativeWeakMap, JSWeakMap) \
@@ -189,7 +189,7 @@ class Utils {
static inline Local<DynamicImportResult> PromiseToDynamicImportResult(
v8::internal::Handle<v8::internal::JSPromise> obj);
static inline Local<StackTrace> StackTraceToLocal(
- v8::internal::Handle<v8::internal::JSArray> obj);
+ v8::internal::Handle<v8::internal::FixedArray> obj);
static inline Local<StackFrame> StackFrameToLocal(
v8::internal::Handle<v8::internal::StackFrameInfo> obj);
static inline Local<Number> NumberToLocal(
@@ -321,7 +321,7 @@ MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
MAKE_TO_LOCAL(PromiseToDynamicImportResult, JSPromise, DynamicImportResult)
-MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
+MAKE_TO_LOCAL(StackTraceToLocal, FixedArray, StackTrace)
MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
@@ -369,7 +369,7 @@ class V8_EXPORT_PRIVATE DeferredHandles {
isolate->LinkDeferredHandles(this);
}
- void Iterate(ObjectVisitor* v);
+ void Iterate(RootVisitor* v);
List<Object**> blocks_;
DeferredHandles* next_;
@@ -421,9 +421,8 @@ class HandleScopeImplementer {
void FreeThreadResources();
// Garbage collection support.
- void Iterate(v8::internal::ObjectVisitor* v);
- static char* Iterate(v8::internal::ObjectVisitor* v, char* data);
-
+ void Iterate(v8::internal::RootVisitor* v);
+ static char* Iterate(v8::internal::RootVisitor* v, char* data);
inline internal::Object** GetSpareOrNewBlock();
inline void DeleteExtensions(internal::Object** prev_limit);
@@ -538,7 +537,7 @@ class HandleScopeImplementer {
// This is only used for threading support.
HandleScopeData handle_scope_data_;
- void IterateThis(ObjectVisitor* v);
+ void IterateThis(RootVisitor* v);
char* RestoreThreadHelper(char* from);
char* ArchiveThreadHelper(char* to);
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 50b5ea2053..b5a59bb476 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -75,11 +75,7 @@ Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- if (FLAG_enable_embedded_constant_pool ||
- Assembler::IsMovW(Memory::int32_at(pc_))) {
- // We return the PC for embedded constant pool since this function is used
- // by the serializer and expects the address to reside within the code
- // object.
+ if (Assembler::IsMovW(Memory::int32_at(pc_))) {
return reinterpret_cast<Address>(pc_);
} else {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
@@ -238,22 +234,22 @@ template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
+ visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
+ visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
+ visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(this);
+ visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
+ visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(this);
+ visitor->VisitDebugTarget(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
+ visitor->VisitRuntimeEntry(host(), this);
}
}
@@ -344,32 +340,14 @@ Address Assembler::target_address_from_return_address(Address pc) {
// @ return address
// In cases that need frequent patching, the address is in the
// constant pool. It could be a small constant pool load:
- // ldr ip, [pc / pp, #...] @ call address
- // blx ip
- // @ return address
- // Or an extended constant pool load (ARMv7):
- // movw ip, #...
- // movt ip, #...
- // ldr ip, [pc, ip] @ call address
- // blx ip
- // @ return address
- // Or an extended constant pool load (ARMv6):
- // mov ip, #...
- // orr ip, ip, #...
- // orr ip, ip, #...
- // orr ip, ip, #...
- // ldr ip, [pc, ip] @ call address
+ // ldr ip, [pc, #...] @ call address
// blx ip
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate));
- if (IsLdrPcImmediateOffset(candidate_instr) |
- IsLdrPpImmediateOffset(candidate_instr)) {
+ if (IsLdrPcImmediateOffset(candidate_instr)) {
return candidate;
} else {
- if (IsLdrPpRegOffset(candidate_instr)) {
- candidate -= Assembler::kInstrSize;
- }
if (CpuFeatures::IsSupported(ARMv7)) {
candidate -= 1 * Assembler::kInstrSize;
DCHECK(IsMovW(Memory::int32_at(candidate)) &&
@@ -388,33 +366,22 @@ Address Assembler::target_address_from_return_address(Address pc) {
Address Assembler::return_address_from_call_start(Address pc) {
- if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) |
- IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
+ if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
// Load from constant pool, small section.
return pc + kInstrSize * 2;
} else {
if (CpuFeatures::IsSupported(ARMv7)) {
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
- if (IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize))) {
- // Load from constant pool, extended section.
- return pc + kInstrSize * 4;
- } else {
- // A movw / movt load immediate.
- return pc + kInstrSize * 3;
- }
+ // A movw / movt load immediate.
+ return pc + kInstrSize * 3;
} else {
DCHECK(IsMovImmed(Memory::int32_at(pc)));
DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)));
DCHECK(IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)));
DCHECK(IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
- if (IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize))) {
- // Load from constant pool, extended section.
- return pc + kInstrSize * 6;
- } else {
- // A mov / orr load immediate.
- return pc + kInstrSize * 5;
- }
+ // A mov / orr load immediate.
+ return pc + kInstrSize * 5;
}
}
}
@@ -422,11 +389,7 @@ Address Assembler::return_address_from_call_start(Address pc) {
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
- if (FLAG_enable_embedded_constant_pool) {
- set_target_address_at(isolate, constant_pool_entry, code, target);
- } else {
- Memory::Address_at(constant_pool_entry) = target;
- }
+ Memory::Address_at(constant_pool_entry) = target;
}
@@ -438,55 +401,18 @@ void Assembler::deserialization_set_target_internal_reference_at(
bool Assembler::is_constant_pool_load(Address pc) {
if (CpuFeatures::IsSupported(ARMv7)) {
- return !Assembler::IsMovW(Memory::int32_at(pc)) ||
- (FLAG_enable_embedded_constant_pool &&
- Assembler::IsLdrPpRegOffset(
- Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
+ return !Assembler::IsMovW(Memory::int32_at(pc));
} else {
- return !Assembler::IsMovImmed(Memory::int32_at(pc)) ||
- (FLAG_enable_embedded_constant_pool &&
- Assembler::IsLdrPpRegOffset(
- Memory::int32_at(pc + 4 * Assembler::kInstrSize)));
+ return !Assembler::IsMovImmed(Memory::int32_at(pc));
}
}
Address Assembler::constant_pool_entry_address(Address pc,
Address constant_pool) {
- if (FLAG_enable_embedded_constant_pool) {
- DCHECK(constant_pool != NULL);
- int cp_offset;
- if (!CpuFeatures::IsSupported(ARMv7) && IsMovImmed(Memory::int32_at(pc))) {
- DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
- IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
- IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)) &&
- IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize)));
- // This is an extended constant pool lookup (ARMv6).
- Instr mov_instr = instr_at(pc);
- Instr orr_instr_1 = instr_at(pc + kInstrSize);
- Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
- Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
- cp_offset = DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
- DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3);
- } else if (IsMovW(Memory::int32_at(pc))) {
- DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)) &&
- IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize)));
- // This is an extended constant pool lookup (ARMv7).
- Instruction* movw_instr = Instruction::At(pc);
- Instruction* movt_instr = Instruction::At(pc + kInstrSize);
- cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) |
- movw_instr->ImmedMovwMovtValue();
- } else {
- // This is a small constant pool lookup.
- DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
- cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
- }
- return constant_pool + cp_offset;
- } else {
- DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
- Instr instr = Memory::int32_at(pc);
- return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
- }
+ DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+ Instr instr = Memory::int32_at(pc);
+ return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
}
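
For orientation, a minimal standalone sketch (not V8 code) of the pc-relative lookup this function is reduced to: the ldr's 12-bit immediate is applied to pc, which ARM reads as the instruction address plus 8 (kPcLoadDelta). Bit positions are assumed from the A32 LDR (literal) encoding.

#include <cstdint>

// Recover the constant pool entry address from a pc-relative ldr.
// Assumes imm12 in bits [11:0] and the U (add/subtract) bit at bit 23.
uintptr_t ConstantPoolEntryAddress(uintptr_t instr_addr, uint32_t ldr_instr) {
  const int kPcLoadDelta = 8;  // pc reads as instruction address + 8
  int offset = static_cast<int>(ldr_instr & 0xFFF);
  if ((ldr_instr & (1u << 23)) == 0) offset = -offset;  // U bit clear: subtract
  return instr_addr + kPcLoadDelta + offset;
}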
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 2a7f68c07c..6932e97379 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -327,10 +327,9 @@ const int RelocInfo::kApplyMask = 0;
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded.  Being
- // specially coded on ARM means that it is a movw/movt instruction, or is an
- // embedded constant pool entry.  These only occur if
- // FLAG_enable_embedded_constant_pool is true.
- return FLAG_enable_embedded_constant_pool;
+ // specially coded on ARM means that it is a movw/movt instruction. We don't
+ // generate those for relocatable pointers.
+ return false;
}
@@ -503,18 +502,9 @@ const Instr kPopRegPattern =
// ldr rd, [pc, #offset]
const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCImmedPattern = 5 * B24 | L | Register::kCode_pc * B16;
-// ldr rd, [pp, #offset]
-const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPpImmedPattern = 5 * B24 | L | Register::kCode_r8 * B16;
-// ldr rd, [pp, rn]
-const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPpRegPattern = 7 * B24 | L | Register::kCode_r8 * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | Register::kCode_pc * B16 | 11 * B8;
-// vldr dd, [pp, #offset]
-const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
-const Instr kVldrDPpPattern = 13 * B24 | L | Register::kCode_r8 * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
@@ -554,8 +544,7 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
pending_32_bit_constants_(),
- pending_64_bit_constants_(),
- constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits) {
+ pending_64_bit_constants_() {
pending_32_bit_constants_.reserve(kMinNumPendingConstants);
pending_64_bit_constants_.reserve(kMinNumPendingConstants);
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
@@ -583,13 +572,9 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
int constant_pool_offset = 0;
- if (FLAG_enable_embedded_constant_pool) {
- constant_pool_offset = EmitEmbeddedConstantPool();
- } else {
- CheckConstPool(true, false);
- DCHECK(pending_32_bit_constants_.empty());
- DCHECK(pending_64_bit_constants_.empty());
- }
+ CheckConstPool(true, false);
+ DCHECK(pending_32_bit_constants_.empty());
+ DCHECK(pending_64_bit_constants_.empty());
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -740,24 +725,6 @@ Register Assembler::GetRm(Instr instr) {
}
-Instr Assembler::GetConsantPoolLoadPattern() {
- if (FLAG_enable_embedded_constant_pool) {
- return kLdrPpImmedPattern;
- } else {
- return kLdrPCImmedPattern;
- }
-}
-
-
-Instr Assembler::GetConsantPoolLoadMask() {
- if (FLAG_enable_embedded_constant_pool) {
- return kLdrPpImmedMask;
- } else {
- return kLdrPCImmedMask;
- }
-}
-
-
bool Assembler::IsPush(Instr instr) {
return ((instr & ~kRdMask) == kPushRegPattern);
}
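
The Is* predicates kept and deleted around this hunk all share one idiom; a self-contained illustration (the mask value below is schematic, built the same way as the kLdrPCImmed constants above, not copied from V8):

#include <cstdint>

using Instr = int32_t;
constexpr Instr B16 = 1 << 16, B20 = 1 << 20, B24 = 1 << 24;
// The mask isolates the encoding bits that are fixed for an instruction
// class; an instruction matches when those bits equal the class's pattern.
constexpr Instr kMaskSketch = 15 * B24 | 7 * B20 | 15 * B16;

bool MatchesPattern(Instr instr, Instr mask, Instr pattern) {
  return (instr & mask) == pattern;
}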
@@ -795,23 +762,6 @@ bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
}
-bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
- // Check the instruction is indeed a
- // ldr<cond> <Rd>, [pp +/- offset_12].
- return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
-}
-
-
-bool Assembler::IsLdrPpRegOffset(Instr instr) {
- // Check the instruction is indeed a
- // ldr<cond> <Rd>, [pp, +/- <Rm>].
- return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
-}
-
-
-Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; }
-
-
bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
// Check the instruction is indeed a
// vldr<cond> <Dd>, [pc +/- offset_10].
@@ -819,13 +769,6 @@ bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
}
-bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
- // Check the instruction is indeed a
- // vldr<cond> <Dd>, [pp +/- offset_10].
- return (instr & kVldrDPpMask) == kVldrDPpPattern;
-}
-
-
bool Assembler::IsBlxReg(Instr instr) {
// Check the instruction is indeed a
// blxcc <Rm>
@@ -1169,10 +1112,7 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) {
DCHECK(assembler != nullptr);
- if (FLAG_enable_embedded_constant_pool &&
- !assembler->is_constant_pool_available()) {
- return true;
- } else if (x.must_output_reloc_info(assembler)) {
+ if (x.must_output_reloc_info(assembler)) {
// Prefer constant pool if data is likely to be patched.
return false;
} else {
@@ -1196,14 +1136,10 @@ int Operand::instructions_required(const Assembler* assembler,
if (use_mov_immediate_load(*this, assembler)) {
// A movw / movt or mov / orr immediate load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
- } else if (assembler->ConstantPoolAccessIsInOverflow()) {
- // An overflowed constant pool load.
- instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
} else {
// A small constant pool load.
instructions = 1;
}
-
if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set
// For a mov or mvn instruction which doesn't set the condition
// code, the constant pool or immediate load is enough, otherwise we need
@@ -1228,51 +1164,25 @@ void Assembler::move_32_bit_immediate(Register rd,
}
if (use_mov_immediate_load(x, this)) {
+ // use_mov_immediate_load should return false when we need to output
+ // relocation info, since we prefer the constant pool for values that
+ // can be patched.
+ DCHECK(!x.must_output_reloc_info(this));
Register target = rd.code() == pc.code() ? ip : rd;
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(this, ARMv7);
- if (!FLAG_enable_embedded_constant_pool &&
- x.must_output_reloc_info(this)) {
- // Make sure the movw/movt doesn't get separated.
- BlockConstPoolFor(2);
- }
movw(target, imm32 & 0xffff, cond);
movt(target, imm32 >> 16, cond);
- } else {
- DCHECK(FLAG_enable_embedded_constant_pool);
- mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
- orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
- orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
- orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
}
if (target.code() != rd.code()) {
mov(rd, target, LeaveCC, cond);
}
} else {
- DCHECK(!FLAG_enable_embedded_constant_pool || is_constant_pool_available());
ConstantPoolEntry::Access access =
ConstantPoolAddEntry(pc_offset(), x.rmode_, x.imm32_);
- if (access == ConstantPoolEntry::OVERFLOWED) {
- DCHECK(FLAG_enable_embedded_constant_pool);
- Register target = rd.code() == pc.code() ? ip : rd;
- // Emit instructions to load constant pool offset.
- if (CpuFeatures::IsSupported(ARMv7)) {
- CpuFeatureScope scope(this, ARMv7);
- movw(target, 0, cond);
- movt(target, 0, cond);
- } else {
- mov(target, Operand(0), LeaveCC, cond);
- orr(target, target, Operand(0), LeaveCC, cond);
- orr(target, target, Operand(0), LeaveCC, cond);
- orr(target, target, Operand(0), LeaveCC, cond);
- }
- // Load from constant pool at offset.
- ldr(rd, MemOperand(pp, target), cond);
- } else {
- DCHECK(access == ConstantPoolEntry::REGULAR);
- ldr(rd, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0),
- cond);
- }
+ DCHECK(access == ConstantPoolEntry::REGULAR);
+ USE(access);
+ ldr(rd, MemOperand(pc, 0), cond);
}
}
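
The mov-immediate path above materializes a 32-bit value in two halves; a standalone sketch of the split (not V8 code):

#include <cassert>
#include <cstdint>

// movw writes the low 16 bits and zero-extends; movt then overwrites only
// the high 16 bits, so the pair materializes any 32-bit immediate.
void SplitMovwMovt(uint32_t imm32, uint16_t* movw_imm, uint16_t* movt_imm) {
  *movw_imm = static_cast<uint16_t>(imm32 & 0xFFFF);
  *movt_imm = static_cast<uint16_t>(imm32 >> 16);
}

void CheckSplit() {
  uint16_t lo, hi;
  SplitMovwMovt(0xDEADBEEF, &lo, &hi);
  assert(lo == 0xBEEF && hi == 0xDEAD);
}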
@@ -2787,12 +2697,6 @@ void Assembler::vmov(const DwVfpRegister dst,
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(!scratch.is(ip));
uint32_t enc;
- // If the embedded constant pool is disabled, we can use the normal, inline
- // constant pool. If the embedded constant pool is enabled (via
- // FLAG_enable_embedded_constant_pool), we can only use it where the pool
- // pointer (pp) is valid.
- bool can_use_pool =
- !FLAG_enable_embedded_constant_pool || is_constant_pool_available();
if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
CpuFeatureScope scope(this, VFPv3);
// The double can be encoded in the instruction.
@@ -2804,8 +2708,7 @@ void Assembler::vmov(const DwVfpRegister dst,
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (CpuFeatures::IsSupported(ARMv7) && FLAG_enable_vldr_imm &&
- can_use_pool) {
+ } else if (CpuFeatures::IsSupported(ARMv7) && FLAG_enable_vldr_imm) {
CpuFeatureScope scope(this, ARMv7);
// TODO(jfb) Temporarily turned off until we have constant blinding or
// some equivalent mitigation: an attacker can otherwise control
@@ -2823,17 +2726,9 @@ void Assembler::vmov(const DwVfpRegister dst,
// that's tricky because vldr has a limited reach. Furthermore
// it breaks load locality.
ConstantPoolEntry::Access access = ConstantPoolAddEntry(pc_offset(), imm);
- if (access == ConstantPoolEntry::OVERFLOWED) {
- DCHECK(FLAG_enable_embedded_constant_pool);
- // Emit instructions to load constant pool offset.
- movw(ip, 0);
- movt(ip, 0);
- // Load from constant pool at offset.
- vldr(dst, MemOperand(pp, ip));
- } else {
- DCHECK(access == ConstantPoolEntry::REGULAR);
- vldr(dst, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0));
- }
+ DCHECK(access == ConstantPoolEntry::REGULAR);
+ USE(access);
+ vldr(dst, MemOperand(pc, 0));
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
@@ -4015,19 +3910,47 @@ void Assembler::vdup(NeonSize size, QwNeonRegister dst, Register src) {
0xB * B8 | d * B7 | E * B5 | B4);
}
-void Assembler::vdup(QwNeonRegister dst, SwVfpRegister src) {
- DCHECK(IsEnabled(NEON));
- // Instruction details available in ARM DDI 0406C.b, A8-884.
- int index = src.code() & 1;
- int d_reg = src.code() / 2;
- int imm4 = 4 | index << 3; // esize = 32, index in bit 3.
+enum NeonRegType { NEON_D, NEON_Q };
+
+void NeonSplitCode(NeonRegType type, int code, int* vm, int* m, int* encoding) {
+ if (type == NEON_D) {
+ DwVfpRegister::split_code(code, vm, m);
+ } else {
+ DCHECK_EQ(type, NEON_Q);
+ QwNeonRegister::split_code(code, vm, m);
+ *encoding |= B6;
+ }
+}
+
+static Instr EncodeNeonDupOp(NeonSize size, NeonRegType reg_type, int dst_code,
+ DwVfpRegister src, int index) {
+ DCHECK_NE(Neon64, size);
+ int sz = static_cast<int>(size);
+ DCHECK_LE(0, index);
+ DCHECK_GT(kSimd128Size / (1 << sz), index);
+ int imm4 = (1 << sz) | ((index << (sz + 1)) & 0xF);
+ int qbit = 0;
int vd, d;
- dst.split_code(&vd, &d);
+ NeonSplitCode(reg_type, dst_code, &vd, &d, &qbit);
int vm, m;
- DwVfpRegister::from_code(d_reg).split_code(&vm, &m);
+ src.split_code(&vm, &m);
- emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | imm4 * B16 | vd * B12 | 0x18 * B7 |
- B6 | m * B5 | vm);
+ return 0x1E7U * B23 | d * B22 | 0x3 * B20 | imm4 * B16 | vd * B12 |
+ 0x18 * B7 | qbit | m * B5 | vm;
+}
+
+void Assembler::vdup(NeonSize size, DwVfpRegister dst, DwVfpRegister src,
+ int index) {
+ DCHECK(IsEnabled(NEON));
+ // Instruction details available in ARM DDI 0406C.b, A8-884.
+ emit(EncodeNeonDupOp(size, NEON_D, dst.code(), src, index));
+}
+
+void Assembler::vdup(NeonSize size, QwNeonRegister dst, DwVfpRegister src,
+ int index) {
+ // Instruction details available in ARM DDI 0406C.b, A8-884.
+ DCHECK(IsEnabled(NEON));
+ emit(EncodeNeonDupOp(size, NEON_Q, dst.code(), src, index));
}
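
A worked check of the imm4 field built by EncodeNeonDupOp above (layout per ARM DDI 0406C.b, A8-884: a one-hot size marker in the low bits, the lane index in the bits above it):

#include <cassert>

// sz is log2 of the element size in bytes: 0 is 8-bit, 1 is 16-bit, 2 is 32-bit.
int DupImm4(int sz, int index) {
  return (1 << sz) | ((index << (sz + 1)) & 0xF);
}

void CheckDupImm4() {
  assert(DupImm4(0, 7) == 0xF);  // vdup.8, lane 7: index in bits [3:1]
  assert(DupImm4(1, 3) == 0xE);  // vdup.16, lane 3: index in bits [3:2]
  assert(DupImm4(2, 1) == 0xC);  // vdup.32, lane 1: index in bit 3
}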
// Encode NEON vcvt.src_type.dst_type instruction.
@@ -4082,18 +4005,6 @@ void Assembler::vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src) {
emit(EncodeNeonVCVT(U32, dst, F32, src));
}
-enum NeonRegType { NEON_D, NEON_Q };
-
-void NeonSplitCode(NeonRegType type, int code, int* vm, int* m, int* encoding) {
- if (type == NEON_D) {
- DwVfpRegister::split_code(code, vm, m);
- } else {
- DCHECK_EQ(type, NEON_Q);
- QwNeonRegister::split_code(code, vm, m);
- *encoding |= B6;
- }
-}
-
enum UnaryOp { VMVN, VSWP, VABS, VABSF, VNEG, VNEGF };
static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size,
@@ -4508,30 +4419,55 @@ void Assembler::vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
emit(EncodeNeonBinOp(VMAX, dt, dst, src1, src2));
}
-enum NeonShiftOp { VSHL, VSHR };
+enum NeonShiftOp { VSHL, VSHR, VSLI, VSRI };
-static Instr EncodeNeonShiftOp(NeonShiftOp op, NeonDataType dt,
- QwNeonRegister dst, QwNeonRegister src,
+static Instr EncodeNeonShiftOp(NeonShiftOp op, NeonSize size, bool is_unsigned,
+ NeonRegType reg_type, int dst_code, int src_code,
int shift) {
- int vd, d;
- dst.split_code(&vd, &d);
- int vm, m;
- src.split_code(&vm, &m);
- int size_in_bits = kBitsPerByte << NeonSz(dt);
- int op_encoding = 0;
int imm6 = 0;
- if (op == VSHL) {
- DCHECK(shift >= 0 && size_in_bits > shift);
- imm6 = size_in_bits + shift;
- op_encoding = 0x5 * B8;
- } else {
- DCHECK_EQ(VSHR, op);
- DCHECK(shift > 0 && size_in_bits >= shift);
- imm6 = 2 * size_in_bits - shift;
- op_encoding = NeonU(dt) * B24;
+ int size_in_bits = kBitsPerByte << static_cast<int>(size);
+ int op_encoding = 0;
+ switch (op) {
+ case VSHL: {
+ DCHECK(shift >= 0 && size_in_bits > shift);
+ imm6 = size_in_bits + shift;
+ op_encoding = 0x5 * B8;
+ break;
+ }
+ case VSHR: {
+ DCHECK(shift > 0 && size_in_bits >= shift);
+ imm6 = 2 * size_in_bits - shift;
+ if (is_unsigned) op_encoding |= B24;
+ break;
+ }
+ case VSLI: {
+ DCHECK(shift >= 0 && size_in_bits > shift);
+ imm6 = size_in_bits + shift;
+ int L = imm6 >> 6;
+ imm6 &= 0x3F;
+ op_encoding = B24 | 0x5 * B8 | L * B7;
+ break;
+ }
+ case VSRI: {
+ DCHECK(shift > 0 && size_in_bits >= shift);
+ imm6 = 2 * size_in_bits - shift;
+ int L = imm6 >> 6;
+ imm6 &= 0x3F;
+ op_encoding = B24 | 0x4 * B8 | L * B7;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
}
- return 0x1E5U * B23 | d * B22 | imm6 * B16 | vd * B12 | B6 | m * B5 | B4 |
- vm | op_encoding;
+
+ int vd, d;
+ NeonSplitCode(reg_type, dst_code, &vd, &d, &op_encoding);
+ int vm, m;
+ NeonSplitCode(reg_type, src_code, &vm, &m, &op_encoding);
+
+ return 0x1E5U * B23 | d * B22 | imm6 * B16 | vd * B12 | m * B5 | B4 | vm |
+ op_encoding;
}
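
The imm6 field above carries both the element size and the shift amount. A sketch of the two encodings; on the decoder side the size is recovered as the largest power of two not exceeding imm6, and the opcode bits (the 0x5 * B8 form vs. the B24 forms) say whether the shift is left or right:

#include <cassert>

int Imm6ForShiftLeft(int size_in_bits, int shift) {
  assert(shift >= 0 && shift < size_in_bits);
  return size_in_bits + shift;      // imm6 in [size, 2*size - 1]
}

int Imm6ForShiftRight(int size_in_bits, int shift) {
  assert(shift > 0 && shift <= size_in_bits);
  return 2 * size_in_bits - shift;  // imm6 in [size, 2*size - 1]
}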
void Assembler::vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
@@ -4539,7 +4475,8 @@ void Assembler::vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
DCHECK(IsEnabled(NEON));
// Qd = vshl(Qm, bits) SIMD shift left immediate.
// Instruction details available in ARM DDI 0406C.b, A8-1046.
- emit(EncodeNeonShiftOp(VSHL, dt, dst, src, shift));
+ emit(EncodeNeonShiftOp(VSHL, NeonDataTypeToSize(dt), false, NEON_Q,
+ dst.code(), src.code(), shift));
}
void Assembler::vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
@@ -4547,7 +4484,26 @@ void Assembler::vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
DCHECK(IsEnabled(NEON));
// Qd = vshr(Qm, bits) SIMD shift right immediate.
// Instruction details available in ARM DDI 0406C.b, A8-1052.
- emit(EncodeNeonShiftOp(VSHR, dt, dst, src, shift));
+ emit(EncodeNeonShiftOp(VSHR, NeonDataTypeToSize(dt), NeonU(dt), NEON_Q,
+ dst.code(), src.code(), shift));
+}
+
+void Assembler::vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src,
+ int shift) {
+ DCHECK(IsEnabled(NEON));
+ // Dd = vsli(Dm, bits) SIMD shift left and insert.
+ // Instruction details available in ARM DDI 0406C.b, A8-1056.
+ emit(EncodeNeonShiftOp(VSLI, size, false, NEON_D, dst.code(), src.code(),
+ shift));
+}
+
+void Assembler::vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src,
+ int shift) {
+ DCHECK(IsEnabled(NEON));
+ // Dd = vsri(Dm, bits) SIMD shift right and insert.
+ // Instruction details available in ARM DDI 0406C.b, A8-1062.
+ emit(EncodeNeonShiftOp(VSRI, size, false, NEON_D, dst.code(), src.code(),
+ shift));
}
static Instr EncodeNeonEstimateOp(bool is_rsqrt, QwNeonRegister dst,
@@ -4591,13 +4547,16 @@ void Assembler::vrsqrts(QwNeonRegister dst, QwNeonRegister src1,
emit(EncodeNeonBinOp(VRSQRTS, dst, src1, src2));
}
-enum NeonPairwiseOp { VPMIN, VPMAX };
+enum NeonPairwiseOp { VPADD, VPMIN, VPMAX };
static Instr EncodeNeonPairwiseOp(NeonPairwiseOp op, NeonDataType dt,
DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2) {
int op_encoding = 0;
switch (op) {
+ case VPADD:
+ op_encoding = 0xB * B8 | B4;
+ break;
case VPMIN:
op_encoding = 0xA * B8 | B4;
break;
@@ -4620,6 +4579,30 @@ static Instr EncodeNeonPairwiseOp(NeonPairwiseOp op, NeonDataType dt,
n * B7 | m * B5 | vm | op_encoding;
}
+void Assembler::vpadd(DwVfpRegister dst, DwVfpRegister src1,
+ DwVfpRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Dd = vpadd(Dn, Dm) SIMD floating point pairwise ADD.
+ // Instruction details available in ARM DDI 0406C.b, A8-982.
+ int vd, d;
+ dst.split_code(&vd, &d);
+ int vn, n;
+ src1.split_code(&vn, &n);
+ int vm, m;
+ src2.split_code(&vm, &m);
+
+ emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 |
+ m * B5 | vm);
+}
+
+void Assembler::vpadd(NeonSize size, DwVfpRegister dst, DwVfpRegister src1,
+ DwVfpRegister src2) {
+ DCHECK(IsEnabled(NEON));
+ // Dd = vpadd(Dn, Dm) SIMD integer pairwise ADD.
+ // Instruction details available in ARM DDI 0406C.b, A8-980.
+ emit(EncodeNeonPairwiseOp(VPADD, NeonSizeToDataType(size), dst, src1, src2));
+}
+
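
Semantics of the pairwise add the two new emitters produce, as a scalar model (a 4-lane, 16-bit illustration, not V8 code): destination lanes are sums of adjacent source lanes, first from Dn, then from Dm.

#include <cstdint>

void PairwiseAddI16(const int16_t n[4], const int16_t m[4], int16_t d[4]) {
  d[0] = static_cast<int16_t>(n[0] + n[1]);
  d[1] = static_cast<int16_t>(n[2] + n[3]);
  d[2] = static_cast<int16_t>(m[0] + m[1]);
  d[3] = static_cast<int16_t>(m[2] + m[3]);
}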
void Assembler::vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2) {
DCHECK(IsEnabled(NEON));
@@ -4747,10 +4730,14 @@ static Instr EncodeNeonSizedOp(NeonSizedOp op, NeonRegType reg_type,
}
void Assembler::vzip(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
- DCHECK(IsEnabled(NEON));
- // vzip.<size>(Dn, Dm) SIMD zip (interleave).
- // Instruction details available in ARM DDI 0406C.b, A8-1102.
- emit(EncodeNeonSizedOp(VZIP, NEON_D, size, src1.code(), src2.code()));
+ if (size == Neon32) { // vzip.32 Dd, Dm is a pseudo-op for vtrn.32 Dd, Dm.
+ vtrn(size, src1, src2);
+ } else {
+ DCHECK(IsEnabled(NEON));
+ // vzip.<size>(Dn, Dm) SIMD zip (interleave).
+ // Instruction details available in ARM DDI 0406C.b, A8-1102.
+ emit(EncodeNeonSizedOp(VZIP, NEON_D, size, src1.code(), src2.code()));
+ }
}
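
Why the Neon32 case above can defer to vtrn: zipping two 2-lane d-registers in place is exactly a 2x2 transpose. Scalar model (not V8 code):

#include <cstdint>
#include <utility>

// vzip.32 {d0,d1},{m0,m1} -> {d0,m0},{d1,m1}, which is vtrn.32's swap of
// d[1] with m[0].
void Zip32(uint32_t d[2], uint32_t m[2]) {
  std::swap(d[1], m[0]);
}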
void Assembler::vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
@@ -4761,10 +4748,14 @@ void Assembler::vzip(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
}
void Assembler::vuzp(NeonSize size, DwVfpRegister src1, DwVfpRegister src2) {
- DCHECK(IsEnabled(NEON));
- // vuzp.<size>(Dn, Dm) SIMD un-zip (de-interleave).
- // Instruction details available in ARM DDI 0406C.b, A8-1100.
- emit(EncodeNeonSizedOp(VUZP, NEON_D, size, src1.code(), src2.code()));
+ if (size == Neon32) { // vuzp.32 Dd, Dm is a pseudo-op for vtrn.32 Dd, Dm.
+ vtrn(size, src1, src2);
+ } else {
+ DCHECK(IsEnabled(NEON));
+ // vuzp.<size>(Dn, Dm) SIMD un-zip (de-interleave).
+ // Instruction details available in ARM DDI 0406C.b, A8-1100.
+ emit(EncodeNeonSizedOp(VUZP, NEON_D, size, src1.code(), src2.code()));
+ }
}
void Assembler::vuzp(NeonSize size, QwNeonRegister src1, QwNeonRegister src2) {
@@ -4951,7 +4942,14 @@ void Assembler::GrowBuffer() {
} else {
desc.buffer_size = buffer_size_ + 1*MB;
}
- CHECK_GT(desc.buffer_size, 0); // no overflow
+
+ // Some internal data structures overflow for very large buffers, so
+ // kMaximalBufferSize must be kept small enough to avoid that.
+ if (desc.buffer_size > kMaximalBufferSize ||
+ static_cast<size_t>(desc.buffer_size) >
+ isolate_data().max_old_generation_size_) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
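
A sketch of the resulting grow-then-cap policy, assuming the usual doubling branch for small buffers just above this hunk (not shown here); kMaximalBufferSizeSketch mirrors the 512 MB constant added to assembler-arm.h:

#include <cstddef>

constexpr size_t MB = 1 << 20;
constexpr size_t kMaximalBufferSizeSketch = 512 * MB;

// Returns 0 to signal that the caller must treat the growth as fatal OOM.
size_t NextBufferSize(size_t current, size_t max_old_generation_size) {
  size_t next = current < 1 * MB ? 2 * current : current + 1 * MB;
  if (next > kMaximalBufferSizeSketch || next > max_old_generation_size)
    return 0;
  return next;
}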
@@ -5046,52 +5044,37 @@ ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
rmode != RelocInfo::NONE64);
bool sharing_ok = RelocInfo::IsNone(rmode) ||
!(serializer_enabled() || rmode < RelocInfo::CELL);
- if (FLAG_enable_embedded_constant_pool) {
- return constant_pool_builder_.AddEntry(position, value, sharing_ok);
- } else {
- DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants);
- if (pending_32_bit_constants_.empty()) {
- first_const_pool_32_use_ = position;
- }
- ConstantPoolEntry entry(position, value, sharing_ok);
- pending_32_bit_constants_.push_back(entry);
-
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
- return ConstantPoolEntry::REGULAR;
+ DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants);
+ if (pending_32_bit_constants_.empty()) {
+ first_const_pool_32_use_ = position;
}
+ ConstantPoolEntry entry(position, value, sharing_ok);
+ pending_32_bit_constants_.push_back(entry);
+
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
+ return ConstantPoolEntry::REGULAR;
}
ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
double value) {
- if (FLAG_enable_embedded_constant_pool) {
- return constant_pool_builder_.AddEntry(position, value);
- } else {
- DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
- if (pending_64_bit_constants_.empty()) {
- first_const_pool_64_use_ = position;
- }
- ConstantPoolEntry entry(position, value);
- pending_64_bit_constants_.push_back(entry);
-
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
- return ConstantPoolEntry::REGULAR;
+ DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
+ if (pending_64_bit_constants_.empty()) {
+ first_const_pool_64_use_ = position;
}
+ ConstantPoolEntry entry(position, value);
+ pending_64_bit_constants_.push_back(entry);
+
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
+ return ConstantPoolEntry::REGULAR;
}
void Assembler::BlockConstPoolFor(int instructions) {
- if (FLAG_enable_embedded_constant_pool) {
- // Should be a no-op if using an embedded constant pool.
- DCHECK(pending_32_bit_constants_.empty());
- DCHECK(pending_64_bit_constants_.empty());
- return;
- }
-
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
// Max pool start (if we need a jump and an alignment).
@@ -5114,13 +5097,6 @@ void Assembler::BlockConstPoolFor(int instructions) {
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- if (FLAG_enable_embedded_constant_pool) {
- // Should be a no-op if using an embedded constant pool.
- DCHECK(pending_32_bit_constants_.empty());
- DCHECK(pending_64_bit_constants_.empty());
- return;
- }
-
// Some short sequences of instructions mustn't be broken up by constant pool
// emission; such sequences are protected by calls to BlockConstPoolFor and
// BlockConstPoolScope.
@@ -5333,61 +5309,6 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
-
-void Assembler::PatchConstantPoolAccessInstruction(
- int pc_offset, int offset, ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type) {
- DCHECK(FLAG_enable_embedded_constant_pool);
- Address pc = buffer_ + pc_offset;
-
- // Patch vldr/ldr instruction with correct offset.
- Instr instr = instr_at(pc);
- if (access == ConstantPoolEntry::OVERFLOWED) {
- if (CpuFeatures::IsSupported(ARMv7)) {
- CpuFeatureScope scope(this, ARMv7);
- // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
- Instr next_instr = instr_at(pc + kInstrSize);
- DCHECK((IsMovW(instr) && Instruction::ImmedMovwMovtValue(instr) == 0));
- DCHECK((IsMovT(next_instr) &&
- Instruction::ImmedMovwMovtValue(next_instr) == 0));
- instr_at_put(pc, PatchMovwImmediate(instr, offset & 0xffff));
- instr_at_put(pc + kInstrSize,
- PatchMovwImmediate(next_instr, offset >> 16));
- } else {
- // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0].
- Instr instr_2 = instr_at(pc + kInstrSize);
- Instr instr_3 = instr_at(pc + 2 * kInstrSize);
- Instr instr_4 = instr_at(pc + 3 * kInstrSize);
- DCHECK((IsMovImmed(instr) && Instruction::Immed8Value(instr) == 0));
- DCHECK((IsOrrImmed(instr_2) && Instruction::Immed8Value(instr_2) == 0) &&
- GetRn(instr_2).is(GetRd(instr_2)));
- DCHECK((IsOrrImmed(instr_3) && Instruction::Immed8Value(instr_3) == 0) &&
- GetRn(instr_3).is(GetRd(instr_3)));
- DCHECK((IsOrrImmed(instr_4) && Instruction::Immed8Value(instr_4) == 0) &&
- GetRn(instr_4).is(GetRd(instr_4)));
- instr_at_put(pc, PatchShiftImm(instr, (offset & kImm8Mask)));
- instr_at_put(pc + kInstrSize,
- PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
- instr_at_put(pc + 2 * kInstrSize,
- PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
- instr_at_put(pc + 3 * kInstrSize,
- PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
- }
- } else if (type == ConstantPoolEntry::DOUBLE) {
- // Instruction to patch must be 'vldr rd, [pp, #0]'.
- DCHECK((IsVldrDPpImmediateOffset(instr) &&
- GetVldrDRegisterImmediateOffset(instr) == 0));
- DCHECK(is_uint10(offset));
- instr_at_put(pc, SetVldrDRegisterImmediateOffset(instr, offset));
- } else {
- // Instruction to patch must be 'ldr rd, [pp, #0]'.
- DCHECK((IsLdrPpImmediateOffset(instr) &&
- GetLdrRegisterImmediateOffset(instr) == 0));
- DCHECK(is_uint12(offset));
- instr_at_put(pc, SetLdrRegisterImmediateOffset(instr, offset));
- }
-}
-
PatchingAssembler::PatchingAssembler(IsolateData isolate_data, byte* address,
int instructions)
: Assembler(isolate_data, address, instructions * kInstrSize + kGap) {
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 076274d8bc..a628493723 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -75,13 +75,13 @@ namespace internal {
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) \
V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
#define ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(V) \
V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
- V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) \
+ V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
// clang-format on
// CPU Registers.
@@ -142,7 +142,6 @@ struct Register {
};
// r7: context register
-// r8: constant pool pointer register if FLAG_enable_embedded_constant_pool.
// r9: lithium scratch
#define DECLARE_REGISTER(R) constexpr Register R = {Register::kCode_##R};
GENERAL_REGISTERS(DECLARE_REGISTER)
@@ -424,12 +423,13 @@ constexpr QwNeonRegister q15 = { 15 };
// Aliases for double registers.
constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
constexpr LowDwVfpRegister kLastCalleeSavedDoubleReg = d15;
-// kDoubleRegZero and kScratchDoubleReg must pair to form kScratchQuadReg. SIMD
-// code depends on kDoubleRegZero before kScratchDoubleReg.
-constexpr LowDwVfpRegister kDoubleRegZero = d14;
-constexpr LowDwVfpRegister kScratchDoubleReg = d15;
-// After using kScratchQuadReg, kDoubleRegZero must be reset to 0.
+constexpr LowDwVfpRegister kDoubleRegZero = d13;
+constexpr LowDwVfpRegister kScratchDoubleReg = d14;
+// This scratch q-register aliases d14 (kScratchDoubleReg) and d15, but is only
+// used if NEON is supported, which implies VFP32DREGS. When there are only 16
+// d-registers, d15 is still allocatable.
constexpr QwNeonRegister kScratchQuadReg = q7;
+constexpr LowDwVfpRegister kScratchDoubleReg2 = d15;
// Coprocessor register
struct CRegister {
@@ -1332,7 +1332,8 @@ class Assembler : public AssemblerBase {
void vmov(QwNeonRegister dst, QwNeonRegister src);
void vdup(NeonSize size, QwNeonRegister dst, Register src);
- void vdup(QwNeonRegister dst, SwVfpRegister src);
+ void vdup(NeonSize size, QwNeonRegister dst, DwVfpRegister src, int index);
+ void vdup(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int index);
void vcvt_f32_s32(QwNeonRegister dst, QwNeonRegister src);
void vcvt_f32_u32(QwNeonRegister dst, QwNeonRegister src);
@@ -1372,12 +1373,17 @@ class Assembler : public AssemblerBase {
void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vmax(NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2);
+ void vpadd(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
+ void vpadd(NeonSize size, DwVfpRegister dst, DwVfpRegister src1,
+ DwVfpRegister src2);
void vpmin(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2);
void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2);
void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
+ void vsli(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
+ void vsri(NeonSize size, DwVfpRegister dst, DwVfpRegister src, int shift);
// vrecpe and vrsqrte only support floating point lanes.
void vrecpe(QwNeonRegister dst, QwNeonRegister src);
void vrsqrte(QwNeonRegister dst, QwNeonRegister src);
@@ -1558,12 +1564,6 @@ class Assembler : public AssemblerBase {
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsVldrDRegisterImmediate(Instr instr);
- static Instr GetConsantPoolLoadPattern();
- static Instr GetConsantPoolLoadMask();
- static bool IsLdrPpRegOffset(Instr instr);
- static Instr GetLdrPpRegOffsetPattern();
- static bool IsLdrPpImmediateOffset(Instr instr);
- static bool IsVldrDPpImmediateOffset(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static int GetVldrDRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
@@ -1628,19 +1628,12 @@ class Assembler : public AssemblerBase {
}
}
- int EmitEmbeddedConstantPool() {
- DCHECK(FLAG_enable_embedded_constant_pool);
- return constant_pool_builder_.Emit(this);
- }
-
- bool ConstantPoolAccessIsInOverflow() const {
- return constant_pool_builder_.NextAccess(ConstantPoolEntry::INTPTR) ==
- ConstantPoolEntry::OVERFLOWED;
- }
-
void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
ConstantPoolEntry::Access access,
- ConstantPoolEntry::Type type);
+ ConstantPoolEntry::Type type) {
+ // No embedded constant pool support.
+ UNREACHABLE();
+ }
protected:
// Relocation for a type-recording IC has the AST id added to it. This
@@ -1734,6 +1727,9 @@ class Assembler : public AssemblerBase {
std::vector<ConstantPoolEntry> pending_64_bit_constants_;
private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
int next_buffer_check_; // pc offset of next buffer check
// Constant pool generation
@@ -1763,8 +1759,6 @@ class Assembler : public AssemblerBase {
int first_const_pool_32_use_;
int first_const_pool_64_use_;
- ConstantPoolBuilder constant_pool_builder_;
-
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 12b35ba5e7..fc59f4007e 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -983,9 +983,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ mov(r2, Operand(pending_handler_offset_address));
__ ldr(r2, MemOperand(r2));
__ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
- if (FLAG_enable_embedded_constant_pool) {
- __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r1);
- }
__ add(pc, r1, r2);
}
@@ -1029,9 +1026,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r3: argc
// r4: argv
StackFrame::Type marker = type();
- if (FLAG_enable_embedded_constant_pool) {
- __ mov(r8, Operand::Zero());
- }
__ mov(r7, Operand(StackFrame::TypeToMarker(marker)));
__ mov(r6, Operand(StackFrame::TypeToMarker(marker)));
__ mov(r5,
@@ -1039,7 +1033,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ldr(r5, MemOperand(r5));
__ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
__ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
- (FLAG_enable_embedded_constant_pool ? r8.bit() : 0) |
ip.bit());
// Set up frame pointer for the frame to be pushed.
@@ -1152,75 +1145,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- // This case is handled prior to the RegExpExecStub call.
- __ Abort(kUnexpectedRegExpExecCall);
-#else // V8_INTERPRETED_REGEXP
- // Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 9;
- const int kParameterRegisters = 4;
- __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
-
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are before that on the stack or in registers.
-
- // Argument 9 (sp[20]): Pass current isolate address.
- __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
- __ str(r5, MemOperand(sp, 5 * kPointerSize));
-
- // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript.
- __ mov(r5, Operand(1));
- __ str(r5, MemOperand(sp, 4 * kPointerSize));
-
- // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ mov(r5, Operand(address_of_regexp_stack_memory_address));
- __ ldr(r5, MemOperand(r5, 0));
- __ mov(r6, Operand(address_of_regexp_stack_memory_size));
- __ ldr(r6, MemOperand(r6, 0));
- __ add(r5, r5, Operand(r6));
- __ str(r5, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(r5, Operand::Zero());
- __ str(r5, MemOperand(sp, 2 * kPointerSize));
-
- // Argument 5 (sp[4]): static offsets vector buffer.
- __ mov(
- r5,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
- __ str(r5, MemOperand(sp, 1 * kPointerSize));
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- CHECK(r3.is(RegExpExecDescriptor::StringEndRegister()));
- CHECK(r2.is(RegExpExecDescriptor::StringStartRegister()));
-
- // Argument 2 (r1): Previous index.
- CHECK(r1.is(RegExpExecDescriptor::LastIndexRegister()));
-
- // Argument 1 (r0): Subject string.
- CHECK(r0.is(RegExpExecDescriptor::StringRegister()));
-
- // Locate the code entry and call it.
- Register code_reg = RegExpExecDescriptor::CodeRegister();
- __ add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm, code_reg);
-
- __ LeaveExitFrame(false, no_reg, true);
-
- __ SmiTag(r0);
- __ Ret();
-#endif // V8_INTERPRETED_REGEXP
-}
-
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r0 : number of arguments to the construct function
// r1 : the function to call
@@ -2875,9 +2799,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ push(call_data);
Register scratch = call_data;
- if (!call_data_undefined()) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- }
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
// return value
__ push(scratch);
// return value default
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 0b86f3e149..21794a5a5c 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -324,6 +324,8 @@ enum LFlag {
Short = 0 << 22 // Short load/store coprocessor.
};
+// Neon sizes.
+enum NeonSize { Neon8 = 0x0, Neon16 = 0x1, Neon32 = 0x2, Neon64 = 0x3 };
// NEON data type
enum NeonDataType {
@@ -339,6 +341,16 @@ enum NeonDataType {
inline int NeonU(NeonDataType dt) { return static_cast<int>(dt) >> 2; }
inline int NeonSz(NeonDataType dt) { return static_cast<int>(dt) & 0x3; }
+// Convert sizes to data types (U bit is clear).
+inline NeonDataType NeonSizeToDataType(NeonSize size) {
+ DCHECK_NE(Neon64, size);
+ return static_cast<NeonDataType>(size);
+}
+
+inline NeonSize NeonDataTypeToSize(NeonDataType dt) {
+ return static_cast<NeonSize>(NeonSz(dt));
+}
+
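
The two converters above lean on the NeonDataType layout visible a few lines up: bits [1:0] carry the size (NeonSz) and bit 2 the signedness (NeonU), so dropping bit 2 turns a data type into a NeonSize. A round-trip check, with the enum values assumed from that layout:

#include <cassert>

enum DT { S8 = 0, S16 = 1, S32 = 2, U8 = 4, U16 = 5, U32 = 6 };  // assumed

void CheckLayout() {
  assert((U32 >> 2) == 1 && (S32 >> 2) == 0);    // NeonU: bit 2
  assert((U16 & 0x3) == 1 && (S16 & 0x3) == 1);  // NeonSz matches NeonSize
}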
enum NeonListType {
nlt_1 = 0x7,
nlt_2 = 0xA,
@@ -346,13 +358,6 @@ enum NeonListType {
nlt_4 = 0x2
};
-enum NeonSize {
- Neon8 = 0x0,
- Neon16 = 0x1,
- Neon32 = 0x2,
- Neon64 = 0x3
-};
-
// -----------------------------------------------------------------------------
// Supervisor Call (svc) specific support.
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 37687f0d55..b33b977879 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -34,30 +34,28 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- if (FLAG_zap_code_space) {
- // Fail hard and early if we enter this code object again.
- byte* pointer = code->FindCodeAgeSequence();
- if (pointer != NULL) {
- pointer += kNoCodeAgeSequenceLength;
- } else {
- pointer = code->instruction_start();
- }
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
- {
- PatchingAssembler patcher(Assembler::IsolateData(isolate), pointer, 1);
- patcher.bkpt(0);
- patcher.FlushICache(isolate);
- }
+ {
+ PatchingAssembler patcher(Assembler::IsolateData(isolate), pointer, 1);
+ patcher.bkpt(0);
+ patcher.FlushICache(isolate);
+ }
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int osr_offset = data->OsrPcOffset()->value();
- if (osr_offset > 0) {
- PatchingAssembler patcher(Assembler::IsolateData(isolate),
- code->instruction_start() + osr_offset, 1);
- patcher.bkpt(0);
- patcher.FlushICache(isolate);
- }
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ PatchingAssembler patcher(Assembler::IsolateData(isolate),
+ code_start_address + osr_offset, 1);
+ patcher.bkpt(0);
+ patcher.FlushICache(isolate);
}
DeoptimizationInputData* deopt_data =
@@ -124,8 +122,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kFloatRegsSize = kFloatSize * SwVfpRegister::kMaxNumRegisters;
// Save all allocatable VFP registers before messing with them.
- DCHECK(kDoubleRegZero.code() == 14);
- DCHECK(kScratchDoubleReg.code() == 15);
+ DCHECK(kDoubleRegZero.code() == 13);
+ DCHECK(kScratchDoubleReg.code() == 14);
{
// We use a run-time check for VFP32DREGS.
@@ -141,11 +139,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
__ vstm(db_w, sp, d0, d15);
- // Push registers s0-s15, and possibly s16-s31, on the stack.
- // If s16-s31 are not pushed, decrease the stack pointer instead.
- __ vstm(db_w, sp, s16, s31, ne);
- __ sub(sp, sp, Operand(16 * kFloatSize), LeaveCC, eq);
- __ vstm(db_w, sp, s0, s15);
+ // Push registers s0-s31 on the stack.
+ __ vstm(db_w, sp, s0, s31);
}
// Push all 16 registers (needed to populate FrameDescription::registers_).
@@ -391,8 +386,8 @@ void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
- DCHECK(FLAG_enable_embedded_constant_pool);
- SetFrameSlot(offset, value);
+ // No embedded constant pool support.
+ UNREACHABLE();
}
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index c9e7b1844b..0b8fee10f4 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -667,6 +667,28 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
case 'v': {
return FormatVFPinstruction(instr, format);
}
+ case 'A': {
+ // Print pc-relative address.
+ int offset = instr->Offset12Value();
+ byte* pc = reinterpret_cast<byte*>(instr) + Instruction::kPCReadOffset;
+ byte* addr;
+ switch (instr->PUField()) {
+ case db_x: {
+ addr = pc - offset;
+ break;
+ }
+ case ib_x: {
+ addr = pc + offset;
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ return -1;
+ }
+ }
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%p", addr);
+ return 1;
+ }
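
What the new 'A' specifier prints, as a standalone computation (kPCReadOffset is the ARM fetch-ahead of 8 bytes; the db_x/ib_x cases choose subtract vs. add):

#include <cstdint>

uintptr_t PcRelativeAddress(uintptr_t instr_addr, int offset12, bool add) {
  const int kPCReadOffset = 8;  // pc reads as instruction address + 8
  uintptr_t pc = instr_addr + kPCReadOffset;
  return add ? pc + offset12 : pc - offset12;
}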
case 'S':
case 'D': {
return FormatVFPRegister(instr, format);
@@ -1033,11 +1055,19 @@ void Decoder::DecodeType2(Instruction* instr) {
break;
}
case db_x: {
- Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
+ if (instr->HasL() && (instr->RnValue() == kPCRegister)) {
+ Format(instr, "'memop'cond'b 'rd, [pc, #-'off12]'w (addr 'A)");
+ } else {
+ Format(instr, "'memop'cond'b 'rd, ['rn, #-'off12]'w");
+ }
break;
}
case ib_x: {
- Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
+ if (instr->HasL() && (instr->RnValue() == kPCRegister)) {
+ Format(instr, "'memop'cond'b 'rd, [pc, #+'off12]'w (addr 'A)");
+ } else {
+ Format(instr, "'memop'cond'b 'rd, ['rn, #+'off12]'w");
+ }
break;
}
default: {
@@ -1950,6 +1980,13 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
op, size, Vd, Vn, Vm);
break;
}
+ case 0xb: {
+ // vpadd.i<size> Dd, Dn, Dm.
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vpadd.i%d d%d, d%d, d%d",
+ size, Vd, Vn, Vm);
+ break;
+ }
case 0xd: {
if (instr->Bit(4) == 0) {
const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
@@ -2130,10 +2167,16 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
break;
}
case 0xd: {
- if (instr->Bit(21) == 0 && instr->Bit(6) == 1 && instr->Bit(4) == 1) {
- // vmul.f32 Qd, Qn, Qm
+ if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
+ instr->Bit(4) == 1) {
+ // vmul.f32 Qd, Qn, Qm
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmul.f32 q%d, q%d, q%d", Vd, Vn, Vm);
+ } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 0 &&
+ instr->Bit(4) == 0) {
+ // vpadd.f32 Dd, Dn, Dm.
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+ "vpadd.f32 d%d, d%d, d%d", Vd, Vn, Vm);
} else {
Unknown(instr);
}
@@ -2168,11 +2211,30 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
"vmovl.u%d q%d, d%d", imm3 * 8, Vd, Vm);
} else if (instr->Opc1Value() == 7 && instr->Bit(4) == 0) {
if (instr->Bits(11, 7) == 0x18) {
- int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
- int index = instr->Bit(19);
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
- "vdup q%d, d%d[%d]", Vd, Vm, index);
+ int imm4 = instr->Bits(19, 16);
+ int size = 0, index = 0;
+ if ((imm4 & 0x1) != 0) {
+ size = 8;
+ index = imm4 >> 1;
+ } else if ((imm4 & 0x2) != 0) {
+ size = 16;
+ index = imm4 >> 2;
+ } else {
+ size = 32;
+ index = imm4 >> 3;
+ }
+ if (instr->Bit(6) == 0) {
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vdup.%i d%d, d%d[%d]",
+ size, Vd, Vm, index);
+ } else {
+ int Vd = instr->VFPDRegValue(kSimd128Precision);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vdup.%i q%d, d%d[%d]",
+ size, Vd, Vm, index);
+ }
} else if (instr->Bits(11, 10) == 0x2) {
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vn = instr->VFPNRegValue(kDoublePrecision);
@@ -2303,6 +2365,27 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.u%d q%d, q%d, #%d",
size, Vd, Vm, shift);
+ } else if (instr->Bit(10) == 1 && instr->Bit(6) == 0 &&
+ instr->Bit(4) == 1) {
+ // vsli.<size> Dd, Dm, shift
+ // vsri.<size> Dd, Dm, shift
+ int imm7 = instr->Bits(21, 16);
+ if (instr->Bit(7) != 0) imm7 += 64;
+ int size = base::bits::RoundDownToPowerOfTwo32(imm7);
+ int shift;
+ char direction;
+ if (instr->Bit(8) == 1) {
+ shift = imm7 - size;
+ direction = 'l'; // vsli
+ } else {
+ shift = 2 * size - imm7;
+ direction = 'r'; // vsri
+ }
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "vs%ci.%d d%d, d%d, #%d",
+ direction, size, Vd, Vm, shift);
} else {
Unknown(instr);
}
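
A round-trip check for the decode above; RoundDownToPowerOfTwo here is a local stand-in for base::bits::RoundDownToPowerOfTwo32:

#include <cassert>

int RoundDownToPowerOfTwo(int x) {
  int p = 1;
  while (2 * p <= x) p *= 2;
  return p;
}

void CheckShiftInsertDecode() {
  // vsli.32 #5 encodes imm7 = 32 + 5; vsri.32 #5 encodes imm7 = 64 - 5.
  int size = RoundDownToPowerOfTwo(37);
  assert(size == 32 && 37 - size == 5);      // vsli: shift = imm7 - size
  size = RoundDownToPowerOfTwo(59);
  assert(size == 32 && 2 * size - 59 == 5);  // vsri: shift = 2*size - imm7
}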
diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc
index b1139bacc4..8529bb541c 100644
--- a/deps/v8/src/arm/frames-arm.cc
+++ b/deps/v8/src/arm/frames-arm.cc
@@ -20,16 +20,16 @@ namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
- DCHECK(FLAG_enable_embedded_constant_pool);
- return pp;
+ UNREACHABLE();
+ return no_reg;
}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
- DCHECK(FLAG_enable_embedded_constant_pool);
- return pp;
+ UNREACHABLE();
+ return no_reg;
}
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index 37927758c3..170c0b1825 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -66,23 +66,11 @@ const int kNumDoubleCalleeSaved = 8;
// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
const int kNumSafepointRegisters = 16;
-// The embedded constant pool pointer (r8/pp) is not included in the safepoint
-// since it is not tagged. This register is preserved in the stack frame where
-// its value will be updated if GC code movement occurs. Including it in the
-// safepoint (where it will not be relocated) would cause a stale value to be
-// restored.
-const RegList kConstantPointerRegMask =
- FLAG_enable_embedded_constant_pool ? (1 << 8) : 0;
-const int kNumConstantPoolPointerReg =
- FLAG_enable_embedded_constant_pool ? 1 : 0;
-
// Define the list of registers actually saved at safepoints.
// Note that the number of saved registers may be smaller than the reserved
// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
-const RegList kSafepointSavedRegisters =
- kJSCallerSaved | (kCalleeSaved & ~kConstantPointerRegMask);
-const int kNumSafepointSavedRegisters =
- kNumJSCallerSaved + kNumCalleeSaved - kNumConstantPoolPointerReg;
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
// ----------------------------------------------------
diff --git a/deps/v8/src/arm/interface-descriptors-arm.cc b/deps/v8/src/arm/interface-descriptors-arm.cc
index 7e56698f00..f2fb703b9f 100644
--- a/deps/v8/src/arm/interface-descriptors-arm.cc
+++ b/deps/v8/src/arm/interface-descriptors-arm.cc
@@ -58,11 +58,6 @@ const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
-const Register RegExpExecDescriptor::StringRegister() { return r0; }
-const Register RegExpExecDescriptor::LastIndexRegister() { return r1; }
-const Register RegExpExecDescriptor::StringStartRegister() { return r2; }
-const Register RegExpExecDescriptor::StringEndRegister() { return r3; }
-const Register RegExpExecDescriptor::CodeRegister() { return r4; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
@@ -162,9 +157,20 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments
+ // r2 : start index (to support rest parameters)
+ // r1 : the target to call
+ Register registers[] = {r1, r0, r2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r0 : number of arguments
+ // r3 : the new target
// r2 : start index (to support rest parameters)
// r1 : the target to call
- Register registers[] = {r1, r2};
+ Register registers[] = {r1, r3, r0, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index f451ba5d3f..7256086b1d 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -497,7 +497,7 @@ void MacroAssembler::RecordWriteField(
add(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- tst(dst, Operand((1 << kPointerSizeLog2) - 1));
+ tst(dst, Operand(kPointerSize - 1));
b(eq, &ok);
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -561,7 +561,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- tst(dst, Operand((1 << kPointerSizeLog2) - 1));
+ tst(dst, Operand(kPointerSize - 1));
b(eq, &ok);
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -770,59 +770,36 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
void MacroAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
- if (FLAG_enable_embedded_constant_pool) {
- if (marker_reg.code() > pp.code()) {
- stm(db_w, sp, pp.bit() | fp.bit() | lr.bit());
- add(fp, sp, Operand(kPointerSize));
- Push(marker_reg);
- } else {
- stm(db_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
- add(fp, sp, Operand(2 * kPointerSize));
- }
+ if (marker_reg.code() > fp.code()) {
+ stm(db_w, sp, fp.bit() | lr.bit());
+ mov(fp, Operand(sp));
+ Push(marker_reg);
} else {
- if (marker_reg.code() > fp.code()) {
- stm(db_w, sp, fp.bit() | lr.bit());
- mov(fp, Operand(sp));
- Push(marker_reg);
- } else {
- stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
- add(fp, sp, Operand(kPointerSize));
- }
+ stm(db_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
+ add(fp, sp, Operand(kPointerSize));
}
} else {
- stm(db_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
- fp.bit() | lr.bit());
- add(fp, sp, Operand(FLAG_enable_embedded_constant_pool ? kPointerSize : 0));
+ stm(db_w, sp, fp.bit() | lr.bit());
+ mov(fp, sp);
}
}
void MacroAssembler::PopCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
- if (FLAG_enable_embedded_constant_pool) {
- if (marker_reg.code() > pp.code()) {
- pop(marker_reg);
- ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
- } else {
- ldm(ia_w, sp, marker_reg.bit() | pp.bit() | fp.bit() | lr.bit());
- }
+ if (marker_reg.code() > fp.code()) {
+ pop(marker_reg);
+ ldm(ia_w, sp, fp.bit() | lr.bit());
} else {
- if (marker_reg.code() > fp.code()) {
- pop(marker_reg);
- ldm(ia_w, sp, fp.bit() | lr.bit());
- } else {
- ldm(ia_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
- }
+ ldm(ia_w, sp, marker_reg.bit() | fp.bit() | lr.bit());
}
} else {
- ldm(ia_w, sp, (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
- fp.bit() | lr.bit());
+ ldm(ia_w, sp, fp.bit() | lr.bit());
}
}
void MacroAssembler::PushStandardFrame(Register function_reg) {
DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code());
stm(db_w, sp, (function_reg.is_valid() ? function_reg.bit() : 0) | cp.bit() |
- (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
int offset = -StandardFrameConstants::kContextOffset;
offset += function_reg.is_valid() ? kPointerSize : 0;
@@ -833,11 +810,7 @@ void MacroAssembler::PushStandardFrame(Register function_reg) {
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of contiguous register values starting with r0.
- // except when FLAG_enable_embedded_constant_pool, which omits pp.
- DCHECK(kSafepointSavedRegisters ==
- (FLAG_enable_embedded_constant_pool
- ? ((1 << (kNumSafepointSavedRegisters + 1)) - 1) & ~pp.bit()
- : (1 << kNumSafepointSavedRegisters) - 1));
+ DCHECK(kSafepointSavedRegisters == (1 << kNumSafepointSavedRegisters) - 1);
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
@@ -867,10 +840,6 @@ void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the highest encoding,
// which means that lowest encodings are closest to the stack pointer.
- if (FLAG_enable_embedded_constant_pool && reg_code > pp.code()) {
- // RegList omits pp.
- reg_code -= 1;
- }
DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
return reg_code;
}
@@ -1110,47 +1079,90 @@ void MacroAssembler::VmovExtended(int dst_code, Register src) {
}
}
-void MacroAssembler::VmovExtended(int dst_code, int src_code,
- Register scratch) {
+void MacroAssembler::VmovExtended(int dst_code, int src_code) {
+ if (src_code == dst_code) return;
+
if (src_code < SwVfpRegister::kMaxNumRegisters &&
dst_code < SwVfpRegister::kMaxNumRegisters) {
// src and dst are both s-registers.
vmov(SwVfpRegister::from_code(dst_code),
SwVfpRegister::from_code(src_code));
- } else if (src_code < SwVfpRegister::kMaxNumRegisters) {
- // src is an s-register.
- vmov(scratch, SwVfpRegister::from_code(src_code));
- VmovExtended(dst_code, scratch);
+ return;
+ }
+ DwVfpRegister dst_d_reg = DwVfpRegister::from_code(dst_code / 2);
+ DwVfpRegister src_d_reg = DwVfpRegister::from_code(src_code / 2);
+ int dst_offset = dst_code & 1;
+ int src_offset = src_code & 1;
+ if (CpuFeatures::IsSupported(NEON)) {
+ // On Neon we can shift and insert from d-registers.
+ if (src_offset == dst_offset) {
+ // Offsets are the same; use vdup to copy the source to the opposite lane.
+ vdup(Neon32, kScratchDoubleReg, src_d_reg, src_offset);
+ src_d_reg = kScratchDoubleReg;
+ src_offset = dst_offset ^ 1;
+ }
+ if (dst_offset) {
+ if (dst_d_reg.is(src_d_reg)) {
+ vdup(Neon32, dst_d_reg, src_d_reg, 0);
+ } else {
+ vsli(Neon64, dst_d_reg, src_d_reg, 32);
+ }
+ } else {
+ if (dst_d_reg.is(src_d_reg)) {
+ vdup(Neon32, dst_d_reg, src_d_reg, 1);
+ } else {
+ vsri(Neon64, dst_d_reg, src_d_reg, 32);
+ }
+ }
+ return;
+ }
+
+ // Without Neon, use the scratch registers to move src and/or dst into
+ // s-registers.
+ int scratchSCode = kScratchDoubleReg.low().code();
+ int scratchSCode2 = kScratchDoubleReg2.low().code();
+ if (src_code < SwVfpRegister::kMaxNumRegisters) {
+ // src is an s-register, dst is not.
+ vmov(kScratchDoubleReg, dst_d_reg);
+ vmov(SwVfpRegister::from_code(scratchSCode + dst_offset),
+ SwVfpRegister::from_code(src_code));
+ vmov(dst_d_reg, kScratchDoubleReg);
} else if (dst_code < SwVfpRegister::kMaxNumRegisters) {
- // dst is an s-register.
- VmovExtended(scratch, src_code);
- vmov(SwVfpRegister::from_code(dst_code), scratch);
+ // dst is an s-register, src is not.
+ vmov(kScratchDoubleReg, src_d_reg);
+ vmov(SwVfpRegister::from_code(dst_code),
+ SwVfpRegister::from_code(scratchSCode + src_offset));
} else {
- // Neither src or dst are s-registers.
- DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
- DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
- VmovExtended(scratch, src_code);
- VmovExtended(dst_code, scratch);
+ // Neither src nor dst is an s-register. Both scratch double registers are
+ // available when there are 32 VFP registers.
+ vmov(kScratchDoubleReg, src_d_reg);
+ vmov(kScratchDoubleReg2, dst_d_reg);
+ vmov(SwVfpRegister::from_code(scratchSCode + dst_offset),
+ SwVfpRegister::from_code(scratchSCode2 + src_offset));
+ vmov(dst_d_reg, kScratchQuadReg.high());
}
}
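
The register-to-register move above exploits the VFP register layout: every 32-bit s-register aliases one half of a 64-bit d-register, so an "extended" s-register code past s31 is reachable only as a lane of d16-d31, which is exactly what the dst_code / 2 and dst_code & 1 arithmetic computes. A minimal standalone sketch of that mapping (plain C++ with hypothetical names, not V8 API):

    #include <cassert>
    #include <cstdio>

    // Map an extended single-precision register code (0..63) to the
    // overlapping double-precision register and the 32-bit lane inside it,
    // mirroring the dst_code / 2 and dst_code & 1 arithmetic above.
    struct DRegLane {
      int d_reg;  // code of the overlapping d-register
      int lane;   // 0 = low 32 bits, 1 = high 32 bits
    };

    DRegLane MapExtendedSCode(int s_code) {
      assert(s_code >= 0 && s_code < 64);
      return {s_code / 2, s_code & 1};
    }

    int main() {
      DRegLane m = MapExtendedSCode(33);  // "s33" exists only as d16[1]
      std::printf("d%d lane %d\n", m.d_reg, m.lane);  // prints: d16 lane 1
      return 0;
    }
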
-void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src,
- Register scratch) {
- if (dst_code >= SwVfpRegister::kMaxNumRegisters) {
- ldr(scratch, src);
- VmovExtended(dst_code, scratch);
- } else {
+void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src) {
+ if (dst_code < SwVfpRegister::kMaxNumRegisters) {
vldr(SwVfpRegister::from_code(dst_code), src);
+ } else {
+ // TODO(bbudge) If NEON is supported, use the load-single-lane form of vld1.
+ int dst_s_code = kScratchDoubleReg.low().code() + (dst_code & 1);
+ vmov(kScratchDoubleReg, DwVfpRegister::from_code(dst_code / 2));
+ vldr(SwVfpRegister::from_code(dst_s_code), src);
+ vmov(DwVfpRegister::from_code(dst_code / 2), kScratchDoubleReg);
}
}
-void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
- Register scratch) {
- if (src_code >= SwVfpRegister::kMaxNumRegisters) {
- VmovExtended(scratch, src_code);
- str(scratch, dst);
- } else {
+void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code) {
+ if (src_code < SwVfpRegister::kMaxNumRegisters) {
vstr(SwVfpRegister::from_code(src_code), dst);
+ } else {
+ // TODO(bbudge) If NEON is supported, use the store-single-lane form of vst1.
+ int src_s_code = kScratchDoubleReg.low().code() + (src_code & 1);
+ vmov(kScratchDoubleReg, DwVfpRegister::from_code(src_code / 2));
+ vstr(SwVfpRegister::from_code(src_s_code), dst);
}
}
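
Both memory forms above follow the same pattern as the register move: copy the whole d-register into the scratch register, load or store only the selected 32-bit lane, and copy back, leaving the other lane intact. A scalar model of the load path, assuming a d-register can be treated as a plain 64-bit value (hypothetical helper, not V8 code):

    #include <cstdint>
    #include <cstdio>

    // Store a 32-bit value into one lane of a 64-bit "d-register" without
    // disturbing the other lane, as the vmov/vldr/vmov sequence above does.
    uint64_t LoadIntoLane(uint64_t d_reg, int lane, uint32_t value) {
      uint64_t scratch = d_reg;  // vmov kScratchDoubleReg, D<n>
      uint64_t mask = 0xffffffffull << (32 * lane);
      scratch = (scratch & ~mask) |  // vldr overwrites one s-lane
                (static_cast<uint64_t>(value) << (32 * lane));
      return scratch;  // vmov D<n>, kScratchDoubleReg
    }

    int main() {
      uint64_t d = 0x1111111122222222ull;
      std::printf("%016llx\n", static_cast<unsigned long long>(
                                   LoadIntoLane(d, 1, 0xdeadbeef)));
      // prints deadbeef22222222 -- the low lane is untouched
      return 0;
    }
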
@@ -1176,9 +1188,9 @@ void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src,
}
void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
- Register scratch, int lane) {
+ int lane) {
int s_code = src.code() * 4 + lane;
- VmovExtended(dst.code(), s_code, scratch);
+ VmovExtended(dst.code(), s_code);
}
void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
@@ -1195,69 +1207,10 @@ void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
}
void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
- SwVfpRegister src_lane, Register scratch,
- int lane) {
+ SwVfpRegister src_lane, int lane) {
Move(dst, src);
int s_code = dst.code() * 4 + lane;
- VmovExtended(s_code, src_lane.code(), scratch);
-}
-
-void MacroAssembler::Swizzle(QwNeonRegister dst, QwNeonRegister src,
- Register scratch, NeonSize size, uint32_t lanes) {
- // TODO(bbudge) Handle Int16x8, Int8x16 vectors.
- DCHECK_EQ(Neon32, size);
- DCHECK_IMPLIES(size == Neon32, lanes < 0xFFFFu);
- if (size == Neon32) {
- switch (lanes) {
- // TODO(bbudge) Handle more special cases.
- case 0x3210: // Identity.
- Move(dst, src);
- return;
- case 0x1032: // Swap top and bottom.
- vext(dst, src, src, 8);
- return;
- case 0x2103: // Rotation.
- vext(dst, src, src, 12);
- return;
- case 0x0321: // Rotation.
- vext(dst, src, src, 4);
- return;
- case 0x0000: // Equivalent to vdup.
- case 0x1111:
- case 0x2222:
- case 0x3333: {
- int lane_code = src.code() * 4 + (lanes & 0xF);
- if (lane_code >= SwVfpRegister::kMaxNumRegisters) {
- // TODO(bbudge) use vdup (vdup.32 dst, D<src>[lane]) once implemented.
- int temp_code = kScratchDoubleReg.code() * 2;
- VmovExtended(temp_code, lane_code, scratch);
- lane_code = temp_code;
- }
- vdup(dst, SwVfpRegister::from_code(lane_code));
- return;
- }
- case 0x2301: // Swap lanes 0, 1 and lanes 2, 3.
- vrev64(Neon32, dst, src);
- return;
- default: // Handle all other cases with vmovs.
- int src_code = src.code() * 4;
- int dst_code = dst.code() * 4;
- bool in_place = src.is(dst);
- if (in_place) {
- vmov(kScratchQuadReg, src);
- src_code = kScratchQuadReg.code() * 4;
- }
- for (int i = 0; i < 4; i++) {
- int lane = (lanes >> (i * 4) & 0xF);
- VmovExtended(dst_code + i, src_code + lane, scratch);
- }
- if (in_place) {
- // Restore zero reg.
- veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
- }
- return;
- }
- }
+ VmovExtended(s_code, src_lane.code());
}
void MacroAssembler::LslPair(Register dst_low, Register dst_high,
@@ -1399,29 +1352,9 @@ void MacroAssembler::AsrPair(Register dst_low, Register dst_high,
}
}
-void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
- Register code_target_address) {
- DCHECK(FLAG_enable_embedded_constant_pool);
- ldr(pp, MemOperand(code_target_address,
- Code::kConstantPoolOffset - Code::kHeaderSize));
- add(pp, pp, code_target_address);
-}
-
-
-void MacroAssembler::LoadConstantPoolPointerRegister() {
- DCHECK(FLAG_enable_embedded_constant_pool);
- int entry_offset = pc_offset() + Instruction::kPCReadOffset;
- sub(ip, pc, Operand(entry_offset));
- LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
-}
-
void MacroAssembler::StubPrologue(StackFrame::Type type) {
mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
- if (FLAG_enable_embedded_constant_pool) {
- LoadConstantPoolPointerRegister();
- set_constant_pool_available(true);
- }
}
void MacroAssembler::Prologue(bool code_pre_aging) {
@@ -1440,10 +1373,6 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
nop(ip.code());
}
}
- if (FLAG_enable_embedded_constant_pool) {
- LoadConstantPoolPointerRegister();
- set_constant_pool_available(true);
- }
}
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
@@ -1458,9 +1387,6 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
// r0-r3: preserved
mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
- if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
- LoadConstantPoolPointerRegister();
- }
if (type == StackFrame::INTERNAL) {
mov(ip, Operand(CodeObject()));
push(ip);
@@ -1474,18 +1400,10 @@ int MacroAssembler::LeaveFrame(StackFrame::Type type) {
// r2: preserved
// Drop the execution stack down to the frame pointer and restore
- // the caller frame pointer, return address and constant pool pointer
- // (if FLAG_enable_embedded_constant_pool).
- int frame_ends;
- if (FLAG_enable_embedded_constant_pool) {
- add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
- frame_ends = pc_offset();
- ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
- } else {
- mov(sp, fp);
- frame_ends = pc_offset();
- ldm(ia_w, sp, fp.bit() | lr.bit());
- }
+ // the caller frame pointer and return address.
+ mov(sp, fp);
+ int frame_ends = pc_offset();
+ ldm(ia_w, sp, fp.bit() | lr.bit());
return frame_ends;
}
@@ -1519,9 +1437,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
mov(ip, Operand::Zero());
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
- if (FLAG_enable_embedded_constant_pool) {
- str(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
- }
mov(ip, Operand(CodeObject()));
str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
@@ -1537,8 +1452,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Note that d0 will be accessible at
// fp - ExitFrameConstants::kFrameSize -
// DwVfpRegister::kMaxNumRegisters * kDoubleSize,
- // since the sp slot, code slot and constant pool slot (if
- // FLAG_enable_embedded_constant_pool) were pushed after the fp.
+ // since the sp slot and code slot were pushed after the fp.
}
// Reserve place for the return address and stack space and align the frame
@@ -1603,9 +1517,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
#endif
// Tear down the exit frame, pop the arguments, and return.
- if (FLAG_enable_embedded_constant_pool) {
- ldr(pp, MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
- }
mov(sp, Operand(fp));
ldm(ia_w, sp, fp.bit() | lr.bit());
if (argument_count.is_valid()) {
@@ -3404,6 +3315,7 @@ void MacroAssembler::CallCFunction(Register function,
void MacroAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
+ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index a69b918ed8..506364686f 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -41,7 +41,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
// Give alias names to registers
const Register cp = {Register::kCode_r7}; // JavaScript context pointer.
-const Register pp = {Register::kCode_r8}; // Constant pool pointer.
const Register kRootRegister = {Register::kCode_r10}; // Roots array pointer.
// Flags used for AllocateHeapNumber
@@ -474,12 +473,10 @@ class MacroAssembler: public Assembler {
}
}
- // Push a fixed frame, consisting of lr, fp, constant pool (if
- // FLAG_enable_embedded_constant_pool)
+ // Push a fixed frame, consisting of lr and fp.
void PushCommonFrame(Register marker_reg = no_reg);
- // Push a standard frame, consisting of lr, fp, constant pool (if
- // FLAG_enable_embedded_constant_pool), context and JS function
+ // Push a standard frame, consisting of lr, fp, context and JS function
void PushStandardFrame(Register function_reg);
void PopCommonFrame(Register marker_reg = no_reg);
@@ -562,20 +559,17 @@ class MacroAssembler: public Assembler {
void VmovExtended(Register dst, int src_code);
void VmovExtended(int dst_code, Register src);
// Move between s-registers and imaginary s-registers.
- void VmovExtended(int dst_code, int src_code, Register scratch);
- void VmovExtended(int dst_code, const MemOperand& src, Register scratch);
- void VmovExtended(const MemOperand& dst, int src_code, Register scratch);
+ void VmovExtended(int dst_code, int src_code);
+ void VmovExtended(int dst_code, const MemOperand& src);
+ void VmovExtended(const MemOperand& dst, int src_code);
void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
void ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane);
- void ExtractLane(SwVfpRegister dst, QwNeonRegister src, Register scratch,
- int lane);
+ void ExtractLane(SwVfpRegister dst, QwNeonRegister src, int lane);
void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
NeonDataType dt, int lane);
void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
- SwVfpRegister src_lane, Register scratch, int lane);
- void Swizzle(QwNeonRegister dst, QwNeonRegister src, Register scratch,
- NeonSize size, uint32_t lanes);
+ SwVfpRegister src_lane, int lane);
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
@@ -1337,11 +1331,6 @@ class MacroAssembler: public Assembler {
Register scratch_reg,
Label* no_memento_found);
- // Loads the constant pool pointer (pp) register.
- void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
- Register code_target_address);
- void LoadConstantPoolPointerRegister();
-
private:
void CallCFunctionHelper(Register function,
int num_reg_arguments,
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index c1e6fc1f4d..1f7e146692 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -803,6 +803,8 @@ void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
+ base::LockGuard<base::Mutex> lock_guard(
+ isolate->simulator_redirection_mutex());
Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
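
Taking the isolate's redirection mutex before Redirection::Get serializes redirection creation, so concurrent threads entering the simulator cannot corrupt the shared redirection list. A minimal sketch of the same RAII locking pattern, with std::mutex standing in for base::Mutex and a map standing in for the redirection list (names illustrative, not V8's):

    #include <mutex>
    #include <unordered_map>

    class RedirectionTable {
     public:
      void* Get(void* external_function) {
        std::lock_guard<std::mutex> lock(mutex_);  // like base::LockGuard
        auto it = table_.find(external_function);
        if (it != table_.end()) return it->second;
        void* swi = MakeRedirection(external_function);
        table_[external_function] = swi;
        return swi;  // the guard releases the lock on every return path
      }

     private:
      void* MakeRedirection(void* f) { return f; }  // placeholder
      std::mutex mutex_;
      std::unordered_map<void*, void*> table_;
    };

    int main() {
      RedirectionTable table;
      int dummy;
      return table.Get(&dummy) == &dummy ? 0 : 1;
    }
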
@@ -1706,12 +1708,11 @@ void Simulator::HandleVList(Instruction* instr) {
// 64-bit value. With the code below we assume that all runtime calls return
// 64 bits of result. If they don't, the r1 result register contains a bogus
// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3,
- int32_t arg4,
- int32_t arg5);
+typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0, int32_t arg1,
+ int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5,
+ int32_t arg6, int32_t arg7,
+ int32_t arg8);
typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int32_t arg0, int32_t arg1,
int32_t arg2, int32_t arg3,
@@ -1752,6 +1753,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
int32_t arg4 = stack_pointer[0];
int32_t arg5 = stack_pointer[1];
+ int32_t arg6 = stack_pointer[2];
+ int32_t arg7 = stack_pointer[3];
+ int32_t arg8 = stack_pointer[4];
+ STATIC_ASSERT(kMaxCParameters == 9);
+
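
With kMaxCParameters raised to 9, the simulator mirrors the 32-bit ARM calling convention: the first four integer arguments travel in r0-r3 and everything after them is read off the stack, which is why arg4..arg8 come from stack_pointer[0..4] above. A small model of that split (plain C++, illustrative only):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t regs[4] = {10, 11, 12, 13};       // r0..r3
      int32_t stack[5] = {14, 15, 16, 17, 18};  // sp[0]..sp[4]
      for (int i = 0; i < 9; i++) {
        int32_t arg = (i < 4) ? regs[i] : stack[i - 4];
        std::printf("arg%d = %d\n", i, arg);
      }
      return 0;
    }
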
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
@@ -1939,16 +1945,17 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
"Call to host function at %p "
- "args %08x, %08x, %08x, %08x, %08x, %08x",
+ "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x",
static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
- arg4, arg5);
+ arg4, arg5, arg6, arg7, arg8);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ int64_t result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
@@ -4216,6 +4223,34 @@ void ArithmeticShiftRight(Simulator* simulator, int Vd, int Vm, int shift) {
}
template <typename T, int SIZE>
+void ShiftLeftAndInsert(Simulator* simulator, int Vd, int Vm, int shift) {
+ static const int kElems = SIZE / sizeof(T);
+ T src[kElems];
+ T dst[kElems];
+ simulator->get_neon_register<T, SIZE>(Vm, src);
+ simulator->get_neon_register<T, SIZE>(Vd, dst);
+ uint64_t mask = (1llu << shift) - 1llu;
+ for (int i = 0; i < kElems; i++) {
+ dst[i] = (src[i] << shift) | (dst[i] & mask);
+ }
+ simulator->set_neon_register<T, SIZE>(Vd, dst);
+}
+
+template <typename T, int SIZE>
+void ShiftRightAndInsert(Simulator* simulator, int Vd, int Vm, int shift) {
+ static const int kElems = SIZE / sizeof(T);
+ T src[kElems];
+ T dst[kElems];
+ simulator->get_neon_register<T, SIZE>(Vm, src);
+ simulator->get_neon_register<T, SIZE>(Vd, dst);
+ uint64_t mask = ~((1llu << (kBitsPerByte * sizeof(T) - shift)) - 1llu);
+ for (int i = 0; i < kElems; i++) {
+ dst[i] = (src[i] >> shift) | (dst[i] & mask);
+ }
+ simulator->set_neon_register<T, SIZE>(Vd, dst);
+}
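
For reference, the insert semantics these two helpers implement: vsli keeps the low `shift` bits of each destination element, while vsri keeps the high `shift` bits. A scalar worked example on a single 32-bit lane (plain C++, using the per-element mask):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t src = 0xdeadbeef, dst = 0x11223344;
      int shift = 8;
      uint32_t sli = (src << shift) | (dst & ((1u << shift) - 1));
      uint32_t sri = (src >> shift) | (dst & ~((1u << (32 - shift)) - 1));
      std::printf("vsli.32: %08x\n", sli);  // adbeef44: low 8 dst bits kept
      std::printf("vsri.32: %08x\n", sri);  // 11deadbe: high 8 dst bits kept
      return 0;
    }
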
+
+template <typename T, int SIZE>
void CompareEqual(Simulator* simulator, int Vd, int Vm, int Vn) {
static const int kElems = SIZE / sizeof(T);
T src1[kElems], src2[kElems];
@@ -4273,6 +4308,20 @@ void PairwiseMinMax(Simulator* simulator, int Vd, int Vm, int Vn, bool min) {
simulator->set_neon_register<T, kDoubleSize>(Vd, dst);
}
+template <typename T>
+void PairwiseAdd(Simulator* simulator, int Vd, int Vm, int Vn) {
+ static const int kElems = kDoubleSize / sizeof(T);
+ static const int kPairs = kElems / 2;
+ T dst[kElems], src1[kElems], src2[kElems];
+ simulator->get_neon_register<T, kDoubleSize>(Vn, src1);
+ simulator->get_neon_register<T, kDoubleSize>(Vm, src2);
+ for (int i = 0; i < kPairs; i++) {
+ dst[i] = src1[i * 2] + src1[i * 2 + 1];
+ dst[i + kPairs] = src2[i * 2] + src2[i * 2 + 1];
+ }
+ simulator->set_neon_register<T, kDoubleSize>(Vd, dst);
+}
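
PairwiseAdd sums adjacent element pairs: the first operand's pair sums fill the low half of the result and the second operand's fill the high half, matching vpadd. A scalar worked example with two 32-bit lanes per operand:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t src1[2] = {1, 2};    // Dn
      int32_t src2[2] = {30, 40};  // Dm
      int32_t dst[2] = {src1[0] + src1[1],   // low half from Dn
                        src2[0] + src2[1]};  // high half from Dm
      std::printf("vpadd.i32: {%d, %d}\n", dst[0], dst[1]);  // {3, 70}
      return 0;
    }
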
+
void Simulator::DecodeSpecialCondition(Instruction* instr) {
switch (instr->SpecialValue()) {
case 4: {
@@ -4484,6 +4533,25 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
+ case 0xb: {
+ // vpadd.i<size> Dd, Dm, Dn.
+ NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
+ switch (size) {
+ case Neon8:
+ PairwiseAdd<int8_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon16:
+ PairwiseAdd<int16_t>(this, Vd, Vm, Vn);
+ break;
+ case Neon32:
+ PairwiseAdd<int32_t>(this, Vd, Vm, Vn);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ break;
+ }
case 0xd: {
if (instr->Bit(4) == 0) {
float src1[4], src2[4];
@@ -4832,7 +4900,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
break;
}
case 0xd: {
- if (instr->Bit(21) == 0 && instr->Bit(6) == 1 && instr->Bit(4) == 1) {
+ if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
+ instr->Bit(4) == 1) {
// vmul.f32 Qd, Qn, Qm
float src1[4], src2[4];
get_neon_register(Vn, src1);
@@ -4841,6 +4910,10 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
src1[i] = src1[i] * src2[i];
}
set_neon_register(Vd, src1);
+ } else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 0 &&
+ instr->Bit(4) == 0) {
+ // vpadd.f32 Dd, Dn, Dm
+ PairwiseAdd<float>(this, Vd, Vm, Vn);
} else {
UNIMPLEMENTED();
}
@@ -4950,14 +5023,40 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
set_neon_register(vd, mval);
}
} else if (instr->Bits(11, 7) == 0x18) {
- // vdup.32 Qd, Sm.
- int vd = instr->VFPDRegValue(kSimd128Precision);
+ // vdup.<size> Dd, Dm[index].
+ // vdup.<size> Qd, Dm[index].
int vm = instr->VFPMRegValue(kDoublePrecision);
- int index = instr->Bit(19);
- uint32_t s_data = get_s_register(vm * 2 + index);
- uint32_t q_data[4];
- for (int i = 0; i < 4; i++) q_data[i] = s_data;
- set_neon_register(vd, q_data);
+ int imm4 = instr->Bits(19, 16);
+ int size = 0, index = 0, mask = 0;
+ if ((imm4 & 0x1) != 0) {
+ size = 8;
+ index = imm4 >> 1;
+ mask = 0xffu;
+ } else if ((imm4 & 0x2) != 0) {
+ size = 16;
+ index = imm4 >> 2;
+ mask = 0xffffu;
+ } else {
+ size = 32;
+ index = imm4 >> 3;
+ mask = 0xffffffffu;
+ }
+ uint64_t d_data;
+ get_d_register(vm, &d_data);
+ uint32_t scalar = (d_data >> (size * index)) & mask;
+ uint32_t duped = scalar;
+ for (int i = 1; i < 32 / size; i++) {
+ scalar <<= size;
+ duped |= scalar;
+ }
+ uint32_t result[4] = {duped, duped, duped, duped};
+ if (instr->Bit(6) == 0) {
+ int vd = instr->VFPDRegValue(kDoublePrecision);
+ set_d_register(vd, result);
+ } else {
+ int vd = instr->VFPDRegValue(kSimd128Precision);
+ set_neon_register(vd, result);
+ }
} else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
// vmvn Qd, Qm.
int vd = instr->VFPDRegValue(kSimd128Precision);
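
The widened vdup decode reads both the element size and the lane out of imm4: the position of the lowest set bit selects the size (8, 16, or 32 bits) and the bits above it form the lane index. A standalone worked example of the decoding (plain C++):

    #include <cstdio>

    void Decode(int imm4) {
      int size, index;
      if (imm4 & 0x1) {
        size = 8;  index = imm4 >> 1;
      } else if (imm4 & 0x2) {
        size = 16; index = imm4 >> 2;
      } else {
        size = 32; index = imm4 >> 3;
      }
      std::printf("imm4=%2d -> vdup.%d lane %d\n", imm4, size, index);
    }

    int main() {
      Decode(0x5);  // vdup.8, lane 2
      Decode(0x6);  // vdup.16, lane 1
      Decode(0x8);  // vdup.32, lane 1
      return 0;
    }
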
@@ -5334,6 +5433,58 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
UNREACHABLE();
break;
}
+ } else if (instr->Bits(11, 8) == 0x5 && instr->Bit(6) == 0 &&
+ instr->Bit(4) == 1) {
+ // vsli.<size> Dd, Dm, shift
+ int imm7 = instr->Bits(21, 16);
+ if (instr->Bit(7) != 0) imm7 += 64;
+ int size = base::bits::RoundDownToPowerOfTwo32(imm7);
+ int shift = imm7 - size;
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ switch (size) {
+ case 8:
+ ShiftLeftAndInsert<uint8_t, kDoubleSize>(this, Vd, Vm, shift);
+ break;
+ case 16:
+ ShiftLeftAndInsert<uint16_t, kDoubleSize>(this, Vd, Vm, shift);
+ break;
+ case 32:
+ ShiftLeftAndInsert<uint32_t, kDoubleSize>(this, Vd, Vm, shift);
+ break;
+ case 64:
+ ShiftLeftAndInsert<uint64_t, kDoubleSize>(this, Vd, Vm, shift);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ } else if (instr->Bits(11, 8) == 0x4 && instr->Bit(6) == 0 &&
+ instr->Bit(4) == 1) {
+ // vsri.<size> Dd, Dm, shift
+ int imm7 = instr->Bits(21, 16);
+ if (instr->Bit(7) != 0) imm7 += 64;
+ int size = base::bits::RoundDownToPowerOfTwo32(imm7);
+ int shift = 2 * size - imm7;
+ int Vd = instr->VFPDRegValue(kDoublePrecision);
+ int Vm = instr->VFPMRegValue(kDoublePrecision);
+ switch (size) {
+ case 8:
+ ShiftRightAndInsert<uint8_t, kDoubleSize>(this, Vd, Vm, shift);
+ break;
+ case 16:
+ ShiftRightAndInsert<uint16_t, kDoubleSize>(this, Vd, Vm, shift);
+ break;
+ case 32:
+ ShiftRightAndInsert<uint32_t, kDoubleSize>(this, Vd, Vm, shift);
+ break;
+ case 64:
+ ShiftRightAndInsert<uint64_t, kDoubleSize>(this, Vd, Vm, shift);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
} else {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index f5ebf219cb..35fc2f930e 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -27,18 +27,14 @@ namespace internal {
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
(entry(p0, p1, p2, p3, p4))
-typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, int, Address, int, Isolate*);
-
+typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*, int*,
+ int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
-// The fifth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
- (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
- p7, p8))
+ (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on arm uses the C stack, we
@@ -365,7 +361,7 @@ class Simulator {
static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page);
- // Runtime call support.
+ // Runtime call support. Uses the isolate in a thread-safe way.
static void* RedirectExternalReference(
Isolate* isolate, void* external_function,
v8::internal::ExternalReference::Type type);
@@ -549,9 +545,8 @@ class Simulator {
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
- Simulator::current(isolate) \
- ->Call(entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
-
+ Simulator::current(isolate)->Call(entry, 9, p0, p1, p2, p3, p4, p5, p6, p7, \
+ p8)
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. The JS-based limit normally points near the end of
diff --git a/deps/v8/src/arm64/assembler-arm64-inl.h b/deps/v8/src/arm64/assembler-arm64-inl.h
index f6bb6a8893..e865b634b5 100644
--- a/deps/v8/src/arm64/assembler-arm64-inl.h
+++ b/deps/v8/src/arm64/assembler-arm64-inl.h
@@ -832,20 +832,20 @@ template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
+ visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
+ visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
+ visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(this);
+ visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(this);
+ visitor->VisitDebugTarget(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
+ visitor->VisitRuntimeEntry(host(), this);
}
}
diff --git a/deps/v8/src/arm64/assembler-arm64.cc b/deps/v8/src/arm64/assembler-arm64.cc
index ac6931dec7..ec12e77274 100644
--- a/deps/v8/src/arm64/assembler-arm64.cc
+++ b/deps/v8/src/arm64/assembler-arm64.cc
@@ -2905,7 +2905,14 @@ void Assembler::GrowBuffer() {
} else {
desc.buffer_size = buffer_size_ + 1 * MB;
}
- CHECK_GT(desc.buffer_size, 0); // No overflow.
+
+ // Some internal data structures overflow for very large buffers;
+ // kMaximalBufferSize must be kept small enough to prevent that.
+ if (desc.buffer_size > kMaximalBufferSize ||
+ static_cast<size_t>(desc.buffer_size) >
+ isolate_data().max_old_generation_size_) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
byte* buffer = reinterpret_cast<byte*>(buffer_);
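
The new guard replaces the bare overflow CHECK with an explicit ceiling, failing fast before buffer displacements can overflow the assembler's internal data structures. A sketch of that policy with made-up growth numbers and std::abort standing in for V8::FatalProcessOutOfMemory (illustrative, not the assembler's exact rule):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    constexpr int64_t kMB = 1 << 20;
    constexpr int64_t kMaxBuffer = 512 * kMB;  // mirrors kMaximalBufferSize

    int64_t NextBufferSize(int64_t current) {
      // Hypothetical growth rule: double small buffers, then grow linearly.
      int64_t next = current < 1 * kMB ? 2 * current : current + 1 * kMB;
      if (next > kMaxBuffer) {
        std::fprintf(stderr, "Assembler::GrowBuffer: out of memory\n");
        std::abort();  // fail fast instead of overflowing displacements
      }
      return next;
    }

    int main() {
      std::printf("%lld\n", static_cast<long long>(NextBufferSize(4 * kMB)));
      return 0;
    }
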
diff --git a/deps/v8/src/arm64/assembler-arm64.h b/deps/v8/src/arm64/assembler-arm64.h
index ea1d94f628..e4ca410abd 100644
--- a/deps/v8/src/arm64/assembler-arm64.h
+++ b/deps/v8/src/arm64/assembler-arm64.h
@@ -2134,6 +2134,9 @@ class Assembler : public AssemblerBase {
int next_veneer_pool_check_;
private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
// If a veneer is emitted for a branch instruction, that instruction must be
// removed from the associated label's link chain so that the assembler does
// not later attempt (likely unsuccessfully) to patch it to branch directly to
diff --git a/deps/v8/src/arm64/code-stubs-arm64.cc b/deps/v8/src/arm64/code-stubs-arm64.cc
index ec00581566..c3c3367b10 100644
--- a/deps/v8/src/arm64/code-stubs-arm64.cc
+++ b/deps/v8/src/arm64/code-stubs-arm64.cc
@@ -1267,73 +1267,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Ret();
}
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- // This case is handled prior to the RegExpExecStub call.
- __ Abort(kUnexpectedRegExpExecCall);
-#else // V8_INTERPRETED_REGEXP
- // Isolates: note we add an additional parameter here (isolate pointer).
- __ EnterExitFrame(false, x10, 1);
- DCHECK(csp.Is(__ StackPointer()));
-
- // We have 9 arguments to pass to the regexp code, therefore we have to pass
- // one on the stack and the rest as registers.
-
- // Note that the placement of the argument on the stack isn't standard
- // AAPCS64:
- // csp[0]: Space for the return address placed by DirectCEntryStub.
- // csp[8]: Argument 9, the current isolate address.
-
- __ Mov(x10, ExternalReference::isolate_address(isolate()));
- __ Poke(x10, kPointerSize);
-
- // Argument 1 (x0): Subject string.
- CHECK(x0.is(RegExpExecDescriptor::StringRegister()));
-
- // Argument 2 (x1): Previous index, already there.
- CHECK(x1.is(RegExpExecDescriptor::LastIndexRegister()));
-
- // Argument 3 (x2): Input start.
- // Argument 4 (x3): Input end.
- CHECK(x2.is(RegExpExecDescriptor::StringStartRegister()));
- CHECK(x3.is(RegExpExecDescriptor::StringEndRegister()));
-
- // Argument 5 (x4): static offsets vector buffer.
- __ Mov(x4, ExternalReference::address_of_static_offsets_vector(isolate()));
-
- // Argument 6 (x5): Set the number of capture registers to zero to force
- // global regexps to behave as non-global. This stub is not used for global
- // regexps.
- __ Mov(x5, 0);
-
- // Argument 7 (x6): Start (high end) of backtracking stack memory area.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ Mov(x10, address_of_regexp_stack_memory_address);
- __ Ldr(x10, MemOperand(x10));
- __ Mov(x11, address_of_regexp_stack_memory_size);
- __ Ldr(x11, MemOperand(x11));
- __ Add(x6, x10, x11);
-
- // Argument 8 (x7): Indicate that this is a direct call from JavaScript.
- __ Mov(x7, 1);
-
- // Locate the code entry and call it.
- Register code_object = RegExpExecDescriptor::CodeRegister();
- __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag);
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm, code_object);
-
- __ LeaveExitFrame(false, x10, true);
-
- // Return the smi-tagged result.
- __ SmiTag(x0);
- __ Ret();
-#endif
-}
-
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub,
Register argc, Register function,
@@ -3098,9 +3031,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
- if (!call_data_undefined()) {
- __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
- }
+ __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
Register isolate_reg = x5;
__ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 901259f2b4..a178e1d95e 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -31,16 +31,35 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+ Address code_start_address = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- // TODO(jkummerow): if (FLAG_zap_code_space), make the code object's
- // entry sequence unusable (see other architectures).
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+
+ {
+ PatchingAssembler patcher(Assembler::IsolateData(isolate), pointer, 1);
+ patcher.brk(0);
+ }
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ PatchingAssembler patcher(Assembler::IsolateData(isolate),
+ code_start_address + osr_offset, 1);
+ patcher.brk(0);
+ }
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(code->deoptimization_data());
- Address code_start_address = code->instruction_start();
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
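
Patching brk(0) over the code-age sequence (and over any OSR entry) means a stale jump into deoptimized code traps immediately instead of executing invalid code. A conceptual model of that fail-fast idea, with a function pointer standing in for the patched entry point (illustrative only, not how code patching actually works):

    #include <cstdio>
    #include <cstdlib>

    using Entry = void (*)();

    void RealCode() { std::puts("running optimized code"); }
    void Trap() {  // stands in for the patched brk #0
      std::fprintf(stderr, "entered deoptimized code\n");
      std::abort();
    }

    int main() {
      Entry entry = RealCode;
      entry();       // fine before deoptimization
      entry = Trap;  // PatchCodeForDeoptimization: patcher.brk(0)
      (void)entry;   // any later call through the stale entry now fails hard
      return 0;
    }
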
diff --git a/deps/v8/src/arm64/interface-descriptors-arm64.cc b/deps/v8/src/arm64/interface-descriptors-arm64.cc
index c73d371e8f..887adddf29 100644
--- a/deps/v8/src/arm64/interface-descriptors-arm64.cc
+++ b/deps/v8/src/arm64/interface-descriptors-arm64.cc
@@ -57,11 +57,6 @@ const Register MathPowTaggedDescriptor::exponent() { return x11; }
const Register MathPowIntegerDescriptor::exponent() { return x12; }
-const Register RegExpExecDescriptor::StringRegister() { return x0; }
-const Register RegExpExecDescriptor::LastIndexRegister() { return x1; }
-const Register RegExpExecDescriptor::StringStartRegister() { return x2; }
-const Register RegExpExecDescriptor::StringEndRegister() { return x3; }
-const Register RegExpExecDescriptor::CodeRegister() { return x8; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
@@ -182,8 +177,19 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: target
+ // x0: number of arguments
+ // x2: start index (to support rest parameters)
+ Register registers[] = {x1, x0, x2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // x3: new target
+ // x1: target
+ // x0: number of arguments
// x2: start index (to support rest parameters)
- Register registers[] = {x1, x2};
+ Register registers[] = {x1, x3, x0, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.cc b/deps/v8/src/arm64/macro-assembler-arm64.cc
index 5edcd7b044..2282c941ba 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/arm64/macro-assembler-arm64.cc
@@ -129,7 +129,12 @@ void MacroAssembler::LogicalMacro(const Register& rd,
} else {
// Immediate can't be encoded: synthesize using move immediate.
Register temp = temps.AcquireSameSizeAs(rn);
- Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
+
+ // If the left-hand input is the stack pointer, we can't pre-shift the
+ // immediate, as the encoding won't allow the subsequent post shift.
+ PreShiftImmMode mode = rn.Is(csp) ? kNoShift : kAnyShift;
+ Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
+
if (rd.Is(csp)) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
@@ -437,17 +442,23 @@ bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
return false;
}
-
Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
- int64_t imm) {
+ int64_t imm,
+ PreShiftImmMode mode) {
int reg_size = dst.SizeInBits();
-
// Encode the immediate in a single move instruction, if possible.
if (TryOneInstrMoveImmediate(dst, imm)) {
// The move was successful; nothing to do here.
} else {
// Pre-shift the immediate to the least-significant bits of the register.
int shift_low = CountTrailingZeros(imm, reg_size);
+ if (mode == kLimitShiftForSP) {
+ // When applied to the stack pointer, the subsequent arithmetic operation
+ // can use the extend form to shift left by a maximum of four bits. Right
+ // shifts are not allowed, so we filter them out later before the new
+ // immediate is tested.
+ shift_low = std::min(shift_low, 4);
+ }
int64_t imm_low = imm >> shift_low;
// Pre-shift the immediate to the most-significant bits of the register. We
@@ -456,13 +467,13 @@ Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
// If this new immediate is encodable, the set bits will be eliminated by
// the post shift on the following instruction.
int shift_high = CountLeadingZeros(imm, reg_size);
- int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1);
+ int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
- if (TryOneInstrMoveImmediate(dst, imm_low)) {
+ if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) {
// The new immediate has been moved into the destination's low bits:
// return a new leftward-shifting operand.
return Operand(dst, LSL, shift_low);
- } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
+ } else if ((mode == kAnyShift) && TryOneInstrMoveImmediate(dst, imm_high)) {
// The new immediate has been moved into the destination's high bits:
// return a new rightward-shifting operand.
return Operand(dst, LSR, shift_high);
@@ -498,8 +509,21 @@ void MacroAssembler::AddSubMacro(const Register& rd,
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(rn);
if (operand.IsImmediate()) {
+ PreShiftImmMode mode = kAnyShift;
+
+ // If the destination or source register is the stack pointer, we can
+ // only pre-shift the immediate right by values supported in the add/sub
+ // extend encoding.
+ if (rd.Is(csp)) {
+ // If the destination is SP and flags will be set, we can't pre-shift
+ // the immediate at all.
+ mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
+ } else if (rn.Is(csp)) {
+ mode = kLimitShiftForSP;
+ }
+
Operand imm_operand =
- MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
+ MoveImmediateForShiftedOp(temp, operand.ImmediateValue(), mode);
AddSub(rd, rn, imm_operand, S, op);
} else {
Mov(temp, operand);
@@ -1791,14 +1815,13 @@ void MacroAssembler::CallCFunction(ExternalReference function,
CallCFunction(temp, num_of_reg_args, num_of_double_args);
}
+static const int kRegisterPassedArguments = 8;
void MacroAssembler::CallCFunction(Register function,
int num_of_reg_args,
int num_of_double_args) {
+ DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters);
DCHECK(has_frame());
- // We can pass 8 integer arguments in registers. If we need to pass more than
- // that, we'll need to implement support for passing them on the stack.
- DCHECK(num_of_reg_args <= 8);
// If we're passing doubles, we're limited to the following prototypes
// (defined by ExternalReference::Type):
@@ -1811,6 +1834,10 @@ void MacroAssembler::CallCFunction(Register function,
DCHECK((num_of_double_args + num_of_reg_args) <= 2);
}
+ // We rely on the frame alignment being 16 bytes, which means we never need
+ // to align the CSP by an unknown number of bytes and we always know the delta
+ // between the stack pointer and the frame pointer.
+ DCHECK(ActivationFrameAlignment() == 16);
// If the stack pointer is not csp, we need to derive an aligned csp from the
// current stack pointer.
@@ -1819,16 +1846,18 @@ void MacroAssembler::CallCFunction(Register function,
AssertStackConsistency();
int sp_alignment = ActivationFrameAlignment();
- // The ABI mandates at least 16-byte alignment.
- DCHECK(sp_alignment >= 16);
- DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
-
// The current stack pointer is a callee saved register, and is preserved
// across the call.
DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
- // Align and synchronize the system stack pointer with jssp.
- Bic(csp, old_stack_pointer, sp_alignment - 1);
+ // If more than eight arguments are passed to the function, we expect the
+ // ninth argument onwards to have been placed on the csp-based stack
+ // already. We assume csp already points to the last stack-passed argument
+ // in that case.
+ // Otherwise, align and synchronize the system stack pointer with jssp.
+ if (num_of_reg_args <= kRegisterPassedArguments) {
+ Bic(csp, old_stack_pointer, sp_alignment - 1);
+ }
SetStackPointer(csp);
}
@@ -1836,19 +1865,39 @@ void MacroAssembler::CallCFunction(Register function,
// so the return address in the link register stays correct.
Call(function);
- if (!csp.Is(old_stack_pointer)) {
+ if (csp.Is(old_stack_pointer)) {
+ if (num_of_reg_args > kRegisterPassedArguments) {
+ // Drop the register passed arguments.
+ int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
+ Drop(claim_slots);
+ }
+ } else {
+ DCHECK(jssp.Is(old_stack_pointer));
if (emit_debug_code()) {
- // Because the stack pointer must be aligned on a 16-byte boundary, the
- // aligned csp can be up to 12 bytes below the jssp. This is the case
- // where we only pushed one W register on top of an aligned jssp.
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- DCHECK(ActivationFrameAlignment() == 16);
- Sub(temp, csp, old_stack_pointer);
- // We want temp <= 0 && temp >= -12.
- Cmp(temp, 0);
- Ccmp(temp, -12, NFlag, le);
- Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
+
+ if (num_of_reg_args > kRegisterPassedArguments) {
+ // We don't need to drop stack arguments, as the stack pointer will be
+ // jssp when returning from this function. However, in debug builds, we
+ // can check that jssp is as expected.
+ int claim_slots =
+ RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
+
+ // Check jssp matches the previous value on the stack.
+ Ldr(temp, MemOperand(csp, claim_slots * kPointerSize));
+ Cmp(jssp, temp);
+ Check(eq, kTheStackWasCorruptedByMacroAssemblerCall);
+ } else {
+ // Because the stack pointer must be aligned on a 16-byte boundary, the
+ // aligned csp can be up to 12 bytes below the jssp. This is the case
+ // where we only pushed one W register on top of an aligned jssp.
+ Sub(temp, csp, old_stack_pointer);
+ // We want temp <= 0 && temp >= -12.
+ Cmp(temp, 0);
+ Ccmp(temp, -12, NFlag, le);
+ Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
+ }
}
SetStackPointer(old_stack_pointer);
}
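
Rounding the count of stack-passed arguments up to an even number of slots keeps csp 16-byte aligned, since each slot is 8 bytes. A worked example of the claim_slots arithmetic (plain C++, mirroring RoundUp(extra, 2)):

    #include <algorithm>
    #include <cstdio>

    constexpr int kRegisterPassedArguments = 8;

    int ClaimSlots(int num_of_reg_args) {
      int extra = std::max(0, num_of_reg_args - kRegisterPassedArguments);
      return ((extra + 1) / 2) * 2;  // RoundUp(extra, 2)
    }

    int main() {
      std::printf("%d %d %d\n", ClaimSlots(8), ClaimSlots(9), ClaimSlots(11));
      // prints: 0 2 4 -- nine arguments need one stack slot, claimed as two
      return 0;
    }
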
@@ -2547,6 +2596,8 @@ void MacroAssembler::TruncateDoubleToI(Register result,
}
Bind(&done);
+ // Keep our invariant that the upper 32 bits are zero.
+ Uxtw(result.W(), result.W());
}
@@ -3733,7 +3784,7 @@ void MacroAssembler::RecordWriteField(
Add(scratch, object, offset - kHeapObjectTag);
if (emit_debug_code()) {
Label ok;
- Tst(scratch, (1 << kPointerSizeLog2) - 1);
+ Tst(scratch, kPointerSize - 1);
B(eq, &ok);
Abort(kUnalignedCellInWriteBarrier);
Bind(&ok);
diff --git a/deps/v8/src/arm64/macro-assembler-arm64.h b/deps/v8/src/arm64/macro-assembler-arm64.h
index e60fbe33fe..6c77dd5b01 100644
--- a/deps/v8/src/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/arm64/macro-assembler-arm64.h
@@ -162,6 +162,21 @@ enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
+// The macro assembler supports moving automatically pre-shifted immediates for
+// arithmetic and logical instructions, and then applying a post shift in the
+// instruction to undo the modification, in order to reduce the code emitted for
+// an operation. For example:
+//
+// Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
+//
+// This optimisation can be only partially applied when the stack pointer is an
+// operand or destination, so this enumeration is used to control the shift.
+enum PreShiftImmMode {
+ kNoShift, // Don't pre-shift.
+ kLimitShiftForSP, // Limit pre-shift for add/sub extend use.
+ kAnyShift // Allow any pre-shift.
+};
+
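
The enum comment's own example shows the trick: 0x1f7de has a single trailing zero bit, so shifting it right by one leaves 0xfbef, which fits one movz, and the add re-applies the shift. A tiny standalone check of that arithmetic (plain C++, with a GCC/Clang builtin standing in for CountTrailingZeros):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t imm = 0x1f7de;
      int shift_low = __builtin_ctzll(imm);  // CountTrailingZeros
      uint64_t imm_low = imm >> shift_low;
      assert(imm_low <= 0xffff);  // now encodable in a single movz
      std::printf("movz x16, #0x%llx; add x0, x0, x16, lsl #%d\n",
                  static_cast<unsigned long long>(imm_low), shift_low);
      return 0;
    }
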
class MacroAssembler : public Assembler {
public:
MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
@@ -276,7 +291,8 @@ class MacroAssembler : public Assembler {
// dst is not necessarily equal to imm; it may have had a shifting operation
// applied to it that will be subsequently undone by the shift applied in the
// Operand.
- Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
+ Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm,
+ PreShiftImmMode mode);
// Conditional macros.
inline void Ccmp(const Register& rn,
diff --git a/deps/v8/src/arm64/simulator-arm64.cc b/deps/v8/src/arm64/simulator-arm64.cc
index b536fd5e9c..fb0e614982 100644
--- a/deps/v8/src/arm64/simulator-arm64.cc
+++ b/deps/v8/src/arm64/simulator-arm64.cc
@@ -208,7 +208,6 @@ int64_t Simulator::CallRegExp(byte* entry,
int64_t output_size,
Address stack_base,
int64_t direct_call,
- void* return_address,
Isolate* isolate) {
CallArgument args[] = {
CallArgument(input),
@@ -219,7 +218,6 @@ int64_t Simulator::CallRegExp(byte* entry,
CallArgument(output_size),
CallArgument(stack_base),
CallArgument(direct_call),
- CallArgument(return_address),
CallArgument(isolate),
CallArgument::End()
};
@@ -540,14 +538,11 @@ void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
// uses the ObjectPair structure.
// The simulator assumes all runtime calls return two 64-bits values. If they
// don't, register x1 is clobbered. This is fine because x1 is caller-saved.
-typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
- int64_t arg1,
- int64_t arg2,
- int64_t arg3,
- int64_t arg4,
- int64_t arg5,
- int64_t arg6,
- int64_t arg7);
+typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7,
+ int64_t arg8);
typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
int64_t arg2, int64_t arg3,
@@ -589,6 +584,19 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
FATAL("ALIGNMENT EXCEPTION");
}
+ int64_t* stack_pointer = reinterpret_cast<int64_t*>(sp());
+
+ const int64_t arg0 = xreg(0);
+ const int64_t arg1 = xreg(1);
+ const int64_t arg2 = xreg(2);
+ const int64_t arg3 = xreg(3);
+ const int64_t arg4 = xreg(4);
+ const int64_t arg5 = xreg(5);
+ const int64_t arg6 = xreg(6);
+ const int64_t arg7 = xreg(7);
+ const int64_t arg8 = stack_pointer[0];
+ STATIC_ASSERT(kMaxCParameters == 9);
+
switch (redirection->type()) {
default:
TraceSim("Type: Unknown.\n");
@@ -606,15 +614,20 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
// We don't know how many arguments are being passed, but we can
// pass 8 without touching the stack. They will be ignored by the
// host function if they aren't used.
- TraceSim("Arguments: "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64,
- xreg(0), xreg(1), xreg(2), xreg(3),
- xreg(4), xreg(5), xreg(6), xreg(7));
- ObjectPair result = target(xreg(0), xreg(1), xreg(2), xreg(3),
- xreg(4), xreg(5), xreg(6), xreg(7));
+ TraceSim(
+ "Arguments: "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64,
+ arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
+ ObjectPair result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
TraceSim("Returned: {%p, %p}\n", static_cast<void*>(result.x),
static_cast<void*>(result.y));
#ifdef DEBUG
@@ -636,16 +649,18 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
// host function if they aren't used.
TraceSim(
"Arguments: "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
- "0x%016" PRIx64 ", 0x%016" PRIx64 ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
+ "0x%016" PRIx64 ", 0x%016" PRIx64
+ ", "
"0x%016" PRIx64 ", 0x%016" PRIx64,
- xreg(0), xreg(1), xreg(2), xreg(3), xreg(4), xreg(5), xreg(6),
- xreg(7));
+ arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
// Return location passed in x8.
ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(xreg(8));
- ObjectTriple result = target(xreg(0), xreg(1), xreg(2), xreg(3), xreg(4),
- xreg(5), xreg(6), xreg(7));
+ ObjectTriple result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
TraceSim("Returned: {%p, %p, %p}\n", static_cast<void*>(result.x),
static_cast<void*>(result.y), static_cast<void*>(result.z));
#ifdef DEBUG
@@ -786,6 +801,8 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
+ base::LockGuard<base::Mutex> lock_guard(
+ isolate->simulator_redirection_mutex());
Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_redirect_call();
}
diff --git a/deps/v8/src/arm64/simulator-arm64.h b/deps/v8/src/arm64/simulator-arm64.h
index 3016e616e4..48fc1c7bc6 100644
--- a/deps/v8/src/arm64/simulator-arm64.h
+++ b/deps/v8/src/arm64/simulator-arm64.h
@@ -36,17 +36,14 @@ typedef int (*arm64_regexp_matcher)(String* input,
int64_t output_size,
Address stack_base,
int64_t direct_call,
- void* return_address,
Isolate* isolate);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm64_regexp_matcher.
-// The ninth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
(FUNCTION_CAST<arm64_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- NULL, p8))
+ p8))
// Running without a simulator there is nothing to do.
class SimulatorStack : public v8::internal::AllStatic {
@@ -201,7 +198,6 @@ class Simulator : public DecoderVisitor {
int64_t output_size,
Address stack_base,
int64_t direct_call,
- void* return_address,
Isolate* isolate);
// A wrapper class that stores an argument for one of the above Call
@@ -277,7 +273,7 @@ class Simulator : public DecoderVisitor {
void ResetState();
- // Runtime call support.
+ // Runtime call support. Uses the isolate in a thread-safe way.
static void* RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type);
@@ -973,8 +969,7 @@ class Simulator : public DecoderVisitor {
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
static_cast<int>(Simulator::current(isolate)->CallRegExp( \
- entry, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
-
+ entry, p0, p1, p2, p3, p4, p5, p6, p7, p8))
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. The JS-based limit normally points near the end of
diff --git a/deps/v8/src/asmjs/OWNERS b/deps/v8/src/asmjs/OWNERS
index b994be3e17..4f54661aeb 100644
--- a/deps/v8/src/asmjs/OWNERS
+++ b/deps/v8/src/asmjs/OWNERS
@@ -1,11 +1,8 @@
-# Keep in sync with test/cctest/asmjs/OWNERS.
-
set noparent
ahaas@chromium.org
bradnelson@chromium.org
clemensh@chromium.org
-jpp@chromium.org
mtrofin@chromium.org
rossberg@chromium.org
titzer@chromium.org
diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc
index fb46026a98..516bce2543 100644
--- a/deps/v8/src/asmjs/asm-js.cc
+++ b/deps/v8/src/asmjs/asm-js.cc
@@ -6,10 +6,10 @@
#include "src/api-natives.h"
#include "src/api.h"
+#include "src/asmjs/asm-names.h"
#include "src/asmjs/asm-parser.h"
-#include "src/asmjs/asm-typer.h"
-#include "src/asmjs/asm-wasm-builder.h"
#include "src/assert-scope.h"
+#include "src/ast/ast.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/compilation-info.h"
#include "src/execution.h"
@@ -18,7 +18,6 @@
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
-#include "src/parsing/parse-info.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-js.h"
@@ -27,286 +26,266 @@
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-result.h"
-typedef uint8_t byte;
-
-using v8::internal::wasm::ErrorThrower;
-
namespace v8 {
namespace internal {
+const char* const AsmJs::kSingleFunctionName = "__single_function__";
+
namespace {
enum WasmDataEntries {
kWasmDataCompiledModule,
- kWasmDataForeignGlobals,
kWasmDataUsesArray,
- kWasmDataScript,
- kWasmDataScriptPosition,
kWasmDataEntryCount,
};
-Handle<i::Object> StdlibMathMember(i::Isolate* isolate,
- Handle<JSReceiver> stdlib,
- Handle<Name> name) {
- if (stdlib.is_null()) {
- return Handle<i::Object>();
- }
- Handle<i::Name> math_name(
+Handle<Object> StdlibMathMember(Isolate* isolate, Handle<JSReceiver> stdlib,
+ Handle<Name> name) {
+ Handle<Name> math_name(
isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("Math")));
- MaybeHandle<i::Object> maybe_math = i::Object::GetProperty(stdlib, math_name);
- if (maybe_math.is_null()) {
- return Handle<i::Object>();
- }
- Handle<i::Object> math = maybe_math.ToHandleChecked();
- if (!math->IsJSReceiver()) {
- return Handle<i::Object>();
- }
- MaybeHandle<i::Object> maybe_value = i::Object::GetProperty(math, name);
- if (maybe_value.is_null()) {
- return Handle<i::Object>();
- }
- return maybe_value.ToHandleChecked();
+ Handle<Object> math = JSReceiver::GetDataProperty(stdlib, math_name);
+ if (!math->IsJSReceiver()) return isolate->factory()->undefined_value();
+ Handle<JSReceiver> math_receiver = Handle<JSReceiver>::cast(math);
+ Handle<Object> value = JSReceiver::GetDataProperty(math_receiver, name);
+ return value;
}
-bool IsStdlibMemberValid(i::Isolate* isolate, Handle<JSReceiver> stdlib,
- Handle<i::Object> member_id) {
- int32_t member_kind;
- if (!member_id->ToInt32(&member_kind)) {
- UNREACHABLE();
- }
- switch (member_kind) {
- case wasm::AsmTyper::StandardMember::kNone:
- case wasm::AsmTyper::StandardMember::kModule:
- case wasm::AsmTyper::StandardMember::kStdlib:
- case wasm::AsmTyper::StandardMember::kHeap:
- case wasm::AsmTyper::StandardMember::kFFI: {
- // Nothing to check for these.
- return true;
- }
- case wasm::AsmTyper::StandardMember::kInfinity: {
- if (stdlib.is_null()) {
- return false;
- }
- Handle<i::Name> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("Infinity")));
- MaybeHandle<i::Object> maybe_value = i::Object::GetProperty(stdlib, name);
- if (maybe_value.is_null()) {
- return false;
- }
- Handle<i::Object> value = maybe_value.ToHandleChecked();
+bool IsStdlibMemberValid(Isolate* isolate, Handle<JSReceiver> stdlib,
+ wasm::AsmJsParser::StandardMember member,
+ bool* is_typed_array) {
+ switch (member) {
+ case wasm::AsmJsParser::StandardMember::kInfinity: {
+ Handle<Name> name = isolate->factory()->infinity_string();
+ Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
return value->IsNumber() && std::isinf(value->Number());
}
- case wasm::AsmTyper::StandardMember::kNaN: {
- if (stdlib.is_null()) {
- return false;
- }
- Handle<i::Name> name(isolate->factory()->InternalizeOneByteString(
- STATIC_CHAR_VECTOR("NaN")));
- MaybeHandle<i::Object> maybe_value = i::Object::GetProperty(stdlib, name);
- if (maybe_value.is_null()) {
- return false;
- }
- Handle<i::Object> value = maybe_value.ToHandleChecked();
+ case wasm::AsmJsParser::StandardMember::kNaN: {
+ Handle<Name> name = isolate->factory()->nan_string();
+ Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
return value->IsNaN();
}
-#define STDLIB_MATH_FUNC(CamelName, fname) \
- case wasm::AsmTyper::StandardMember::k##CamelName: { \
- Handle<i::Name> name(isolate->factory()->InternalizeOneByteString( \
- STATIC_CHAR_VECTOR(#fname))); \
- Handle<i::Object> value = StdlibMathMember(isolate, stdlib, name); \
- if (value.is_null() || !value->IsJSFunction()) { \
- return false; \
- } \
- Handle<i::JSFunction> func(i::JSFunction::cast(*value)); \
- return func->shared()->code() == \
- isolate->builtins()->builtin(Builtins::k##CamelName); \
+#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
+ case wasm::AsmJsParser::StandardMember::kMath##FName: { \
+ Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
+ STATIC_CHAR_VECTOR(#fname))); \
+ Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
+ if (!value->IsJSFunction()) return false; \
+ Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
+ return func->shared()->code() == \
+ isolate->builtins()->builtin(Builtins::kMath##FName); \
}
- STDLIB_MATH_FUNC(MathAcos, acos)
- STDLIB_MATH_FUNC(MathAsin, asin)
- STDLIB_MATH_FUNC(MathAtan, atan)
- STDLIB_MATH_FUNC(MathCos, cos)
- STDLIB_MATH_FUNC(MathSin, sin)
- STDLIB_MATH_FUNC(MathTan, tan)
- STDLIB_MATH_FUNC(MathExp, exp)
- STDLIB_MATH_FUNC(MathLog, log)
- STDLIB_MATH_FUNC(MathCeil, ceil)
- STDLIB_MATH_FUNC(MathFloor, floor)
- STDLIB_MATH_FUNC(MathSqrt, sqrt)
- STDLIB_MATH_FUNC(MathAbs, abs)
- STDLIB_MATH_FUNC(MathClz32, clz32)
- STDLIB_MATH_FUNC(MathMin, min)
- STDLIB_MATH_FUNC(MathMax, max)
- STDLIB_MATH_FUNC(MathAtan2, atan2)
- STDLIB_MATH_FUNC(MathPow, pow)
- STDLIB_MATH_FUNC(MathImul, imul)
- STDLIB_MATH_FUNC(MathFround, fround)
+ STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)
#undef STDLIB_MATH_FUNC
-#define STDLIB_MATH_CONST(cname, const_value) \
- case wasm::AsmTyper::StandardMember::kMath##cname: { \
- i::Handle<i::Name> name(isolate->factory()->InternalizeOneByteString( \
- STATIC_CHAR_VECTOR(#cname))); \
- i::Handle<i::Object> value = StdlibMathMember(isolate, stdlib, name); \
- return !value.is_null() && value->IsNumber() && \
- value->Number() == const_value; \
+#define STDLIB_MATH_CONST(cname, const_value) \
+ case wasm::AsmJsParser::StandardMember::kMath##cname: { \
+ Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
+ STATIC_CHAR_VECTOR(#cname))); \
+ Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
+ return value->IsNumber() && value->Number() == const_value; \
}
- STDLIB_MATH_CONST(E, 2.718281828459045)
- STDLIB_MATH_CONST(LN10, 2.302585092994046)
- STDLIB_MATH_CONST(LN2, 0.6931471805599453)
- STDLIB_MATH_CONST(LOG2E, 1.4426950408889634)
- STDLIB_MATH_CONST(LOG10E, 0.4342944819032518)
- STDLIB_MATH_CONST(PI, 3.141592653589793)
- STDLIB_MATH_CONST(SQRT1_2, 0.7071067811865476)
- STDLIB_MATH_CONST(SQRT2, 1.4142135623730951)
+ STDLIB_MATH_VALUE_LIST(STDLIB_MATH_CONST)
#undef STDLIB_MATH_CONST
- default: { UNREACHABLE(); }
+#define STDLIB_ARRAY_TYPE(fname, FName) \
+ case wasm::AsmJsParser::StandardMember::k##FName: { \
+ *is_typed_array = true; \
+ Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
+ STATIC_CHAR_VECTOR(#FName))); \
+ Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name); \
+ if (!value->IsJSFunction()) return false; \
+ Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
+ return func.is_identical_to(isolate->fname()); \
+ }
+ STDLIB_ARRAY_TYPE(int8_array_fun, Int8Array)
+ STDLIB_ARRAY_TYPE(uint8_array_fun, Uint8Array)
+ STDLIB_ARRAY_TYPE(int16_array_fun, Int16Array)
+ STDLIB_ARRAY_TYPE(uint16_array_fun, Uint16Array)
+ STDLIB_ARRAY_TYPE(int32_array_fun, Int32Array)
+ STDLIB_ARRAY_TYPE(uint32_array_fun, Uint32Array)
+ STDLIB_ARRAY_TYPE(float32_array_fun, Float32Array)
+ STDLIB_ARRAY_TYPE(float64_array_fun, Float64Array)
+#undef STDLIB_ARRAY_TYPE
}
+ UNREACHABLE();
return false;
}
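For illustration, inlining the STDLIB_MATH_FUNCTION_LIST entry for Math.acos into the STDLIB_MATH_FUNC macro above yields roughly the following case (a sketch with the macro arguments substituted, not literal patch content):

    case wasm::AsmJsParser::StandardMember::kMathAcos: {
      Handle<Name> name(isolate->factory()->InternalizeOneByteString(
          STATIC_CHAR_VECTOR("acos")));
      Handle<Object> value = StdlibMathMember(isolate, stdlib, name);
      if (!value->IsJSFunction()) return false;
      Handle<JSFunction> func = Handle<JSFunction>::cast(value);
      return func->shared()->code() ==
             isolate->builtins()->builtin(Builtins::kMathAcos);
    }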
+void Report(Handle<Script> script, int position, Vector<const char> text,
+ MessageTemplate::Template message_template,
+ v8::Isolate::MessageErrorLevel level) {
+ Isolate* isolate = script->GetIsolate();
+ MessageLocation location(script, position, position);
+ Handle<String> text_object = isolate->factory()->InternalizeUtf8String(text);
+ Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
+ isolate, message_template, &location, text_object,
+ Handle<FixedArray>::null());
+ message->set_error_level(level);
+ MessageHandler::ReportMessage(isolate, &location, message);
+}
+
+// Hook to report successful execution of the {AsmJs::CompileAsmViaWasm} phase.
+void ReportCompilationSuccess(Handle<Script> script, int position,
+ double translate_time, double compile_time,
+ size_t module_size) {
+ if (FLAG_suppress_asm_messages || !FLAG_trace_asm_time) return;
+ EmbeddedVector<char, 100> text;
+ int length = SNPrintF(
+ text, "success, asm->wasm: %0.3f ms, compile: %0.3f ms, %" PRIuS " bytes",
+ translate_time, compile_time, module_size);
+ CHECK_NE(-1, length);
+ text.Truncate(length);
+ Report(script, position, text, MessageTemplate::kAsmJsCompiled,
+ v8::Isolate::kMessageInfo);
+}
+
+// Hook to report failed execution of the {AsmJs::CompileAsmViaWasm} phase.
+void ReportCompilationFailure(Handle<Script> script, int position,
+ const char* reason) {
+ if (FLAG_suppress_asm_messages) return;
+ Vector<const char> text = CStrVector(reason);
+ Report(script, position, text, MessageTemplate::kAsmJsInvalid,
+ v8::Isolate::kMessageWarning);
+}
+
+// Hook to report successful execution of the {AsmJs::InstantiateAsmWasm} phase.
+void ReportInstantiationSuccess(Handle<Script> script, int position,
+ double instantiate_time) {
+ if (FLAG_suppress_asm_messages || !FLAG_trace_asm_time) return;
+ EmbeddedVector<char, 50> text;
+ int length = SNPrintF(text, "success, %0.3f ms", instantiate_time);
+ CHECK_NE(-1, length);
+ text.Truncate(length);
+ Report(script, position, text, MessageTemplate::kAsmJsInstantiated,
+ v8::Isolate::kMessageInfo);
+}
+
+// Hook to report failed execution of the {AsmJs::InstantiateAsmWasm} phase.
+void ReportInstantiationFailure(Handle<Script> script, int position,
+ const char* reason) {
+ if (FLAG_suppress_asm_messages) return;
+ Vector<const char> text = CStrVector(reason);
+ Report(script, position, text, MessageTemplate::kAsmJsLinkingFailed,
+ v8::Isolate::kMessageWarning);
+}
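These Report* helpers feed the regular V8 message pipeline via MessageHandler::ReportMessage, so an embedder can observe the info- and warning-level asm.js diagnostics. A minimal embedder-side sketch, assuming the public v8::Isolate::AddMessageListenerWithErrorLevel API (embedder code, not part of this patch):

    #include <cstdio>
    #include "include/v8.h"

    void OnAsmMessage(v8::Local<v8::Message> message,
                      v8::Local<v8::Value> /*data*/) {
      v8::String::Utf8Value text(message->Get());
      std::fprintf(stderr, "[asm.js] %s\n", *text ? *text : "<unprintable>");
    }

    // During isolate setup:
    // isolate->AddMessageListenerWithErrorLevel(
    //     OnAsmMessage,
    //     v8::Isolate::kMessageInfo | v8::Isolate::kMessageWarning);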
+
} // namespace
MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
wasm::ZoneBuffer* module = nullptr;
wasm::ZoneBuffer* asm_offsets = nullptr;
Handle<FixedArray> uses_array;
- Handle<FixedArray> foreign_globals;
- base::ElapsedTimer asm_wasm_timer;
- asm_wasm_timer.Start();
- wasm::AsmWasmBuilder builder(info);
- if (FLAG_fast_validate_asm) {
- wasm::AsmJsParser parser(info->isolate(), info->zone(), info->script(),
+ Handle<WasmModuleObject> compiled;
+
+ // The compilation of asm.js modules is split into two distinct steps:
+ // [1] The asm.js module source is parsed, validated, and translated to a
+ // valid WebAssembly module. The results are two vectors representing the
+ // encoded module as well as encoded source position information.
+ // [2] The module is handed to WebAssembly, which decodes it into an internal
+ // representation and eventually compiles it to machine code.
+ double translate_time; // Time (milliseconds) taken to execute step [1].
+ double compile_time; // Time (milliseconds) taken to execute step [2].
+
+ // Step 1: Translate asm.js module to WebAssembly module.
+ {
+ HistogramTimerScope translate_time_scope(
+ info->isolate()->counters()->asm_wasm_translation_time());
+ size_t compile_zone_start = info->zone()->allocation_size();
+ base::ElapsedTimer translate_timer;
+ translate_timer.Start();
+
+ Zone* compile_zone = info->zone();
+ Zone translate_zone(info->isolate()->allocator(), ZONE_NAME);
+ wasm::AsmJsParser parser(info->isolate(), &translate_zone, info->script(),
info->literal()->start_position(),
info->literal()->end_position());
if (!parser.Run()) {
DCHECK(!info->isolate()->has_pending_exception());
- if (!FLAG_suppress_asm_messages) {
- MessageLocation location(info->script(), parser.failure_location(),
- parser.failure_location());
- Handle<String> message =
- info->isolate()
- ->factory()
- ->NewStringFromUtf8(CStrVector(parser.failure_message()))
- .ToHandleChecked();
- Handle<JSMessageObject> error_message =
- MessageHandler::MakeMessageObject(
- info->isolate(), MessageTemplate::kAsmJsInvalid, &location,
- message, Handle<JSArray>::null());
- error_message->set_error_level(v8::Isolate::kMessageWarning);
- MessageHandler::ReportMessage(info->isolate(), &location,
- error_message);
- }
+ ReportCompilationFailure(info->script(), parser.failure_location(),
+ parser.failure_message());
return MaybeHandle<FixedArray>();
}
- Zone* zone = info->zone();
- module = new (zone) wasm::ZoneBuffer(zone);
+ module = new (compile_zone) wasm::ZoneBuffer(compile_zone);
parser.module_builder()->WriteTo(*module);
- asm_offsets = new (zone) wasm::ZoneBuffer(zone);
+ asm_offsets = new (compile_zone) wasm::ZoneBuffer(compile_zone);
parser.module_builder()->WriteAsmJsOffsetTable(*asm_offsets);
- // TODO(bradnelson): Remove foreign_globals plumbing (as we don't need it
- // for the new parser).
- foreign_globals = info->isolate()->factory()->NewFixedArray(0);
uses_array = info->isolate()->factory()->NewFixedArray(
static_cast<int>(parser.stdlib_uses()->size()));
int count = 0;
for (auto i : *parser.stdlib_uses()) {
uses_array->set(count++, Smi::FromInt(i));
}
- } else {
- auto asm_wasm_result = builder.Run(&foreign_globals);
- if (!asm_wasm_result.success) {
- DCHECK(!info->isolate()->has_pending_exception());
- if (!FLAG_suppress_asm_messages) {
- MessageHandler::ReportMessage(info->isolate(),
- builder.typer()->message_location(),
- builder.typer()->error_message());
- }
- return MaybeHandle<FixedArray>();
- }
- module = asm_wasm_result.module_bytes;
- asm_offsets = asm_wasm_result.asm_offset_table;
- wasm::AsmTyper::StdlibSet uses = builder.typer()->StdlibUses();
- uses_array = info->isolate()->factory()->NewFixedArray(
- static_cast<int>(uses.size()));
- int count = 0;
- for (auto i : uses) {
- uses_array->set(count++, Smi::FromInt(i));
+ size_t compile_zone_size =
+ info->zone()->allocation_size() - compile_zone_start;
+ size_t translate_zone_size = translate_zone.allocation_size();
+ info->isolate()
+ ->counters()
+ ->asm_wasm_translation_peak_memory_bytes()
+ ->AddSample(static_cast<int>(translate_zone_size));
+ translate_time = translate_timer.Elapsed().InMillisecondsF();
+ if (FLAG_trace_asm_parser) {
+ PrintF(
+ "[asm.js translation successful: time=%0.3fms, "
+ "translate_zone=%" PRIuS "KB, compile_zone+=%" PRIuS "KB]\n",
+ translate_time, translate_zone_size / KB, compile_zone_size / KB);
}
}
- double asm_wasm_time = asm_wasm_timer.Elapsed().InMillisecondsF();
- Vector<const byte> asm_offsets_vec(asm_offsets->begin(),
- static_cast<int>(asm_offsets->size()));
-
- base::ElapsedTimer compile_timer;
- compile_timer.Start();
- ErrorThrower thrower(info->isolate(), "Asm.js -> WebAssembly conversion");
- MaybeHandle<JSObject> compiled = SyncCompileTranslatedAsmJs(
- info->isolate(), &thrower,
- wasm::ModuleWireBytes(module->begin(), module->end()), info->script(),
- asm_offsets_vec);
- DCHECK(!compiled.is_null());
- DCHECK(!thrower.error());
- double compile_time = compile_timer.Elapsed().InMillisecondsF();
- DCHECK_GE(module->end(), module->begin());
- uintptr_t wasm_size = module->end() - module->begin();
+ // Step 2: Decode and compile the WebAssembly module.
+ {
+ base::ElapsedTimer compile_timer;
+ compile_timer.Start();
+ wasm::ErrorThrower thrower(info->isolate(), "AsmJs::Compile");
+ MaybeHandle<WasmModuleObject> maybe_compiled = SyncCompileTranslatedAsmJs(
+ info->isolate(), &thrower,
+ wasm::ModuleWireBytes(module->begin(), module->end()), info->script(),
+ Vector<const byte>(asm_offsets->begin(), asm_offsets->size()));
+ DCHECK(!maybe_compiled.is_null());
+ DCHECK(!thrower.error());
+ compile_time = compile_timer.Elapsed().InMillisecondsF();
+ compiled = maybe_compiled.ToHandleChecked();
+ }
+ // The result is a compiled module and serialized standard library uses.
Handle<FixedArray> result =
info->isolate()->factory()->NewFixedArray(kWasmDataEntryCount);
- result->set(kWasmDataCompiledModule, *compiled.ToHandleChecked());
- result->set(kWasmDataForeignGlobals, *foreign_globals);
+ result->set(kWasmDataCompiledModule, *compiled);
result->set(kWasmDataUsesArray, *uses_array);
- result->set(kWasmDataScript, *info->script());
- result->set(kWasmDataScriptPosition,
- Smi::FromInt(info->literal()->position()));
-
- MessageLocation location(info->script(), info->literal()->position(),
- info->literal()->position());
- char text[100];
- int length;
- if (FLAG_predictable) {
- length = base::OS::SNPrintF(text, arraysize(text), "success");
- } else {
- length = base::OS::SNPrintF(
- text, arraysize(text),
- "success, asm->wasm: %0.3f ms, compile: %0.3f ms, %" PRIuPTR " bytes",
- asm_wasm_time, compile_time, wasm_size);
- }
- DCHECK_NE(-1, length);
- USE(length);
- Handle<String> stext(info->isolate()->factory()->InternalizeUtf8String(text));
- Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
- info->isolate(), MessageTemplate::kAsmJsCompiled, &location, stext,
- Handle<JSArray>::null());
- message->set_error_level(v8::Isolate::kMessageInfo);
- if (!FLAG_suppress_asm_messages && FLAG_trace_asm_time) {
- MessageHandler::ReportMessage(info->isolate(), &location, message);
- }
-
+ ReportCompilationSuccess(info->script(), info->literal()->position(),
+ translate_time, compile_time, module->size());
return result;
}
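Together with {AsmJs::InstantiateAsmWasm} below, the intended caller-side flow is roughly as follows (a sketch; the actual call sites live in the compiler pipeline, and the handle names here are illustrative):

    // Compile time, on encountering a "use asm" module:
    MaybeHandle<FixedArray> wasm_data = AsmJs::CompileAsmViaWasm(info);
    // A null result means validation failed and the caller falls back to
    // compiling the module as ordinary JavaScript.

    // Run time, when the asm.js module function is invoked:
    MaybeHandle<Object> instance = AsmJs::InstantiateAsmWasm(
        isolate, shared, wasm_data.ToHandleChecked(), stdlib, foreign, memory);
    // A null result again triggers fallback to ordinary JavaScript execution.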
-bool AsmJs::IsStdlibValid(i::Isolate* isolate, Handle<FixedArray> wasm_data,
- Handle<JSReceiver> stdlib) {
- i::Handle<i::FixedArray> uses(
- i::FixedArray::cast(wasm_data->get(kWasmDataUsesArray)));
- for (int i = 0; i < uses->length(); ++i) {
- if (!IsStdlibMemberValid(isolate, stdlib,
- uses->GetValueChecked<i::Object>(isolate, i))) {
- return false;
- }
- }
- return true;
-}
-
-MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
+MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
+ Handle<SharedFunctionInfo> shared,
Handle<FixedArray> wasm_data,
- Handle<JSArrayBuffer> memory,
- Handle<JSReceiver> foreign) {
+ Handle<JSReceiver> stdlib,
+ Handle<JSReceiver> foreign,
+ Handle<JSArrayBuffer> memory) {
base::ElapsedTimer instantiate_timer;
instantiate_timer.Start();
- i::Handle<i::WasmModuleObject> module(
- i::WasmModuleObject::cast(wasm_data->get(kWasmDataCompiledModule)));
- i::Handle<i::FixedArray> foreign_globals(
- i::FixedArray::cast(wasm_data->get(kWasmDataForeignGlobals)));
+ Handle<FixedArray> stdlib_uses(
+ FixedArray::cast(wasm_data->get(kWasmDataUsesArray)));
+ Handle<WasmModuleObject> module(
+ WasmModuleObject::cast(wasm_data->get(kWasmDataCompiledModule)));
+ Handle<Script> script(Script::cast(shared->script()));
+ // TODO(mstarzinger): The position currently points to the module definition
+ // but should instead point to the instantiation site (more intuitive).
+ int position = shared->start_position();
+
+ // Check that all used stdlib members are valid.
+ bool stdlib_use_of_typed_array_present = false;
+ for (int i = 0; i < stdlib_uses->length(); ++i) {
+ if (stdlib.is_null()) {
+ ReportInstantiationFailure(script, position, "Requires standard library");
+ return MaybeHandle<Object>();
+ }
+ int member_id = Smi::cast(stdlib_uses->get(i))->value();
+ wasm::AsmJsParser::StandardMember member =
+ static_cast<wasm::AsmJsParser::StandardMember>(member_id);
+ if (!IsStdlibMemberValid(isolate, stdlib, member,
+ &stdlib_use_of_typed_array_present)) {
+ ReportInstantiationFailure(script, position, "Unexpected stdlib member");
+ return MaybeHandle<Object>();
+ }
+ }
// Create the ffi object for foreign functions {"": foreign}.
Handle<JSObject> ffi_object;
@@ -318,85 +297,47 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
foreign, NONE);
}
- ErrorThrower thrower(isolate, "Asm.js -> WebAssembly instantiation");
- i::MaybeHandle<i::Object> maybe_module_object =
- i::wasm::SyncInstantiate(isolate, &thrower, module, ffi_object, memory);
+ // Check that a valid heap buffer is provided if required.
+ if (stdlib_use_of_typed_array_present) {
+ if (memory.is_null()) {
+ ReportInstantiationFailure(script, position, "Requires heap buffer");
+ return MaybeHandle<Object>();
+ }
+ size_t size = NumberToSize(memory->byte_length());
+ // TODO(mstarzinger): We currently only require the byte length of the
+ // buffer to be a multiple of 8; we should enforce the stricter spec limits.
+ if (size % FixedTypedArrayBase::kMaxElementSize != 0) {
+ ReportInstantiationFailure(script, position, "Unexpected heap size");
+ return MaybeHandle<Object>();
+ }
+ }
+
+ wasm::ErrorThrower thrower(isolate, "AsmJs::Instantiate");
+ MaybeHandle<Object> maybe_module_object =
+ wasm::SyncInstantiate(isolate, &thrower, module, ffi_object, memory);
if (maybe_module_object.is_null()) {
- thrower.Reify(); // Ensure exceptions do not propagate.
+ thrower.Reset(); // Ensure exceptions do not propagate.
+ ReportInstantiationFailure(script, position, "Internal wasm failure");
return MaybeHandle<Object>();
}
DCHECK(!thrower.error());
- i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();
+ Handle<Object> module_object = maybe_module_object.ToHandleChecked();
- if (!FLAG_fast_validate_asm) {
- i::Handle<i::Name> init_name(isolate->factory()->InternalizeUtf8String(
- wasm::AsmWasmBuilder::foreign_init_name));
- i::Handle<i::Object> init =
- i::Object::GetProperty(module_object, init_name).ToHandleChecked();
+ ReportInstantiationSuccess(script, position,
+ instantiate_timer.Elapsed().InMillisecondsF());
- i::Handle<i::Object> undefined(isolate->heap()->undefined_value(), isolate);
- i::Handle<i::Object>* foreign_args_array =
- new i::Handle<i::Object>[foreign_globals->length()];
- for (int j = 0; j < foreign_globals->length(); j++) {
- if (!foreign.is_null()) {
- i::MaybeHandle<i::Name> name = i::Object::ToName(
- isolate, i::Handle<i::Object>(foreign_globals->get(j), isolate));
- if (!name.is_null()) {
- i::MaybeHandle<i::Object> val =
- i::Object::GetProperty(foreign, name.ToHandleChecked());
- if (!val.is_null()) {
- foreign_args_array[j] = val.ToHandleChecked();
- continue;
- }
- }
- }
- foreign_args_array[j] = undefined;
- }
- i::MaybeHandle<i::Object> retval =
- i::Execution::Call(isolate, init, undefined, foreign_globals->length(),
- foreign_args_array);
- delete[] foreign_args_array;
- DCHECK(!retval.is_null());
- }
-
- i::Handle<i::Name> single_function_name(
- isolate->factory()->InternalizeUtf8String(
- wasm::AsmWasmBuilder::single_function_name));
- i::MaybeHandle<i::Object> single_function =
- i::Object::GetProperty(module_object, single_function_name);
+ Handle<Name> single_function_name(
+ isolate->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName));
+ MaybeHandle<Object> single_function =
+ Object::GetProperty(module_object, single_function_name);
if (!single_function.is_null() &&
!single_function.ToHandleChecked()->IsUndefined(isolate)) {
return single_function;
}
- i::Handle<i::Script> script(i::Script::cast(wasm_data->get(kWasmDataScript)));
- int32_t position = 0;
- if (!wasm_data->get(kWasmDataScriptPosition)->ToInt32(&position)) {
- UNREACHABLE();
- }
- MessageLocation location(script, position, position);
- char text[50];
- int length;
- if (FLAG_predictable) {
- length = base::OS::SNPrintF(text, arraysize(text), "success");
- } else {
- length = base::OS::SNPrintF(text, arraysize(text), "success, %0.3f ms",
- instantiate_timer.Elapsed().InMillisecondsF());
- }
- DCHECK_NE(-1, length);
- USE(length);
- Handle<String> stext(isolate->factory()->InternalizeUtf8String(text));
- Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
- isolate, MessageTemplate::kAsmJsInstantiated, &location, stext,
- Handle<JSArray>::null());
- message->set_error_level(v8::Isolate::kMessageInfo);
- if (!FLAG_suppress_asm_messages && FLAG_trace_asm_time) {
- MessageHandler::ReportMessage(isolate, &location, message);
- }
-
Handle<String> exports_name =
isolate->factory()->InternalizeUtf8String("exports");
- return i::Object::GetProperty(module_object, exports_name);
+ return Object::GetProperty(module_object, exports_name);
}
} // namespace internal
diff --git a/deps/v8/src/asmjs/asm-js.h b/deps/v8/src/asmjs/asm-js.h
index a7795dc541..4d3e9035f0 100644
--- a/deps/v8/src/asmjs/asm-js.h
+++ b/deps/v8/src/asmjs/asm-js.h
@@ -5,6 +5,8 @@
#ifndef V8_ASMJS_ASM_JS_H_
#define V8_ASMJS_ASM_JS_H_
+// Clients of this interface shouldn't depend on lots of asmjs internals.
+// Do not include anything from src/asmjs here!
#include "src/globals.h"
namespace v8 {
@@ -12,19 +14,25 @@ namespace internal {
class CompilationInfo;
class JSArrayBuffer;
+class SharedFunctionInfo;
-// Interface to compile and instantiate for asmjs.
+// Interface to compile and instantiate for asm.js modules.
class AsmJs {
public:
static MaybeHandle<FixedArray> CompileAsmViaWasm(CompilationInfo* info);
- static bool IsStdlibValid(Isolate* isolate, Handle<FixedArray> wasm_data,
- Handle<JSReceiver> stdlib);
static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,
+ Handle<SharedFunctionInfo>,
Handle<FixedArray> wasm_data,
- Handle<JSArrayBuffer> memory,
- Handle<JSReceiver> foreign);
+ Handle<JSReceiver> stdlib,
+ Handle<JSReceiver> foreign,
+ Handle<JSArrayBuffer> memory);
+
+ // Special export name used to indicate that the module exports a single
+ // function instead of a JavaScript object holding multiple functions.
+ static const char* const kSingleFunctionName;
};
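The definition of this constant is not part of the shown hunks; presumably it lives in asm-js.cc with the literal the parser previously hard-coded (see the asm-parser.cc hunk below, where "__single_function__" is replaced by this name):

    const char* const AsmJs::kSingleFunctionName = "__single_function__";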
} // namespace internal
} // namespace v8
-#endif
+
+#endif // V8_ASMJS_ASM_JS_H_
diff --git a/deps/v8/src/asmjs/asm-names.h b/deps/v8/src/asmjs/asm-names.h
index 1cc151dc4c..12b485363a 100644
--- a/deps/v8/src/asmjs/asm-names.h
+++ b/deps/v8/src/asmjs/asm-names.h
@@ -5,15 +5,16 @@
#ifndef V8_ASMJS_ASM_NAMES_H_
#define V8_ASMJS_ASM_NAMES_H_
+// V(stdlib.Math.<name>, constant-value)
#define STDLIB_MATH_VALUE_LIST(V) \
- V(E) \
- V(LN10) \
- V(LN2) \
- V(LOG2E) \
- V(LOG10E) \
- V(PI) \
- V(SQRT1_2) \
- V(SQRT2)
+ V(E, 2.718281828459045) \
+ V(LN10, 2.302585092994046) \
+ V(LN2, 0.6931471805599453) \
+ V(LOG2E, 1.4426950408889634) \
+ V(LOG10E, 0.4342944819032518) \
+ V(PI, 3.141592653589793) \
+ V(SQRT1_2, 0.7071067811865476) \
+ V(SQRT2, 1.4142135623730951)
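Carrying the constant values in the list is the usual X-macro pattern: each client instantiates V to generate the code it needs, as asm-js.cc and asm-parser.cc do above. A hypothetical consumer, for illustration only:

    #define V(name, value) {#name, value},
    static const struct { const char* name; double value; } kMathConstants[] = {
        STDLIB_MATH_VALUE_LIST(V)
    };
    #undef V
    // Expands to {"E", 2.718281828459045}, {"LN10", 2.302585092994046}, ...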
// V(stdlib.Math.<name>, Name, wasm-opcode, asm-js-type)
#define STDLIB_MATH_FUNCTION_MONOMORPHIC_LIST(V) \
@@ -107,4 +108,4 @@
V(kUnsigned, -3, "{unsigned value}") \
V(kDouble, -4, "{double value}")
-#endif
+#endif // V8_ASMJS_ASM_NAMES_H_
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index c18f7d1bf2..51b8f7bbc2 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -4,21 +4,17 @@
#include "src/asmjs/asm-parser.h"
-// Required to get M_E etc. for MSVC.
-// References from STDLIB_MATH_VALUE_LIST in asm-names.h
-#if defined(_WIN32)
-#define _USE_MATH_DEFINES
-#endif
#include <math.h>
#include <string.h>
#include <algorithm>
+#include "src/asmjs/asm-js.h"
#include "src/asmjs/asm-types.h"
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/parsing/scanner-character-streams.h"
-#include "src/wasm/wasm-macro-gen.h"
+#include "src/parsing/scanner.h"
#include "src/wasm/wasm-opcodes.h"
namespace v8 {
@@ -29,23 +25,22 @@ namespace wasm {
#define FAIL_AND_RETURN(ret, msg) \
failed_ = true; \
failure_message_ = msg; \
- failure_location_ = scanner_.GetPosition(); \
+ failure_location_ = static_cast<int>(scanner_.Position()); \
if (FLAG_trace_asm_parser) { \
PrintF("[asm.js failure: %s, token: '%s', see: %s:%d]\n", msg, \
scanner_.Name(scanner_.Token()).c_str(), __FILE__, __LINE__); \
} \
return ret;
#else
-#define FAIL_AND_RETURN(ret, msg) \
- failed_ = true; \
- failure_message_ = msg; \
- failure_location_ = scanner_.GetPosition(); \
+#define FAIL_AND_RETURN(ret, msg) \
+ failed_ = true; \
+ failure_message_ = msg; \
+ failure_location_ = static_cast<int>(scanner_.Position()); \
return ret;
#endif
#define FAIL(msg) FAIL_AND_RETURN(, msg)
#define FAILn(msg) FAIL_AND_RETURN(nullptr, msg)
-#define FAILf(msg) FAIL_AND_RETURN(false, msg)
#define EXPECT_TOKEN_OR_RETURN(ret, token) \
do { \
@@ -57,7 +52,6 @@ namespace wasm {
#define EXPECT_TOKEN(token) EXPECT_TOKEN_OR_RETURN(, token)
#define EXPECT_TOKENn(token) EXPECT_TOKEN_OR_RETURN(nullptr, token)
-#define EXPECT_TOKENf(token) EXPECT_TOKEN_OR_RETURN(false, token)
#define RECURSE_OR_RETURN(ret, call) \
do { \
@@ -71,7 +65,6 @@ namespace wasm {
#define RECURSE(call) RECURSE_OR_RETURN(, call)
#define RECURSEn(call) RECURSE_OR_RETURN(nullptr, call)
-#define RECURSEf(call) RECURSE_OR_RETURN(false, call)
#define TOK(name) AsmJsScanner::kToken_##name
@@ -91,6 +84,8 @@ AsmJsParser::AsmJsParser(Isolate* isolate, Zone* zone, Handle<Script> script,
inside_heap_assignment_(false),
heap_access_type_(nullptr),
block_stack_(zone),
+ call_coercion_(nullptr),
+ call_coercion_deferred_(nullptr),
pending_label_(0),
global_imports_(zone) {
InitializeStdlibTypes();
@@ -161,7 +156,7 @@ FunctionSig* AsmJsParser::ConvertSignature(
} else if (param->IsA(AsmType::Int())) {
sig_builder.AddParam(kWasmI32);
} else {
- return nullptr;
+ UNREACHABLE();
}
}
if (!return_type->IsA(AsmType::Void())) {
@@ -172,7 +167,7 @@ FunctionSig* AsmJsParser::ConvertSignature(
} else if (return_type->IsA(AsmType::Signed())) {
sig_builder.AddReturn(kWasmI32);
} else {
- return 0;
+ UNREACHABLE();
}
}
return sig_builder.Build();
@@ -200,16 +195,6 @@ class AsmJsParser::TemporaryVariableScope {
int local_depth_;
};
-AsmJsParser::VarInfo::VarInfo()
- : type(AsmType::None()),
- function_builder(nullptr),
- import(nullptr),
- mask(-1),
- index(0),
- kind(VarKind::kUnused),
- mutable_variable(true),
- function_defined(false) {}
-
wasm::AsmJsParser::VarInfo* AsmJsParser::GetVarInfo(
AsmJsScanner::token_t token) {
if (AsmJsScanner::IsGlobal(token)) {
@@ -234,52 +219,20 @@ wasm::AsmJsParser::VarInfo* AsmJsParser::GetVarInfo(
}
uint32_t AsmJsParser::VarIndex(VarInfo* info) {
- if (info->import != nullptr) {
- return info->index;
- } else {
- return info->index + static_cast<uint32_t>(global_imports_.size());
- }
+ DCHECK(info->kind == VarKind::kGlobal);
+ return info->index + static_cast<uint32_t>(global_imports_.size());
}
-void AsmJsParser::AddGlobalImport(std::string name, AsmType* type,
+void AsmJsParser::AddGlobalImport(Vector<const char> name, AsmType* type,
ValueType vtype, bool mutable_variable,
VarInfo* info) {
- // TODO(bradnelson): Refactor memory management here.
- // AsmModuleBuilder should really own import names.
- char* name_data = zone()->NewArray<char>(name.size());
- memcpy(name_data, name.data(), name.size());
- if (mutable_variable) {
- // Allocate a separate variable for the import.
- DeclareGlobal(info, true, type, vtype);
- // Record the need to initialize the global from the import.
- global_imports_.push_back({name_data, name.size(), 0, info->index, true});
- } else {
- // Just use the import directly.
- global_imports_.push_back({name_data, name.size(), 0, info->index, false});
- }
- GlobalImport& gi = global_imports_.back();
- // TODO(bradnelson): Reuse parse buffer memory / make wasm-module-builder
- // managed the memory for the import name (currently have to keep our
- // own memory for it).
- gi.import_index = module_builder_->AddGlobalImport(
- name_data, static_cast<int>(name.size()), vtype);
- if (!mutable_variable) {
- info->DeclareGlobalImport(type, gi.import_index);
- }
-}
-
-void AsmJsParser::VarInfo::DeclareGlobalImport(AsmType* type, uint32_t index) {
- kind = VarKind::kGlobal;
- this->type = type;
- this->index = index;
- mutable_variable = false;
-}
+ // Allocate a separate variable for the import.
+ // TODO(mstarzinger): Consider using the imported global directly instead of
+ // allocating a separate global variable for immutable (i.e. const) imports.
+ DeclareGlobal(info, mutable_variable, type, vtype);
-void AsmJsParser::VarInfo::DeclareStdlibFunc(VarKind kind, AsmType* type) {
- this->kind = kind;
- this->type = type;
- index = 0; // unused
- mutable_variable = false;
+ // Record the need to initialize the global from the import.
+ global_imports_.push_back({name, vtype, info});
}
void AsmJsParser::DeclareGlobal(VarInfo* info, bool mutable_variable,
@@ -291,6 +244,14 @@ void AsmJsParser::DeclareGlobal(VarInfo* info, bool mutable_variable,
info->mutable_variable = mutable_variable;
}
+void AsmJsParser::DeclareStdlibFunc(VarInfo* info, VarKind kind,
+ AsmType* type) {
+ info->kind = kind;
+ info->type = type;
+ info->index = 0; // unused
+ info->mutable_variable = false;
+}
+
uint32_t AsmJsParser::TempVariable(int index) {
if (index + 1 > function_temp_locals_used_) {
function_temp_locals_used_ = index + 1;
@@ -298,6 +259,13 @@ uint32_t AsmJsParser::TempVariable(int index) {
return function_temp_locals_offset_ + index;
}
+Vector<const char> AsmJsParser::CopyCurrentIdentifierString() {
+ const std::string& str = scanner_.GetIdentifierString();
+ char* buffer = zone()->NewArray<char>(str.size());
+ str.copy(buffer, str.size());
+ return Vector<const char>(buffer, static_cast<int>(str.size()));
+}
+
void AsmJsParser::SkipSemicolon() {
if (Check(';')) {
// Had a semicolon.
@@ -385,13 +353,11 @@ void AsmJsParser::ValidateModule() {
// Add start function to init things.
WasmFunctionBuilder* start = module_builder_->AddFunction();
module_builder_->MarkStartFunction(start);
- for (auto global_import : global_imports_) {
- if (global_import.needs_init) {
- start->EmitWithVarInt(kExprGetGlobal, global_import.import_index);
- start->EmitWithVarInt(kExprSetGlobal,
- static_cast<uint32_t>(global_import.global_index +
- global_imports_.size()));
- }
+ for (auto& global_import : global_imports_) {
+ uint32_t import_index = module_builder_->AddGlobalImport(
+ global_import.import_name, global_import.value_type);
+ start->EmitWithI32V(kExprGetGlobal, import_index);
+ start->EmitWithI32V(kExprSetGlobal, VarIndex(global_import.var_info));
}
start->Emit(kExprEnd);
FunctionSig::Builder b(zone(), 0, 0);
@@ -459,7 +425,7 @@ void AsmJsParser::ValidateModuleVar(bool mutable_variable) {
}
EXPECT_TOKEN('=');
double dvalue = 0.0;
- uint64_t uvalue = 0;
+ uint32_t uvalue = 0;
if (CheckForDouble(&dvalue)) {
DeclareGlobal(info, mutable_variable, AsmType::Double(), kWasmF64,
WasmInitExpr(dvalue));
@@ -489,8 +455,8 @@ void AsmJsParser::ValidateModuleVar(bool mutable_variable) {
} else if (Check(stdlib_name_)) {
EXPECT_TOKEN('.');
RECURSE(ValidateModuleVarStdlib(info));
- } else if (ValidateModuleVarImport(info, mutable_variable)) {
- // Handled inside.
+ } else if (Peek(foreign_name_) || Peek('+')) {
+ RECURSE(ValidateModuleVarImport(info, mutable_variable));
} else if (scanner_.IsGlobal()) {
RECURSE(ValidateModuleVarFromGlobal(info, mutable_variable));
} else {
@@ -526,7 +492,7 @@ void AsmJsParser::ValidateModuleVarFromGlobal(VarInfo* info,
negate = true;
}
double dvalue = 0.0;
- uint64_t uvalue = 0;
+ uint32_t uvalue = 0;
if (CheckForDouble(&dvalue)) {
if (negate) {
dvalue = -dvalue;
@@ -547,38 +513,31 @@ void AsmJsParser::ValidateModuleVarFromGlobal(VarInfo* info,
}
// 6.1 ValidateModule - foreign imports
-bool AsmJsParser::ValidateModuleVarImport(VarInfo* info,
+void AsmJsParser::ValidateModuleVarImport(VarInfo* info,
bool mutable_variable) {
if (Check('+')) {
- EXPECT_TOKENf(foreign_name_);
- EXPECT_TOKENf('.');
- AddGlobalImport(scanner_.GetIdentifierString(), AsmType::Double(), kWasmF64,
- mutable_variable, info);
+ EXPECT_TOKEN(foreign_name_);
+ EXPECT_TOKEN('.');
+ Vector<const char> name = CopyCurrentIdentifierString();
+ AddGlobalImport(name, AsmType::Double(), kWasmF64, mutable_variable, info);
scanner_.Next();
- return true;
- } else if (Check(foreign_name_)) {
- EXPECT_TOKENf('.');
- std::string import_name = scanner_.GetIdentifierString();
+ } else {
+ EXPECT_TOKEN(foreign_name_);
+ EXPECT_TOKEN('.');
+ Vector<const char> name = CopyCurrentIdentifierString();
scanner_.Next();
if (Check('|')) {
if (!CheckForZero()) {
- FAILf("Expected |0 type annotation for foreign integer import");
+ FAIL("Expected |0 type annotation for foreign integer import");
}
- AddGlobalImport(import_name, AsmType::Int(), kWasmI32, mutable_variable,
- info);
- return true;
+ AddGlobalImport(name, AsmType::Int(), kWasmI32, mutable_variable, info);
+ } else {
+ info->kind = VarKind::kImportedFunction;
+ info->import = new (zone()->New(sizeof(FunctionImportInfo)))
+ FunctionImportInfo({name, WasmModuleBuilder::SignatureMap(zone())});
+ info->mutable_variable = false;
}
- info->kind = VarKind::kImportedFunction;
- function_import_info_.resize(function_import_info_.size() + 1);
- info->import = &function_import_info_.back();
- // TODO(bradnelson): Refactor memory management here.
- // AsmModuleBuilder should really own import names.
- info->import->function_name = zone()->NewArray<char>(import_name.size());
- memcpy(info->import->function_name, import_name.data(), import_name.size());
- info->import->function_name_size = import_name.size();
- return true;
}
- return false;
}
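For orientation, the three foreign-import shapes this function accepts, written as illustrative asm.js source (comments only, not patch content):

    // Double import:   var d = +foreign.alpha;    (kWasmF64 global)
    // Integer import:  var i = foreign.beta | 0;  (kWasmI32 global)
    // Function import: var f = foreign.gamma;     (VarKind::kImportedFunction)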
// 6.1 ValidateModule - one variable
@@ -589,7 +548,8 @@ void AsmJsParser::ValidateModuleVarNewStdlib(VarInfo* info) {
switch (Consume()) {
#define V(name, _junk1, _junk2, _junk3) \
case TOK(name): \
- info->DeclareStdlibFunc(VarKind::kSpecial, AsmType::name()); \
+ DeclareStdlibFunc(info, VarKind::kSpecial, AsmType::name()); \
+ stdlib_uses_.insert(StandardMember::k##name); \
break;
STDLIB_ARRAY_TYPE_LIST(V)
#undef V
@@ -608,18 +568,18 @@ void AsmJsParser::ValidateModuleVarStdlib(VarInfo* info) {
if (Check(TOK(Math))) {
EXPECT_TOKEN('.');
switch (Consume()) {
-#define V(name) \
+#define V(name, const_value) \
case TOK(name): \
DeclareGlobal(info, false, AsmType::Double(), kWasmF64, \
- WasmInitExpr(M_##name)); \
- stdlib_uses_.insert(AsmTyper::kMath##name); \
+ WasmInitExpr(const_value)); \
+ stdlib_uses_.insert(StandardMember::kMath##name); \
break;
STDLIB_MATH_VALUE_LIST(V)
#undef V
#define V(name, Name, op, sig) \
case TOK(name): \
- info->DeclareStdlibFunc(VarKind::kMath##Name, stdlib_##sig##_); \
- stdlib_uses_.insert(AsmTyper::kMath##Name); \
+ DeclareStdlibFunc(info, VarKind::kMath##Name, stdlib_##sig##_); \
+ stdlib_uses_.insert(StandardMember::kMath##Name); \
break;
STDLIB_MATH_FUNCTION_LIST(V)
#undef V
@@ -629,11 +589,11 @@ void AsmJsParser::ValidateModuleVarStdlib(VarInfo* info) {
} else if (Check(TOK(Infinity))) {
DeclareGlobal(info, false, AsmType::Double(), kWasmF64,
WasmInitExpr(std::numeric_limits<double>::infinity()));
- stdlib_uses_.insert(AsmTyper::kInfinity);
+ stdlib_uses_.insert(StandardMember::kInfinity);
} else if (Check(TOK(NaN))) {
DeclareGlobal(info, false, AsmType::Double(), kWasmF64,
WasmInitExpr(std::numeric_limits<double>::quiet_NaN()));
- stdlib_uses_.insert(AsmTyper::kNaN);
+ stdlib_uses_.insert(StandardMember::kNaN);
} else {
FAIL("Invalid member of stdlib");
}
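Similarly, the stdlib member shapes handled here, as illustrative asm.js source (comments only, not patch content); each accepted member is also recorded in stdlib_uses_ so instantiation can re-validate the stdlib object later:

    // Math constant:  var pi  = stdlib.Math.PI;   (immutable f64 global)
    // Math function:  var sin = stdlib.Math.sin;  (VarKind::kMathSin)
    // Special value:  var inf = stdlib.Infinity;  (immutable f64 global)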
@@ -643,10 +603,10 @@ void AsmJsParser::ValidateModuleVarStdlib(VarInfo* info) {
void AsmJsParser::ValidateExport() {
// clang-format off
EXPECT_TOKEN(TOK(return));
- // clang format on
+ // clang-format on
if (Check('{')) {
for (;;) {
- std::string name = scanner_.GetIdentifierString();
+ Vector<const char> name = CopyCurrentIdentifierString();
if (!scanner_.IsGlobal() && !scanner_.IsLocal()) {
FAIL("Illegal export name");
}
@@ -659,8 +619,7 @@ void AsmJsParser::ValidateExport() {
if (info->kind != VarKind::kFunction) {
FAIL("Expected function");
}
- info->function_builder->ExportAs(
- {name.c_str(), static_cast<int>(name.size())});
+ module_builder_->AddExport(name, info->function_builder);
if (Check(',')) {
if (!Peek('}')) {
continue;
@@ -677,8 +636,8 @@ void AsmJsParser::ValidateExport() {
if (info->kind != VarKind::kFunction) {
FAIL("Single function export must be a function");
}
- const char* single_function_name = "__single_function__";
- info->function_builder->ExportAs(CStrVector(single_function_name));
+ module_builder_->AddExport(CStrVector(AsmJs::kSingleFunctionName),
+ info->function_builder);
}
}
@@ -711,7 +670,6 @@ void AsmJsParser::ValidateFunctionTable() {
// Only store the function into a table if we used the table somewhere
// (i.e. tables are first seen at their use sites and allocated there).
if (table_info->kind == VarKind::kTable) {
- DCHECK_GE(table_info->mask, 0);
if (count >= static_cast<uint64_t>(table_info->mask) + 1) {
FAIL("Exceeded function table size");
}
@@ -744,13 +702,14 @@ void AsmJsParser::ValidateFunction() {
FAIL("Expected function name");
}
- std::string function_name_raw = scanner_.GetIdentifierString();
+ Vector<const char> function_name_str = CopyCurrentIdentifierString();
AsmJsScanner::token_t function_name = Consume();
VarInfo* function_info = GetVarInfo(function_name);
if (function_info->kind == VarKind::kUnused) {
function_info->kind = VarKind::kFunction;
function_info->function_builder = module_builder_->AddFunction();
function_info->index = function_info->function_builder->func_index();
+ function_info->mutable_variable = false;
} else if (function_info->kind != VarKind::kFunction) {
FAIL("Function name collides with variable");
} else if (function_info->function_defined) {
@@ -758,12 +717,7 @@ void AsmJsParser::ValidateFunction() {
}
function_info->function_defined = true;
- // TODO(bradnelson): Cleanup memory management here.
- // WasmModuleBuilder should own these.
- char* function_name_chr = zone()->NewArray<char>(function_name_raw.size());
- memcpy(function_name_chr, function_name_raw.data(), function_name_raw.size());
- function_info->function_builder->SetName(
- {function_name_chr, static_cast<int>(function_name_raw.size())});
+ function_info->function_builder->SetName(function_name_str);
current_function_builder_ = function_info->function_builder;
return_type_ = nullptr;
@@ -781,21 +735,27 @@ void AsmJsParser::ValidateFunction() {
function_temp_locals_used_ = 0;
function_temp_locals_depth_ = 0;
+ bool last_statement_is_return = false;
while (!failed_ && !Peek('}')) {
+ // clang-format off
+ last_statement_is_return = Peek(TOK(return));
+ // clang-format on
RECURSE(ValidateStatement());
}
EXPECT_TOKEN('}');
- if (return_type_ == nullptr) {
- return_type_ = AsmType::Void();
+ if (!last_statement_is_return) {
+ if (return_type_ == nullptr) {
+ return_type_ = AsmType::Void();
+ } else if (!return_type_->IsA(AsmType::Void())) {
+ FAIL("Expected return at end of non-void function");
+ }
}
+ DCHECK_NOT_NULL(return_type_);
// TODO(bradnelson): WasmModuleBuilder can't take this in the right order.
// We should fix that so we can use it instead.
FunctionSig* sig = ConvertSignature(return_type_, params);
- if (sig == nullptr) {
- FAIL("Invalid function signature in declaration");
- }
current_function_builder_->SetSignature(sig);
for (auto local : locals) {
current_function_builder_->AddLocal(local);
@@ -870,7 +830,8 @@ void AsmJsParser::ValidateFunctionParams(std::vector<AsmType*>* params) {
info->index = static_cast<uint32_t>(params->size());
params->push_back(AsmType::Double());
} else {
- if (!GetVarInfo(Consume())->type->IsA(stdlib_fround_)) {
+ if (!scanner_.IsGlobal() ||
+ !GetVarInfo(Consume())->type->IsA(stdlib_fround_)) {
FAIL("Expected fround");
}
EXPECT_TOKEN('(');
@@ -904,15 +865,14 @@ void AsmJsParser::ValidateFunctionLocals(
// Store types.
EXPECT_TOKEN('=');
double dvalue = 0.0;
- uint64_t uvalue = 0;
+ uint32_t uvalue = 0;
if (Check('-')) {
if (CheckForDouble(&dvalue)) {
info->kind = VarKind::kLocal;
info->type = AsmType::Double();
info->index = static_cast<uint32_t>(param_count + locals->size());
locals->push_back(kWasmF64);
- byte code[] = {WASM_F64(-dvalue)};
- current_function_builder_->EmitCode(code, sizeof(code));
+ current_function_builder_->EmitF64Const(-dvalue);
current_function_builder_->EmitSetLocal(info->index);
} else if (CheckForUnsigned(&uvalue)) {
if (uvalue > 0x7fffffff) {
@@ -946,7 +906,7 @@ void AsmJsParser::ValidateFunctionLocals(
} else {
FAIL("Bad local variable definition");
}
- current_function_builder_->EmitWithVarInt(kExprGetGlobal,
+ current_function_builder_->EmitWithI32V(kExprGetGlobal,
VarIndex(sinfo));
current_function_builder_->EmitSetLocal(info->index);
} else if (sinfo->type->IsA(stdlib_fround_)) {
@@ -964,8 +924,7 @@ void AsmJsParser::ValidateFunctionLocals(
if (negate) {
dvalue = -dvalue;
}
- byte code[] = {WASM_F32(dvalue)};
- current_function_builder_->EmitCode(code, sizeof(code));
+ current_function_builder_->EmitF32Const(dvalue);
current_function_builder_->EmitSetLocal(info->index);
} else if (CheckForUnsigned(&uvalue)) {
if (uvalue > 0x7fffffff) {
@@ -979,9 +938,8 @@ void AsmJsParser::ValidateFunctionLocals(
if (negate) {
value = -value;
}
- double fvalue = static_cast<double>(value);
- byte code[] = {WASM_F32(fvalue)};
- current_function_builder_->EmitCode(code, sizeof(code));
+ float fvalue = static_cast<float>(value);
+ current_function_builder_->EmitF32Const(fvalue);
current_function_builder_->EmitSetLocal(info->index);
} else {
FAIL("Expected variable initial value");
@@ -995,8 +953,7 @@ void AsmJsParser::ValidateFunctionLocals(
info->type = AsmType::Double();
info->index = static_cast<uint32_t>(param_count + locals->size());
locals->push_back(kWasmF64);
- byte code[] = {WASM_F64(dvalue)};
- current_function_builder_->EmitCode(code, sizeof(code));
+ current_function_builder_->EmitF64Const(dvalue);
current_function_builder_->EmitSetLocal(info->index);
} else if (CheckForUnsigned(&uvalue)) {
info->kind = VarKind::kLocal;
@@ -1020,7 +977,7 @@ void AsmJsParser::ValidateFunctionLocals(
}
}
-// ValidateStatement
+// 6.5 ValidateStatement
void AsmJsParser::ValidateStatement() {
call_coercion_ = nullptr;
if (Peek('{')) {
@@ -1106,7 +1063,7 @@ void AsmJsParser::IfStatement() {
// 6.5.5 ReturnStatement
void AsmJsParser::ReturnStatement() {
// clang-format off
- EXPECT_TOKEN(TOK(return ));
+ EXPECT_TOKEN(TOK(return));
// clang-format on
if (!Peek(';') && !Peek('}')) {
// TODO(bradnelson): See if this can be factored out.
@@ -1121,8 +1078,10 @@ void AsmJsParser::ReturnStatement() {
} else {
FAIL("Invalid return type");
}
- } else {
+ } else if (return_type_ == nullptr) {
return_type_ = AsmType::Void();
+ } else if (!return_type_->IsA(AsmType::Void())) {
+ FAIL("Invalid void return type");
}
current_function_builder_->Emit(kExprReturn);
SkipSemicolon();
@@ -1202,7 +1161,11 @@ void AsmJsParser::ForStatement() {
EXPECT_TOKEN(TOK(for));
EXPECT_TOKEN('(');
if (!Peek(';')) {
- Expression(nullptr);
+ AsmType* ret;
+ RECURSE(ret = Expression(nullptr));
+ if (!ret->IsA(AsmType::Void())) {
+ current_function_builder_->Emit(kExprDrop);
+ }
}
EXPECT_TOKEN(';');
// a: block {
@@ -1217,20 +1180,21 @@ void AsmJsParser::ForStatement() {
current_function_builder_->EmitWithU8(kExprBrIf, 1);
}
EXPECT_TOKEN(';');
- // Stash away INCREMENT
- size_t increment_position = current_function_builder_->GetPosition();
- if (!Peek(')')) {
- RECURSE(Expression(nullptr));
- }
- std::vector<byte> increment_code;
- current_function_builder_->StashCode(&increment_code, increment_position);
+ // Race past INCREMENT
+ size_t increment_position = scanner_.Position();
+ ScanToClosingParenthesis();
EXPECT_TOKEN(')');
// BODY
RECURSE(ValidateStatement());
// INCREMENT
- current_function_builder_->EmitCode(
- increment_code.data(), static_cast<uint32_t>(increment_code.size()));
+ size_t end_position = scanner_.Position();
+ scanner_.Seek(increment_position);
+ if (!Peek(')')) {
+ RECURSE(Expression(nullptr));
+ // NOTE: No explicit drop is needed because the break below implies one.
+ }
current_function_builder_->EmitWithU8(kExprBr, 0);
+ scanner_.Seek(end_position);
// }
End();
// }
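A sketch of the block structure this now produces for `for (INIT; COND; INCR) BODY`, with INCR parsed on a second pass over the source via scanner seeking (labels follow the `a:`/`b:` comments above; wasm shown informally):

    // block a {
    //   loop b {
    //     <COND == 0> br_if a    ; leave the loop when the condition fails
    //     <BODY>                 ; INCR was skipped on the first pass
    //     <INCR>                 ; emitted here after seeking back
    //     br b                   ; back edge
    //   }
    // }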
@@ -1250,7 +1214,7 @@ void AsmJsParser::BreakStatement() {
FAIL("Illegal break");
}
current_function_builder_->Emit(kExprBr);
- current_function_builder_->EmitVarInt(depth);
+ current_function_builder_->EmitI32V(depth);
SkipSemicolon();
}
@@ -1266,8 +1230,7 @@ void AsmJsParser::ContinueStatement() {
if (depth < 0) {
FAIL("Illegal continue");
}
- current_function_builder_->Emit(kExprBr);
- current_function_builder_->EmitVarInt(depth);
+ current_function_builder_->EmitWithI32V(kExprBr, depth);
SkipSemicolon();
}
@@ -1300,7 +1263,8 @@ void AsmJsParser::SwitchStatement() {
pending_label_ = 0;
// TODO(bradnelson): Make less weird.
std::vector<int32_t> cases;
- GatherCases(&cases); // Skips { implicitly.
+ GatherCases(&cases);
+ EXPECT_TOKEN('{');
size_t count = cases.size() + 1;
for (size_t i = 0; i < count; ++i) {
BareBegin(BlockKind::kOther);
@@ -1311,9 +1275,9 @@ void AsmJsParser::SwitchStatement() {
current_function_builder_->EmitGetLocal(tmp);
current_function_builder_->EmitI32Const(c);
current_function_builder_->Emit(kExprI32Eq);
- current_function_builder_->EmitWithVarInt(kExprBrIf, table_pos++);
+ current_function_builder_->EmitWithI32V(kExprBrIf, table_pos++);
}
- current_function_builder_->EmitWithVarInt(kExprBr, table_pos++);
+ current_function_builder_->EmitWithI32V(kExprBr, table_pos++);
while (!failed_ && Peek(TOK(case))) {
current_function_builder_->Emit(kExprEnd);
BareEnd();
@@ -1335,7 +1299,7 @@ void AsmJsParser::ValidateCase() {
if (Check('-')) {
negate = true;
}
- uint64_t uvalue;
+ uint32_t uvalue;
if (!CheckForUnsigned(&uvalue)) {
FAIL("Expected numeric literal");
}
@@ -1396,10 +1360,9 @@ AsmType* AsmJsParser::Expression(AsmType* expected) {
AsmType* AsmJsParser::NumericLiteral() {
call_coercion_ = nullptr;
double dvalue = 0.0;
- uint64_t uvalue = 0;
+ uint32_t uvalue = 0;
if (CheckForDouble(&dvalue)) {
- byte code[] = {WASM_F64(dvalue)};
- current_function_builder_->EmitCode(code, sizeof(code));
+ current_function_builder_->EmitF64Const(dvalue);
return AsmType::Double();
} else if (CheckForUnsigned(&uvalue)) {
if (uvalue <= 0x7fffffff) {
@@ -1431,7 +1394,7 @@ AsmType* AsmJsParser::Identifier() {
if (info->kind != VarKind::kGlobal) {
FAILn("Undefined global variable");
}
- current_function_builder_->EmitWithVarInt(kExprGetGlobal, VarIndex(info));
+ current_function_builder_->EmitWithI32V(kExprGetGlobal, VarIndex(info));
return info->type;
}
UNREACHABLE();
@@ -1463,7 +1426,8 @@ AsmType* AsmJsParser::CallExpression() {
// 6.8.5 MemberExpression
AsmType* AsmJsParser::MemberExpression() {
call_coercion_ = nullptr;
- ValidateHeapAccess();
+ RECURSEn(ValidateHeapAccess());
+ DCHECK_NOT_NULL(heap_access_type_);
if (Peek('=')) {
inside_heap_assignment_ = true;
return heap_access_type_->StoreType();
@@ -1490,6 +1454,7 @@ AsmType* AsmJsParser::AssignmentExpression() {
FAILn("Invalid assignment target");
}
inside_heap_assignment_ = false;
+ DCHECK_NOT_NULL(heap_access_type_);
AsmType* heap_type = heap_access_type_;
EXPECT_TOKENn('=');
AsmType* value;
@@ -1523,6 +1488,9 @@ AsmType* AsmJsParser::AssignmentExpression() {
if (info->kind == VarKind::kUnused) {
FAILn("Undeclared assignment target");
}
+ if (!info->mutable_variable) {
+ FAILn("Expected mutable variable in assignment");
+ }
DCHECK(is_local ? info->kind == VarKind::kLocal
: info->kind == VarKind::kGlobal);
AsmType* value;
@@ -1533,10 +1501,8 @@ AsmType* AsmJsParser::AssignmentExpression() {
if (info->kind == VarKind::kLocal) {
current_function_builder_->EmitTeeLocal(info->index);
} else if (info->kind == VarKind::kGlobal) {
- current_function_builder_->EmitWithVarUint(kExprSetGlobal,
- VarIndex(info));
- current_function_builder_->EmitWithVarUint(kExprGetGlobal,
- VarIndex(info));
+ current_function_builder_->EmitWithU32V(kExprSetGlobal, VarIndex(info));
+ current_function_builder_->EmitWithU32V(kExprGetGlobal, VarIndex(info));
} else {
UNREACHABLE();
}
@@ -1554,7 +1520,7 @@ AsmType* AsmJsParser::AssignmentExpression() {
AsmType* AsmJsParser::UnaryExpression() {
AsmType* ret;
if (Check('-')) {
- uint64_t uvalue;
+ uint32_t uvalue;
if (CheckForUnsigned(&uvalue)) {
// TODO(bradnelson): was supposed to be 0x7fffffff, check errata.
if (uvalue <= 0x80000000) {
@@ -1634,9 +1600,9 @@ AsmType* AsmJsParser::UnaryExpression() {
return ret;
}
-// 6.8.8 MultaplicativeExpression
+// 6.8.8 MultiplicativeExpression
AsmType* AsmJsParser::MultiplicativeExpression() {
- uint64_t uvalue;
+ uint32_t uvalue;
if (CheckForUnsignedBelow(0x100000, &uvalue)) {
if (Check('*')) {
AsmType* a;
@@ -1644,14 +1610,16 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
if (!a->IsA(AsmType::Int())) {
FAILn("Expected int");
}
- current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
+ int32_t value = static_cast<int32_t>(uvalue);
+ current_function_builder_->EmitI32Const(value);
current_function_builder_->Emit(kExprI32Mul);
return AsmType::Intish();
}
scanner_.Rewind();
} else if (Check('-')) {
if (CheckForUnsignedBelow(0x100000, &uvalue)) {
- current_function_builder_->EmitI32Const(-static_cast<int32_t>(uvalue));
+ int32_t value = -static_cast<int32_t>(uvalue);
+ current_function_builder_->EmitI32Const(value);
if (Check('*')) {
AsmType* a;
RECURSEn(a = UnaryExpression());
@@ -1669,7 +1637,7 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
RECURSEn(a = UnaryExpression());
for (;;) {
if (Check('*')) {
- uint64_t uvalue;
+ uint32_t uvalue;
if (Check('-')) {
if (CheckForUnsigned(&uvalue)) {
if (uvalue >= 0x100000) {
@@ -1678,7 +1646,8 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
if (!a->IsA(AsmType::Int())) {
FAILn("Integer multiply of expects int");
}
- current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
+ int32_t value = -static_cast<int32_t>(uvalue);
+ current_function_builder_->EmitI32Const(value);
current_function_builder_->Emit(kExprI32Mul);
return AsmType::Intish();
}
@@ -1690,7 +1659,8 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
if (!a->IsA(AsmType::Int())) {
FAILn("Integer multiply of expects int");
}
- current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
+ int32_t value = static_cast<int32_t>(uvalue);
+ current_function_builder_->EmitI32Const(value);
current_function_builder_->Emit(kExprI32Mul);
return AsmType::Intish();
}
@@ -1945,26 +1915,36 @@ AsmType* AsmJsParser::BitwiseXORExpression() {
// 6.8.15 BitwiseORExpression
AsmType* AsmJsParser::BitwiseORExpression() {
AsmType* a = nullptr;
+ call_coercion_deferred_position_ = scanner_.Position();
RECURSEn(a = BitwiseXORExpression());
while (Check('|')) {
- // TODO(bradnelson): Make it prettier.
AsmType* b = nullptr;
+ // Remember whether the first operand to this OR-expression has requested
+ // deferred validation of the |0 annotation.
+ // NOTE: This has to happen here to work recursively.
+ bool requires_zero = call_coercion_deferred_->IsExactly(AsmType::Signed());
+ call_coercion_deferred_ = nullptr;
+ // TODO(bradnelson): Make it prettier.
bool zero = false;
- int old_pos;
+ size_t old_pos;
size_t old_code;
- if (CheckForZero()) {
- old_pos = scanner_.GetPosition();
+ if (a->IsA(AsmType::Intish()) && CheckForZero()) {
+ old_pos = scanner_.Position();
old_code = current_function_builder_->GetPosition();
scanner_.Rewind();
zero = true;
}
RECURSEn(b = BitwiseXORExpression());
// Handle |0 specially.
- if (zero && old_pos == scanner_.GetPosition()) {
- current_function_builder_->StashCode(nullptr, old_code);
+ if (zero && old_pos == scanner_.Position()) {
+ current_function_builder_->DeleteCodeAfter(old_code);
a = AsmType::Signed();
continue;
}
+ // Anything not matching |0 breaks the lookahead in {ValidateCall}.
+ if (requires_zero) {
+ FAILn("Expected |0 type annotation for call");
+ }
if (a->IsA(AsmType::Intish()) && b->IsA(AsmType::Intish())) {
current_function_builder_->Emit(kExprI32Ior);
a = AsmType::Signed();
@@ -1972,6 +1952,7 @@ AsmType* AsmJsParser::BitwiseORExpression() {
FAILn("Expected intish for operator |.");
}
}
+ DCHECK_NULL(call_coercion_deferred_);
return a;
}
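To make the deferred |0 handling concrete, a few source shapes and how they flow through this loop (illustrative asm.js fragments, rendered as comments, not from this patch):

    // x = f() | 0;          ValidateCall defers a Signed coercion; the |0 seen
    //                       here fulfills it and the emitted OR is deleted.
    // x = (f()) | 0;        The parenthesis defeats the lookahead, the call
    //                       gets no type annotation, and validation fails.
    // x = fround(f() | 0);  Only a pending "float" return type may be
    //                       overridden to Signed this way.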
@@ -2026,6 +2007,7 @@ AsmType* AsmJsParser::ValidateCall() {
call_coercion_ = nullptr;
int call_pos = static_cast<int>(scanner_.Position());
int to_number_pos = static_cast<int>(call_coercion_position_);
+ bool allow_peek = (call_coercion_deferred_position_ == scanner_.Position());
AsmJsScanner::token_t function_name = Consume();
// Distinguish between ordinary function calls and function table calls. In
@@ -2037,30 +2019,31 @@ AsmType* AsmJsParser::ValidateCall() {
if (Check('[')) {
RECURSEn(EqualityExpression());
EXPECT_TOKENn('&');
- uint64_t mask = 0;
+ uint32_t mask = 0;
if (!CheckForUnsigned(&mask)) {
FAILn("Expected mask literal");
}
- if (mask > 0x7fffffff) {
- FAILn("Expected power of 2 mask");
- }
- if (!base::bits::IsPowerOfTwo32(static_cast<uint32_t>(1 + mask))) {
+ if (!base::bits::IsPowerOfTwo32(mask + 1)) {
FAILn("Expected power of 2 mask");
}
- current_function_builder_->EmitI32Const(static_cast<uint32_t>(mask));
+ current_function_builder_->EmitI32Const(mask);
current_function_builder_->Emit(kExprI32And);
EXPECT_TOKENn(']');
VarInfo* function_info = GetVarInfo(function_name);
if (function_info->kind == VarKind::kUnused) {
+ uint32_t index = module_builder_->AllocateIndirectFunctions(mask + 1);
+ if (index == std::numeric_limits<uint32_t>::max()) {
+ FAILn("Exceeded maximum function table size");
+ }
function_info->kind = VarKind::kTable;
- function_info->mask = static_cast<int32_t>(mask);
- function_info->index = module_builder_->AllocateIndirectFunctions(
- static_cast<uint32_t>(mask + 1));
+ function_info->mask = mask;
+ function_info->index = index;
+ function_info->mutable_variable = false;
} else {
if (function_info->kind != VarKind::kTable) {
FAILn("Expected call table");
}
- if (function_info->mask != static_cast<int32_t>(mask)) {
+ if (function_info->mask != mask) {
FAILn("Mask size mismatch");
}
}
@@ -2077,6 +2060,7 @@ AsmType* AsmJsParser::ValidateCall() {
function_info->kind = VarKind::kFunction;
function_info->function_builder = module_builder_->AddFunction();
function_info->index = function_info->function_builder->func_index();
+ function_info->mutable_variable = false;
} else {
if (function_info->kind != VarKind::kFunction &&
function_info->kind < VarKind::kImportedFunction) {
@@ -2100,7 +2084,6 @@ AsmType* AsmJsParser::ValidateCall() {
} else if (t->IsA(AsmType::Double())) {
param_types.push_back(AsmType::Double());
} else {
- std::string a = t->Name();
FAILn("Bad function argument type");
}
if (!Peek(')')) {
@@ -2109,12 +2092,30 @@ AsmType* AsmJsParser::ValidateCall() {
}
EXPECT_TOKENn(')');
+ // Reload {VarInfo} after parsing arguments as the table might have grown.
+ VarInfo* function_info = GetVarInfo(function_name);
+
// We potentially use lookahead in order to determine the return type in case
- // it is not yet clear from the call context.
- // TODO(mstarzinger,6183): Several issues with look-ahead are known. Fix!
- // TODO(bradnelson): clarify how this binds, and why only float?
- if (Peek('|') &&
+ // it is not yet clear from the call context. Special care has to be taken to
+ // ensure the non-contextual lookahead is valid. The following restrictions
+ // substantiate the validity of the lookahead implemented below:
+ // - All calls (except stdlib calls) require some sort of type annotation.
+ // - The coercion to "signed" is part of the {BitwiseORExpression}, any
+ //    intervening expressions like parentheses in `(callsite(..))|0` are
+ // syntactically not considered coercions.
+ // - The coercion to "double" as part of the {UnaryExpression} has higher
+ // precedence and wins in `+callsite(..)|0` cases. Only "float" return
+ // types are overridden in `fround(callsite(..)|0)` expressions.
+ // - Expected coercions to "signed" are flagged via {call_coercion_deferred}
+ // and later on validated as part of {BitwiseORExpression} to ensure they
+ // indeed apply to the current call expression.
+ // - The deferred validation is only allowed if {BitwiseORExpression} did
+ // promise to fulfill the request via {call_coercion_deferred_position}.
+ if (allow_peek && Peek('|') &&
+ function_info->kind <= VarKind::kImportedFunction &&
(return_type == nullptr || return_type->IsA(AsmType::Float()))) {
+ DCHECK_NULL(call_coercion_deferred_);
+ call_coercion_deferred_ = AsmType::Signed();
to_number_pos = static_cast<int>(scanner_.Position());
return_type = AsmType::Signed();
} else if (return_type == nullptr) {
@@ -2128,16 +2129,11 @@ AsmType* AsmJsParser::ValidateCall() {
function_type->AsFunctionType()->AddArgument(t);
}
FunctionSig* sig = ConvertSignature(return_type, param_types);
- if (sig == nullptr) {
- FAILn("Invalid function signature");
- }
uint32_t signature_index = module_builder_->AddSignature(sig);
// Emit actual function invocation depending on the kind. At this point we
// also determined the complete function type and can perform checking against
// the expected type or update the expected type in case of first occurrence.
- // Reload {VarInfo} as table might have grown.
- VarInfo* function_info = GetVarInfo(function_name);
if (function_info->kind == VarKind::kImportedFunction) {
for (auto t : param_specific_types) {
if (!t->IsA(AsmType::Extern())) {
@@ -2149,20 +2145,17 @@ AsmType* AsmJsParser::ValidateCall() {
}
DCHECK(function_info->import != nullptr);
// TODO(bradnelson): Factor out.
- uint32_t cache_index = function_info->import->cache.FindOrInsert(sig);
uint32_t index;
- if (cache_index >= function_info->import->cache_index.size()) {
- index = module_builder_->AddImport(
- function_info->import->function_name,
- static_cast<uint32_t>(function_info->import->function_name_size),
- sig);
- function_info->import->cache_index.push_back(index);
+ auto it = function_info->import->cache.find(sig);
+ if (it != function_info->import->cache.end()) {
+ index = it->second;
} else {
- index = function_info->import->cache_index[cache_index];
+ index =
+ module_builder_->AddImport(function_info->import->function_name, sig);
+ function_info->import->cache[sig] = index;
}
current_function_builder_->AddAsmWasmOffset(call_pos, to_number_pos);
- current_function_builder_->Emit(kExprCallFunction);
- current_function_builder_->EmitVarUint(index);
+ current_function_builder_->EmitWithU32V(kExprCallFunction, index);
} else if (function_info->kind > VarKind::kImportedFunction) {
AsmCallableType* callable = function_info->type->AsCallableType();
if (!callable) {
@@ -2171,16 +2164,13 @@ AsmType* AsmJsParser::ValidateCall() {
// TODO(bradnelson): Refactor AsmType to not need this.
if (callable->CanBeInvokedWith(return_type, param_specific_types)) {
// Return type ok.
- } else if (return_type->IsA(AsmType::Void()) &&
- callable->CanBeInvokedWith(AsmType::Float(),
+ } else if (callable->CanBeInvokedWith(AsmType::Float(),
param_specific_types)) {
return_type = AsmType::Float();
- } else if (return_type->IsA(AsmType::Void()) &&
- callable->CanBeInvokedWith(AsmType::Double(),
+ } else if (callable->CanBeInvokedWith(AsmType::Double(),
param_specific_types)) {
return_type = AsmType::Double();
- } else if (return_type->IsA(AsmType::Void()) &&
- callable->CanBeInvokedWith(AsmType::Signed(),
+ } else if (callable->CanBeInvokedWith(AsmType::Signed(),
param_specific_types)) {
return_type = AsmType::Signed();
} else {
@@ -2296,8 +2286,8 @@ AsmType* AsmJsParser::ValidateCall() {
current_function_builder_->EmitGetLocal(tmp.get()->get());
current_function_builder_->AddAsmWasmOffset(call_pos, to_number_pos);
current_function_builder_->Emit(kExprCallIndirect);
- current_function_builder_->EmitVarUint(signature_index);
- current_function_builder_->EmitVarUint(0); // table index
+ current_function_builder_->EmitU32V(signature_index);
+ current_function_builder_->EmitU32V(0); // table index
} else {
current_function_builder_->AddAsmWasmOffset(call_pos, to_number_pos);
current_function_builder_->Emit(kExprCallFunction);
@@ -2336,10 +2326,14 @@ void AsmJsParser::ValidateHeapAccess() {
VarInfo* info = GetVarInfo(Consume());
int32_t size = info->type->ElementSizeInBytes();
EXPECT_TOKEN('[');
- uint64_t offset;
+ uint32_t offset;
if (CheckForUnsigned(&offset)) {
// TODO(bradnelson): Check more things.
- if (offset > 0x7fffffff || offset * size > 0x7fffffff) {
+ // TODO(mstarzinger): Clarify and explain where this limit is coming from,
+ // as it is not mandated by the spec directly.
+ if (offset > 0x7fffffff ||
+ static_cast<uint64_t>(offset) * static_cast<uint64_t>(size) >
+ 0x7fffffff) {
FAIL("Heap access out of range");
}
if (Check(']')) {
@@ -2359,7 +2353,7 @@ void AsmJsParser::ValidateHeapAccess() {
} else {
RECURSE(index_type = AdditiveExpression());
EXPECT_TOKEN(TOK(SAR));
- uint64_t shift;
+ uint32_t shift;
if (!CheckForUnsigned(&shift)) {
FAIL("Expected shift of word size");
}
@@ -2409,8 +2403,25 @@ void AsmJsParser::ValidateFloatCoercion() {
EXPECT_TOKEN(')');
}
+void AsmJsParser::ScanToClosingParenthesis() {
+ int depth = 0;
+ for (;;) {
+ if (Peek('(')) {
+ ++depth;
+ } else if (Peek(')')) {
+ --depth;
+ if (depth < 0) {
+ break;
+ }
+ } else if (Peek(AsmJsScanner::kEndOfInput)) {
+ break;
+ }
+ scanner_.Next();
+ }
+}
+
void AsmJsParser::GatherCases(std::vector<int32_t>* cases) {
- int start = scanner_.GetPosition();
+ size_t start = scanner_.Position();
int depth = 0;
for (;;) {
if (Peek('{')) {
@@ -2423,7 +2434,7 @@ void AsmJsParser::GatherCases(std::vector<int32_t>* cases) {
} else if (depth == 1 && Peek(TOK(case))) {
scanner_.Next();
int32_t value;
- uint64_t uvalue;
+ uint32_t uvalue;
if (Check('-')) {
if (!CheckForUnsigned(&uvalue)) {
break;
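Both ScanToClosingParenthesis and GatherCases lean on the scanner's new Position()/Seek() pair: remember where you are, scan ahead, then jump back. A toy illustration of that shape (the scanner here is invented for the example, not the real AsmJsScanner):

    #include <cstddef>
    #include <vector>

    struct ToyScanner {
      std::vector<int> tokens;
      size_t pos = 0;
      size_t Position() const { return pos; }
      void Seek(size_t p) { pos = p; }
      bool AtEnd() const { return pos >= tokens.size(); }
      int Next() { return tokens[pos++]; }
    };

    // Collect everything ahead of the cursor, then restore it -- the pre-pass
    // pattern that keeps the asm.js parser from being strictly single-pass.
    std::vector<int> PeekAhead(ToyScanner* scanner) {
      size_t start = scanner->Position();
      std::vector<int> seen;
      while (!scanner->AtEnd()) seen.push_back(scanner->Next());
      scanner->Seek(start);
      return seen;
    }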
diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h
index c7f6a66352..2f20b4813d 100644
--- a/deps/v8/src/asmjs/asm-parser.h
+++ b/deps/v8/src/asmjs/asm-parser.h
@@ -5,14 +5,11 @@
#ifndef V8_ASMJS_ASM_PARSER_H_
#define V8_ASMJS_ASM_PARSER_H_
-#include <list>
#include <string>
#include <vector>
#include "src/asmjs/asm-scanner.h"
-#include "src/asmjs/asm-typer.h"
#include "src/asmjs/asm-types.h"
-#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/zone/zone-containers.h"
@@ -31,13 +28,31 @@ namespace wasm {
// scopes (local + module wide).
class AsmJsParser {
public:
+ // clang-format off
+ enum StandardMember {
+ kInfinity,
+ kNaN,
+#define V(_unused1, name, _unused2, _unused3) kMath##name,
+ STDLIB_MATH_FUNCTION_LIST(V)
+#undef V
+#define V(name, _unused1) kMath##name,
+ STDLIB_MATH_VALUE_LIST(V)
+#undef V
+#define V(name, _unused1, _unused2, _unused3) k##name,
+ STDLIB_ARRAY_TYPE_LIST(V)
+#undef V
+ };
+ // clang-format on
+
+ typedef std::unordered_set<StandardMember, std::hash<int>> StdlibSet;
+
explicit AsmJsParser(Isolate* isolate, Zone* zone, Handle<Script> script,
int start, int end);
bool Run();
- const char* failure_message() const { return failure_message_.c_str(); }
+ const char* failure_message() const { return failure_message_; }
int failure_location() const { return failure_location_; }
WasmModuleBuilder* module_builder() { return module_builder_; }
- const AsmTyper::StdlibSet* stdlib_uses() const { return &stdlib_uses_; }
+ const StdlibSet* stdlib_uses() const { return &stdlib_uses_; }
private:
// clang-format off
@@ -52,40 +67,32 @@ class AsmJsParser {
#define V(_unused0, Name, _unused1, _unused2) kMath##Name,
STDLIB_MATH_FUNCTION_LIST(V)
#undef V
-#define V(Name) kMath##Name,
+#define V(Name, _unused1) kMath##Name,
STDLIB_MATH_VALUE_LIST(V)
#undef V
};
// clang-format on
struct FunctionImportInfo {
- char* function_name;
- size_t function_name_size;
- SignatureMap cache;
- std::vector<uint32_t> cache_index;
+ Vector<const char> function_name;
+ WasmModuleBuilder::SignatureMap cache;
};
struct VarInfo {
- AsmType* type;
- WasmFunctionBuilder* function_builder;
- FunctionImportInfo* import;
- int32_t mask;
- uint32_t index;
- VarKind kind;
- bool mutable_variable;
- bool function_defined;
-
- VarInfo();
- void DeclareGlobalImport(AsmType* type, uint32_t index);
- void DeclareStdlibFunc(VarKind kind, AsmType* type);
+ AsmType* type = AsmType::None();
+ WasmFunctionBuilder* function_builder = nullptr;
+ FunctionImportInfo* import = nullptr;
+ uint32_t mask = 0;
+ uint32_t index = 0;
+ VarKind kind = VarKind::kUnused;
+ bool mutable_variable = true;
+ bool function_defined = false;
};
struct GlobalImport {
- char* import_name;
- size_t import_name_size;
- uint32_t import_index;
- uint32_t global_index;
- bool needs_init;
+ Vector<const char> import_name;
+ ValueType value_type;
+ VarInfo* var_info;
};
enum class BlockKind { kRegular, kLoop, kOther };
@@ -103,9 +110,8 @@ class AsmJsParser {
WasmModuleBuilder* module_builder_;
WasmFunctionBuilder* current_function_builder_;
AsmType* return_type_;
- std::uintptr_t stack_limit_;
- AsmTyper::StdlibSet stdlib_uses_;
- std::list<FunctionImportInfo> function_import_info_;
+ uintptr_t stack_limit_;
+ StdlibSet stdlib_uses_;
ZoneVector<VarInfo> global_var_info_;
ZoneVector<VarInfo> local_var_info_;
@@ -115,7 +121,7 @@ class AsmJsParser {
// Error Handling related
bool failed_;
- std::string failure_message_;
+ const char* failure_message_;
int failure_location_;
// Module Related.
@@ -143,19 +149,28 @@ class AsmJsParser {
AsmType* stdlib_fround_;
// When making calls, the return type is needed to lookup signatures.
- // For +callsite(..) or fround(callsite(..)) use this value to pass
+ // For `+callsite(..)` or `fround(callsite(..))` use this value to pass
// along the coercion.
AsmType* call_coercion_;
// The source position associated with the above {call_coercion}.
size_t call_coercion_position_;
+ // When making calls, the coercion can also appear in the source stream
+ // syntactically "behind" the call site. For `callsite(..)|0` use this
+ // value to flag that such a coercion must happen.
+ AsmType* call_coercion_deferred_;
+
+ // The source position at which requesting a deferred coercion via the
+ // aforementioned {call_coercion_deferred} is allowed.
+ size_t call_coercion_deferred_position_;
+
// Used to track the last label we've seen so it can be matched to later
// statements it's attached to.
AsmJsScanner::token_t pending_label_;
- // Global imports.
- // NOTE: Holds the strings referenced in wasm-module-builder for imports.
+ // Global imports. The list of imported variables that are copied during
+  // module instantiation into corresponding global variables.
ZoneLinkedList<GlobalImport> global_imports_;
Zone* zone() { return zone_; }
@@ -192,7 +207,7 @@ class AsmJsParser {
}
}
- inline bool CheckForUnsigned(uint64_t* value) {
+ inline bool CheckForUnsigned(uint32_t* value) {
if (scanner_.IsUnsigned()) {
*value = scanner_.AsUnsigned();
scanner_.Next();
@@ -202,7 +217,7 @@ class AsmJsParser {
}
}
- inline bool CheckForUnsignedBelow(uint64_t limit, uint64_t* value) {
+ inline bool CheckForUnsignedBelow(uint32_t limit, uint32_t* value) {
if (scanner_.IsUnsigned() && scanner_.AsUnsigned() < limit) {
*value = scanner_.AsUnsigned();
scanner_.Next();
@@ -225,13 +240,16 @@ class AsmJsParser {
void DeclareGlobal(VarInfo* info, bool mutable_variable, AsmType* type,
ValueType vtype,
const WasmInitExpr& init = WasmInitExpr());
+ void DeclareStdlibFunc(VarInfo* info, VarKind kind, AsmType* type);
+ void AddGlobalImport(Vector<const char> name, AsmType* type, ValueType vtype,
+ bool mutable_variable, VarInfo* info);
// Allocates a temporary local variable. The given {index} is absolute within
  // the function body; consider using {TemporaryVariableScope} when nesting.
uint32_t TempVariable(int index);
- void AddGlobalImport(std::string name, AsmType* type, ValueType vtype,
- bool mutable_variable, VarInfo* info);
+ // Preserves a copy of the scanner's current identifier string in the zone.
+ Vector<const char> CopyCurrentIdentifierString();
// Use to set up block stack layers (including synthetic ones for if-else).
// Begin/Loop/End below are implemented with these plus code generation.
@@ -251,12 +269,11 @@ class AsmJsParser {
FunctionSig* ConvertSignature(AsmType* return_type,
const std::vector<AsmType*>& params);
- // 6.1 ValidateModule
- void ValidateModule();
- void ValidateModuleParameters();
- void ValidateModuleVars();
+ void ValidateModule(); // 6.1 ValidateModule
+ void ValidateModuleParameters(); // 6.1 ValidateModule - parameters
+ void ValidateModuleVars(); // 6.1 ValidateModule - variables
void ValidateModuleVar(bool mutable_variable);
- bool ValidateModuleVarImport(VarInfo* info, bool mutable_variable);
+ void ValidateModuleVarImport(VarInfo* info, bool mutable_variable);
void ValidateModuleVarStdlib(VarInfo* info);
void ValidateModuleVarNewStdlib(VarInfo* info);
void ValidateModuleVarFromGlobal(VarInfo* info, bool mutable_variable);
@@ -267,7 +284,7 @@ class AsmJsParser {
void ValidateFunctionParams(std::vector<AsmType*>* params);
void ValidateFunctionLocals(size_t param_count,
std::vector<ValueType>* locals);
- void ValidateStatement(); // ValidateStatement
+ void ValidateStatement(); // 6.5 ValidateStatement
void Block(); // 6.5.1 Block
void ExpressionStatement(); // 6.5.2 ExpressionStatement
void EmptyStatement(); // 6.5.3 EmptyStatement
@@ -291,7 +308,7 @@ class AsmJsParser {
AsmType* MemberExpression(); // 6.8.5 MemberExpression
AsmType* AssignmentExpression(); // 6.8.6 AssignmentExpression
AsmType* UnaryExpression(); // 6.8.7 UnaryExpression
- AsmType* MultiplicativeExpression(); // 6.8.8 MultaplicativeExpression
+ AsmType* MultiplicativeExpression(); // 6.8.8 MultiplicativeExpression
AsmType* AdditiveExpression(); // 6.8.9 AdditiveExpression
AsmType* ShiftExpression(); // 6.8.10 ShiftExpression
AsmType* RelationalExpression(); // 6.8.11 RelationalExpression
@@ -306,6 +323,14 @@ class AsmJsParser {
void ValidateHeapAccess(); // 6.10 ValidateHeapAccess
void ValidateFloatCoercion(); // 6.11 ValidateFloatCoercion
+ // Used as part of {ForStatement}. Scans forward to the next `)` in order to
+  // skip over the third expression in a for-statement. This is one of the
+  // pieces that keep this parser from being purely single-pass.
+ void ScanToClosingParenthesis();
+
+ // Used as part of {SwitchStatement}. Collects all case labels in the current
+  // switch-statement, then resets the scanner position. This is one of the
+  // pieces that keep this parser from being purely single-pass.
void GatherCases(std::vector<int32_t>* cases);
};
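A side note on the StdlibSet typedef introduced in this header: std::hash<int> is spelled out because, before C++14, std::hash had no specialization for enumeration types; the unscoped enum converts to int when hashed. A compilable sketch of the same pattern (the three members shown are placeholders):

    #include <functional>
    #include <unordered_set>

    enum StandardMember { kInfinity, kNaN, kMathPow };
    typedef std::unordered_set<StandardMember, std::hash<int>> StdlibSet;

    int main() {
      StdlibSet uses;
      uses.insert(kMathPow);            // record that the module touched Math.pow
      return uses.count(kNaN) ? 1 : 0;  // 0: NaN was never recorded
    }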
diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc
index 5f272652f4..14b07306fd 100644
--- a/deps/v8/src/asmjs/asm-scanner.cc
+++ b/deps/v8/src/asmjs/asm-scanner.cc
@@ -35,8 +35,10 @@ AsmJsScanner::AsmJsScanner()
STDLIB_MATH_FUNCTION_LIST(V)
STDLIB_ARRAY_TYPE_LIST(V)
#undef V
-#define V(name) property_names_[#name] = kToken_##name;
+#define V(name, _junk1) property_names_[#name] = kToken_##name;
STDLIB_MATH_VALUE_LIST(V)
+#undef V
+#define V(name) property_names_[#name] = kToken_##name;
STDLIB_OTHER_LIST(V)
#undef V
#define V(name) global_names_[#name] = kToken_##name;
@@ -70,7 +72,7 @@ void AsmJsScanner::Next() {
if (Token() == kDouble) {
PrintF("%lf ", AsDouble());
} else if (Token() == kUnsigned) {
- PrintF("%" PRIu64 " ", AsUnsigned());
+ PrintF("%" PRIu32 " ", AsUnsigned());
} else {
std::string name = Name(Token());
PrintF("%s ", name.c_str());
@@ -210,12 +212,7 @@ std::string AsmJsScanner::Name(token_t token) const {
}
#endif
-int AsmJsScanner::GetPosition() const {
- DCHECK(!rewind_);
- return static_cast<int>(stream_->pos());
-}
-
-void AsmJsScanner::Seek(int pos) {
+void AsmJsScanner::Seek(size_t pos) {
stream_->Seek(pos);
preceding_token_ = kUninitialized;
token_ = kUninitialized;
@@ -311,9 +308,8 @@ void AsmJsScanner::ConsumeNumber(uc32 ch) {
UnicodeCache cache;
double_value_ = StringToDouble(
&cache,
- Vector<uint8_t>(
- const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(number.data())),
- static_cast<int>(number.size())),
+ Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(number.data()),
+ static_cast<int>(number.size())),
ALLOW_HEX | ALLOW_OCTAL | ALLOW_BINARY | ALLOW_IMPLICIT_OCTAL);
if (std::isnan(double_value_)) {
// Check if string to number conversion didn't consume all the characters.
@@ -335,6 +331,11 @@ void AsmJsScanner::ConsumeNumber(uc32 ch) {
if (has_dot) {
token_ = kDouble;
} else {
+ // Exceeding safe integer range is an error.
+ if (double_value_ > static_cast<double>(kMaxUInt32)) {
+ token_ = kParseError;
+ return;
+ }
unsigned_value_ = static_cast<uint32_t>(double_value_);
token_ = kUnsigned;
}
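The new guard in ConsumeNumber above makes out-of-range dotless literals a hard kParseError instead of silently truncating them to 32 bits. Its core test, factored into a standalone helper for illustration (the helper name is hypothetical):

    #include <cstdint>

    // A dotless asm.js literal is only a valid "unsigned" token when it fits
    // in [0, 2^32); 4294967295.0 (kMaxUInt32) is exactly representable as a
    // double, so the comparison below is precise.
    bool FitsUnsigned(double literal, uint32_t* out) {
      if (literal < 0.0 || literal > 4294967295.0) return false;
      *out = static_cast<uint32_t>(literal);
      return true;
    }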
diff --git a/deps/v8/src/asmjs/asm-scanner.h b/deps/v8/src/asmjs/asm-scanner.h
index 4ac5370a1f..d519862a83 100644
--- a/deps/v8/src/asmjs/asm-scanner.h
+++ b/deps/v8/src/asmjs/asm-scanner.h
@@ -43,12 +43,15 @@ class V8_EXPORT_PRIVATE AsmJsScanner {
void Next();
// Back up by one token.
void Rewind();
- // Get raw string for current identifier.
+
+ // Get raw string for current identifier. Note that the returned string will
+  // become invalid when the scanner advances; create a copy to preserve it.
const std::string& GetIdentifierString() const {
// Identifier strings don't work after a rewind.
DCHECK(!rewind_);
return identifier_string_;
}
+
// Check if we just passed a newline.
bool IsPrecededByNewline() const {
// Newline tracking doesn't work if you back up.
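The invalidation warning on GetIdentifierString is why the parser grew CopyCurrentIdentifierString: the scanner reuses one internal buffer, so a caller must copy before calling Next(). A loose sketch of the copy-before-advance discipline, using std::deque for stable storage in place of V8's zone allocation:

    #include <deque>
    #include <string>

    // std::deque never relocates existing elements on push_back, so pointers
    // handed out earlier stay valid -- a cheap stand-in for zone-allocated
    // copies of the scanner's transient identifier string.
    class IdentifierPool {
     public:
      const char* Preserve(const std::string& transient_identifier) {
        storage_.push_back(transient_identifier);  // copy before scanner.Next()
        return storage_.back().c_str();
      }

     private:
      std::deque<std::string> storage_;
    };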
@@ -62,10 +65,9 @@ class V8_EXPORT_PRIVATE AsmJsScanner {
std::string Name(token_t token) const;
#endif
- // Get current position (to use with Seek).
- int GetPosition() const;
- // Restores old position (token after that position).
- void Seek(int pos);
+  // Restores an old position (scanning resumes at the token following it).
+  // Rewinding right after a seek is not allowed; previous tokens are unknown.
+ void Seek(size_t pos);
// Select whether identifiers are resolved in global or local scope,
// and which scope new identifiers are added to.
@@ -90,12 +92,19 @@ class V8_EXPORT_PRIVATE AsmJsScanner {
return token - kGlobalsStart;
}
- // Methods to check if the current token is an asm.js "number" (contains a
- // dot) or an "unsigned" (a number without a dot).
+ // Methods to check if the current token is a numeric literal considered an
+ // asm.js "double" (contains a dot) or an "unsigned" (without a dot). Note
+ // that numbers without a dot outside the [0 .. 2^32) range are errors.
bool IsUnsigned() const { return Token() == kUnsigned; }
- uint64_t AsUnsigned() const { return unsigned_value_; }
+ uint32_t AsUnsigned() const {
+ DCHECK(IsUnsigned());
+ return unsigned_value_;
+ }
bool IsDouble() const { return Token() == kDouble; }
- double AsDouble() const { return double_value_; }
+ double AsDouble() const {
+ DCHECK(IsDouble());
+ return double_value_;
+ }
// clang-format off
enum {
@@ -111,9 +120,11 @@ class V8_EXPORT_PRIVATE AsmJsScanner {
STDLIB_MATH_FUNCTION_LIST(V)
STDLIB_ARRAY_TYPE_LIST(V)
#undef V
+#define V(name, _junk1) kToken_##name,
+ STDLIB_MATH_VALUE_LIST(V)
+#undef V
#define V(name) kToken_##name,
STDLIB_OTHER_LIST(V)
- STDLIB_MATH_VALUE_LIST(V)
KEYWORD_NAME_LIST(V)
#undef V
#define V(rawname, name) kToken_##name,
@@ -142,7 +153,7 @@ class V8_EXPORT_PRIVATE AsmJsScanner {
std::unordered_map<std::string, token_t> property_names_;
int global_count_;
double double_value_;
- uint64_t unsigned_value_;
+ uint32_t unsigned_value_;
bool preceded_by_newline_;
// Consume multiple characters.
diff --git a/deps/v8/src/asmjs/asm-typer.cc b/deps/v8/src/asmjs/asm-typer.cc
deleted file mode 100644
index 1d18360db1..0000000000
--- a/deps/v8/src/asmjs/asm-typer.cc
+++ /dev/null
@@ -1,2963 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/asmjs/asm-typer.h"
-
-#include <algorithm>
-#include <limits>
-#include <memory>
-#include <string>
-
-#include "include/v8.h"
-#include "src/v8.h"
-
-#include "src/asmjs/asm-types.h"
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/base/bits.h"
-#include "src/codegen.h"
-#include "src/globals.h"
-#include "src/messages.h"
-#include "src/objects-inl.h"
-#include "src/utils.h"
-#include "src/vector.h"
-
-#define FAIL_LOCATION_RAW(location, msg) \
- do { \
- Handle<String> message( \
- isolate_->factory()->InternalizeOneByteString(msg)); \
- error_message_ = MessageHandler::MakeMessageObject( \
- isolate_, MessageTemplate::kAsmJsInvalid, (location), message, \
- Handle<JSArray>::null()); \
- error_message_->set_error_level(v8::Isolate::kMessageWarning); \
- message_location_ = *(location); \
- return AsmType::None(); \
- } while (false)
-
-#define FAIL_RAW(node, msg) \
- do { \
- MessageLocation location(script_, node->position(), node->position()); \
- FAIL_LOCATION_RAW(&location, msg); \
- } while (false)
-
-#define FAIL_LOCATION(location, msg) \
- FAIL_LOCATION_RAW(location, STATIC_CHAR_VECTOR(msg))
-
-#define FAIL(node, msg) FAIL_RAW(node, STATIC_CHAR_VECTOR(msg))
-
-#define RECURSE(call) \
- do { \
- if (GetCurrentStackPosition() < stack_limit_) { \
- stack_overflow_ = true; \
- FAIL(root_, "Stack overflow while parsing asm.js module."); \
- } \
- \
- AsmType* result = (call); \
- if (stack_overflow_) { \
- return AsmType::None(); \
- } \
- \
- if (result == AsmType::None()) { \
- return AsmType::None(); \
- } \
- } while (false)
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-namespace {
-static const uint32_t LargestFixNum = std::numeric_limits<int32_t>::max();
-} // namespace
-
-using v8::internal::AstNode;
-using v8::internal::GetCurrentStackPosition;
-
-// ----------------------------------------------------------------------------
-// Implementation of AsmTyper::FlattenedStatements
-
-AsmTyper::FlattenedStatements::FlattenedStatements(Zone* zone,
- ZoneList<Statement*>* s)
- : context_stack_(zone) {
- context_stack_.emplace_back(Context(s));
-}
-
-Statement* AsmTyper::FlattenedStatements::Next() {
- for (;;) {
- if (context_stack_.empty()) {
- return nullptr;
- }
-
- Context* current = &context_stack_.back();
-
- if (current->statements_->length() <= current->next_index_) {
- context_stack_.pop_back();
- continue;
- }
-
- Statement* current_statement =
- current->statements_->at(current->next_index_++);
- if (current_statement->IsBlock()) {
- context_stack_.emplace_back(
- Context(current_statement->AsBlock()->statements()));
- continue;
- }
-
- return current_statement;
- }
-}
-
-// ----------------------------------------------------------------------------
-// Implementation of AsmTyper::SourceLayoutTracker
-
-bool AsmTyper::SourceLayoutTracker::IsValid() const {
- const Section* kAllSections[] = {&use_asm_, &globals_, &functions_, &tables_,
- &exports_};
- for (size_t ii = 0; ii < arraysize(kAllSections); ++ii) {
- const auto& curr_section = *kAllSections[ii];
- for (size_t jj = ii + 1; jj < arraysize(kAllSections); ++jj) {
- if (curr_section.IsPrecededBy(*kAllSections[jj])) {
- return false;
- }
- }
- }
- return true;
-}
-
-void AsmTyper::SourceLayoutTracker::Section::AddNewElement(
- const AstNode& node) {
- const int node_pos = node.position();
- if (start_ == kNoSourcePosition) {
- start_ = node_pos;
- } else {
- start_ = std::min(start_, node_pos);
- }
- if (end_ == kNoSourcePosition) {
- end_ = node_pos;
- } else {
- end_ = std::max(end_, node_pos);
- }
-}
-
-bool AsmTyper::SourceLayoutTracker::Section::IsPrecededBy(
- const Section& other) const {
- if (start_ == kNoSourcePosition) {
- DCHECK_EQ(end_, kNoSourcePosition);
- return false;
- }
- if (other.start_ == kNoSourcePosition) {
- DCHECK_EQ(other.end_, kNoSourcePosition);
- return false;
- }
- DCHECK_LE(start_, end_);
- DCHECK_LE(other.start_, other.end_);
- return other.start_ <= end_;
-}
-
-// ----------------------------------------------------------------------------
-// Implementation of AsmTyper::VariableInfo
-
-AsmTyper::VariableInfo* AsmTyper::VariableInfo::ForSpecialSymbol(
- Zone* zone, StandardMember standard_member) {
- DCHECK(standard_member == kStdlib || standard_member == kFFI ||
- standard_member == kHeap || standard_member == kModule);
- auto* new_var_info = new (zone) VariableInfo(AsmType::None());
- new_var_info->standard_member_ = standard_member;
- new_var_info->mutability_ = kImmutableGlobal;
- return new_var_info;
-}
-
-AsmTyper::VariableInfo* AsmTyper::VariableInfo::Clone(Zone* zone) const {
- CHECK(standard_member_ != kNone);
- CHECK(!type_->IsA(AsmType::None()));
- auto* new_var_info = new (zone) VariableInfo(type_);
- new_var_info->standard_member_ = standard_member_;
- new_var_info->mutability_ = mutability_;
- return new_var_info;
-}
-
-void AsmTyper::VariableInfo::SetFirstForwardUse(
- const MessageLocation& source_location) {
- missing_definition_ = true;
- source_location_ = source_location;
-}
-
-// ----------------------------------------------------------------------------
-// Implementation of AsmTyper
-
-AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Handle<Script> script,
- FunctionLiteral* root)
- : isolate_(isolate),
- zone_(zone),
- script_(script),
- root_(root),
- forward_definitions_(zone),
- ffi_use_signatures_(zone),
- stdlib_types_(zone),
- stdlib_math_types_(zone),
- module_info_(VariableInfo::ForSpecialSymbol(zone_, kModule)),
- global_scope_(ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)),
- local_scope_(ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)),
- stack_limit_(isolate->stack_guard()->real_climit()),
- fround_type_(AsmType::FroundType(zone_)),
- ffi_type_(AsmType::FFIType(zone_)),
- function_pointer_tables_(zone_) {
- InitializeStdlib();
-}
-
-namespace {
-bool ValidAsmIdentifier(Handle<String> name) {
- static const char* kInvalidAsmNames[] = {"eval", "arguments"};
-
- for (size_t ii = 0; ii < arraysize(kInvalidAsmNames); ++ii) {
- if (strcmp(name->ToCString().get(), kInvalidAsmNames[ii]) == 0) {
- return false;
- }
- }
- return true;
-}
-} // namespace
-
-void AsmTyper::InitializeStdlib() {
- auto* d = AsmType::Double();
- auto* dq = AsmType::DoubleQ();
- auto* dq2d = AsmType::Function(zone_, d);
- dq2d->AsFunctionType()->AddArgument(dq);
-
- auto* dqdq2d = AsmType::Function(zone_, d);
- dqdq2d->AsFunctionType()->AddArgument(dq);
- dqdq2d->AsFunctionType()->AddArgument(dq);
-
- auto* f = AsmType::Float();
- auto* fq = AsmType::FloatQ();
- auto* fq2f = AsmType::Function(zone_, f);
- fq2f->AsFunctionType()->AddArgument(fq);
-
- auto* s = AsmType::Signed();
- auto* s2s = AsmType::Function(zone_, s);
- s2s->AsFunctionType()->AddArgument(s);
-
- auto* i = AsmType::Int();
- auto* i2s = AsmType::Function(zone_, s);
- i2s->AsFunctionType()->AddArgument(i);
-
- auto* ii2s = AsmType::Function(zone_, s);
- ii2s->AsFunctionType()->AddArgument(i);
- ii2s->AsFunctionType()->AddArgument(i);
-
- auto* minmax_d = AsmType::MinMaxType(zone_, d, d);
- // *VIOLATION* The float variant is not part of the spec, but firefox accepts
- // it.
- auto* minmax_f = AsmType::MinMaxType(zone_, f, f);
- auto* minmax_i = AsmType::MinMaxType(zone_, s, i);
- auto* minmax = AsmType::OverloadedFunction(zone_);
- minmax->AsOverloadedFunctionType()->AddOverload(minmax_i);
- minmax->AsOverloadedFunctionType()->AddOverload(minmax_f);
- minmax->AsOverloadedFunctionType()->AddOverload(minmax_d);
-
- auto* fround = fround_type_;
-
- auto* abs = AsmType::OverloadedFunction(zone_);
- abs->AsOverloadedFunctionType()->AddOverload(s2s);
- abs->AsOverloadedFunctionType()->AddOverload(dq2d);
- abs->AsOverloadedFunctionType()->AddOverload(fq2f);
-
- auto* ceil = AsmType::OverloadedFunction(zone_);
- ceil->AsOverloadedFunctionType()->AddOverload(dq2d);
- ceil->AsOverloadedFunctionType()->AddOverload(fq2f);
-
- auto* floor = ceil;
- auto* sqrt = ceil;
-
- struct StandardMemberInitializer {
- const char* name;
- StandardMember standard_member;
- AsmType* type;
- };
-
- const StandardMemberInitializer stdlib[] = {{"Infinity", kInfinity, d},
- {"NaN", kNaN, d},
-#define ASM_TYPED_ARRAYS(V) \
- V(Uint8) \
- V(Int8) \
- V(Uint16) \
- V(Int16) \
- V(Uint32) \
- V(Int32) \
- V(Float32) \
- V(Float64)
-
-#define ASM_TYPED_ARRAY(TypeName) \
- {#TypeName "Array", kNone, AsmType::TypeName##Array()},
- ASM_TYPED_ARRAYS(ASM_TYPED_ARRAY)
-#undef ASM_TYPED_ARRAY
- };
- for (size_t ii = 0; ii < arraysize(stdlib); ++ii) {
- stdlib_types_[stdlib[ii].name] = new (zone_) VariableInfo(stdlib[ii].type);
- stdlib_types_[stdlib[ii].name]->set_standard_member(
- stdlib[ii].standard_member);
- stdlib_types_[stdlib[ii].name]->set_mutability(
- VariableInfo::kImmutableGlobal);
- }
-
- const StandardMemberInitializer math[] = {
- {"PI", kMathPI, d},
- {"E", kMathE, d},
- {"LN2", kMathLN2, d},
- {"LN10", kMathLN10, d},
- {"LOG2E", kMathLOG2E, d},
- {"LOG10E", kMathLOG10E, d},
- {"SQRT2", kMathSQRT2, d},
- {"SQRT1_2", kMathSQRT1_2, d},
- {"imul", kMathImul, ii2s},
- {"abs", kMathAbs, abs},
- // NOTE: clz32 should return fixnum. The current typer can only return
- // Signed, Float, or Double, so it returns Signed in our version of
- // asm.js.
- {"clz32", kMathClz32, i2s},
- {"ceil", kMathCeil, ceil},
- {"floor", kMathFloor, floor},
- {"fround", kMathFround, fround},
- {"pow", kMathPow, dqdq2d},
- {"exp", kMathExp, dq2d},
- {"log", kMathLog, dq2d},
- {"min", kMathMin, minmax},
- {"max", kMathMax, minmax},
- {"sqrt", kMathSqrt, sqrt},
- {"cos", kMathCos, dq2d},
- {"sin", kMathSin, dq2d},
- {"tan", kMathTan, dq2d},
- {"acos", kMathAcos, dq2d},
- {"asin", kMathAsin, dq2d},
- {"atan", kMathAtan, dq2d},
- {"atan2", kMathAtan2, dqdq2d},
- };
- for (size_t ii = 0; ii < arraysize(math); ++ii) {
- stdlib_math_types_[math[ii].name] = new (zone_) VariableInfo(math[ii].type);
- stdlib_math_types_[math[ii].name]->set_standard_member(
- math[ii].standard_member);
- stdlib_math_types_[math[ii].name]->set_mutability(
- VariableInfo::kImmutableGlobal);
- }
-}
-
-// Used for 5.5 GlobalVariableTypeAnnotations
-AsmTyper::VariableInfo* AsmTyper::ImportLookup(Property* import) {
- auto* obj = import->obj();
- auto* key = import->key()->AsLiteral();
- if (key == nullptr) {
- return nullptr;
- }
-
- ObjectTypeMap* stdlib = &stdlib_types_;
- if (auto* obj_as_property = obj->AsProperty()) {
- // This can only be stdlib.Math
- auto* math_name = obj_as_property->key()->AsLiteral();
- if (math_name == nullptr || !math_name->IsPropertyName()) {
- return nullptr;
- }
-
- if (!math_name->AsPropertyName()->IsUtf8EqualTo(CStrVector("Math"))) {
- return nullptr;
- }
-
- auto* stdlib_var_proxy = obj_as_property->obj()->AsVariableProxy();
- if (stdlib_var_proxy == nullptr) {
- return nullptr;
- }
- obj = stdlib_var_proxy;
- stdlib = &stdlib_math_types_;
- }
-
- auto* obj_as_var_proxy = obj->AsVariableProxy();
- if (obj_as_var_proxy == nullptr) {
- return nullptr;
- }
-
- auto* obj_info = Lookup(obj_as_var_proxy->var());
- if (obj_info == nullptr) {
- return nullptr;
- }
-
- if (obj_info->IsFFI()) {
- // For FFI we can't validate import->key, so assume this is OK.
- return obj_info;
- }
-
- if (!key->IsPropertyName()) {
- return nullptr;
- }
-
- std::unique_ptr<char[]> aname = key->AsPropertyName()->ToCString();
- ObjectTypeMap::iterator i = stdlib->find(std::string(aname.get()));
- if (i == stdlib->end()) {
- return nullptr;
- }
- stdlib_uses_.insert(i->second->standard_member());
- return i->second;
-}
-
-AsmTyper::VariableInfo* AsmTyper::Lookup(Variable* variable) const {
- const ZoneHashMap* scope = in_function_ ? &local_scope_ : &global_scope_;
- ZoneHashMap::Entry* entry =
- scope->Lookup(variable, ComputePointerHash(variable));
- if (entry == nullptr && in_function_) {
- entry = global_scope_.Lookup(variable, ComputePointerHash(variable));
- }
-
- if (entry == nullptr && !module_name_.is_null() &&
- module_name_->Equals(*variable->name())) {
- return module_info_;
- }
-
- return entry ? reinterpret_cast<VariableInfo*>(entry->value) : nullptr;
-}
-
-void AsmTyper::AddForwardReference(VariableProxy* proxy, VariableInfo* info) {
- MessageLocation location(script_, proxy->position(), proxy->position());
- info->SetFirstForwardUse(location);
- forward_definitions_.push_back(info);
-}
-
-bool AsmTyper::AddGlobal(Variable* variable, VariableInfo* info) {
- // We can't DCHECK(!in_function_) because function may actually install global
- // names (forward defined functions and function tables.)
- DCHECK(info->mutability() != VariableInfo::kInvalidMutability);
- DCHECK(info->IsGlobal());
- DCHECK(ValidAsmIdentifier(variable->name()));
-
- if (!module_name_.is_null() && module_name_->Equals(*variable->name())) {
- return false;
- }
-
- ZoneHashMap::Entry* entry = global_scope_.LookupOrInsert(
- variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone_));
-
- if (entry->value != nullptr) {
- return false;
- }
-
- entry->value = info;
- return true;
-}
-
-bool AsmTyper::AddLocal(Variable* variable, VariableInfo* info) {
- DCHECK(in_function_);
- DCHECK(info->mutability() != VariableInfo::kInvalidMutability);
- DCHECK(!info->IsGlobal());
- DCHECK(ValidAsmIdentifier(variable->name()));
-
- ZoneHashMap::Entry* entry = local_scope_.LookupOrInsert(
- variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone_));
-
- if (entry->value != nullptr) {
- return false;
- }
-
- entry->value = info;
- return true;
-}
-
-void AsmTyper::SetTypeOf(AstNode* node, AsmType* type) {
- DCHECK_NE(type, AsmType::None());
- if (in_function_) {
- DCHECK(function_node_types_.find(node) == function_node_types_.end());
- function_node_types_.insert(std::make_pair(node, type));
- } else {
- DCHECK(module_node_types_.find(node) == module_node_types_.end());
- module_node_types_.insert(std::make_pair(node, type));
- }
-}
-
-namespace {
-bool IsLiteralDouble(Literal* literal) {
- return literal->raw_value()->IsNumber() &&
- literal->raw_value()->ContainsDot();
-}
-
-bool IsLiteralInt(Literal* literal) {
- return literal->raw_value()->IsNumber() &&
- !literal->raw_value()->ContainsDot();
-}
-
-bool IsLiteralMinus1(Literal* literal) {
- return IsLiteralInt(literal) && literal->raw_value()->AsNumber() == -1.0;
-}
-
-bool IsLiteral1Dot0(Literal* literal) {
- return IsLiteralDouble(literal) && literal->raw_value()->AsNumber() == 1.0;
-}
-
-bool IsLiteral0(Literal* literal) {
- return IsLiteralInt(literal) && literal->raw_value()->AsNumber() == 0.0;
-}
-} // namespace
-
-AsmType* AsmTyper::TypeOf(AstNode* node) const {
- auto node_type_iter = function_node_types_.find(node);
- if (node_type_iter != function_node_types_.end()) {
- return node_type_iter->second;
- }
- node_type_iter = module_node_types_.find(node);
- if (node_type_iter != module_node_types_.end()) {
- return node_type_iter->second;
- }
-
- // Sometimes literal nodes are not added to the node_type_ map simply because
-  // they are not visited with ValidateExpression().
- if (auto* literal = node->AsLiteral()) {
- if (IsLiteralDouble(literal)) {
- return AsmType::Double();
- }
- if (!IsLiteralInt(literal)) {
- return AsmType::None();
- }
- uint32_t u;
- if (literal->value()->ToUint32(&u)) {
- if (u > LargestFixNum) {
- return AsmType::Unsigned();
- }
- return AsmType::FixNum();
- }
- int32_t i;
- if (literal->value()->ToInt32(&i)) {
- return AsmType::Signed();
- }
- }
-
- return AsmType::None();
-}
-
-AsmType* AsmTyper::TypeOf(Variable* v) const { return Lookup(v)->type(); }
-
-AsmTyper::StandardMember AsmTyper::VariableAsStandardMember(Variable* var) {
- auto* var_info = Lookup(var);
- if (var_info == nullptr) {
- return kNone;
- }
- StandardMember member = var_info->standard_member();
- return member;
-}
-
-AsmType* AsmTyper::FailWithMessage(const char* text) {
- FAIL_RAW(root_, OneByteVector(text));
-}
-
-bool AsmTyper::Validate() {
- return ValidateBeforeFunctionsPhase() &&
- !AsmType::None()->IsExactly(ValidateModuleFunctions(root_)) &&
- ValidateAfterFunctionsPhase();
-}
-
-bool AsmTyper::ValidateBeforeFunctionsPhase() {
- if (!AsmType::None()->IsExactly(ValidateModuleBeforeFunctionsPhase(root_))) {
- return true;
- }
- return false;
-}
-
-bool AsmTyper::ValidateInnerFunction(FunctionDeclaration* fun_decl) {
- if (!AsmType::None()->IsExactly(ValidateModuleFunction(fun_decl))) {
- return true;
- }
- return false;
-}
-
-bool AsmTyper::ValidateAfterFunctionsPhase() {
- if (!AsmType::None()->IsExactly(ValidateModuleAfterFunctionsPhase(root_))) {
- return true;
- }
- return false;
-}
-
-void AsmTyper::ClearFunctionNodeTypes() { function_node_types_.clear(); }
-
-AsmType* AsmTyper::TriggerParsingError() { FAIL(root_, "Parsing error"); }
-
-namespace {
-bool IsUseAsmDirective(Statement* first_statement) {
- ExpressionStatement* use_asm = first_statement->AsExpressionStatement();
- if (use_asm == nullptr) {
- return false;
- }
-
- Literal* use_asm_literal = use_asm->expression()->AsLiteral();
-
- if (use_asm_literal == nullptr) {
- return false;
- }
-
- return use_asm_literal->raw_value()->AsString()->IsOneByteEqualTo("use asm");
-}
-
-Assignment* ExtractInitializerExpression(Statement* statement) {
- auto* expr_stmt = statement->AsExpressionStatement();
- if (expr_stmt == nullptr) {
- // Done with initializers.
- return nullptr;
- }
- auto* assign = expr_stmt->expression()->AsAssignment();
- if (assign == nullptr) {
- // Done with initializers.
- return nullptr;
- }
- if (assign->op() != Token::INIT) {
- // Done with initializers.
- return nullptr;
- }
- return assign;
-}
-
-} // namespace
-
-// 6.1 ValidateModule
-AsmType* AsmTyper::ValidateModuleBeforeFunctionsPhase(FunctionLiteral* fun) {
- DeclarationScope* scope = fun->scope();
- if (!scope->is_function_scope()) FAIL(fun, "Not at function scope.");
- if (scope->inner_scope_calls_eval()) {
- FAIL(fun, "Invalid asm.js module using eval.");
- }
- if (!ValidAsmIdentifier(fun->name()))
- FAIL(fun, "Invalid asm.js identifier in module name.");
- module_name_ = fun->name();
-
- // Allowed parameters: Stdlib, FFI, Mem
- static const int MaxModuleParameters = 3;
- if (scope->num_parameters() > MaxModuleParameters) {
- FAIL(fun, "asm.js modules may not have more than three parameters.");
- }
-
- struct {
- StandardMember standard_member;
- } kModuleParamInfo[3] = {
- {kStdlib}, {kFFI}, {kHeap},
- };
-
- for (int ii = 0; ii < scope->num_parameters(); ++ii) {
- Variable* param = scope->parameter(ii);
- DCHECK(param);
-
- if (!ValidAsmIdentifier(param->name())) {
- FAIL(fun, "Invalid asm.js identifier in module parameter.");
- }
-
- auto* param_info = VariableInfo::ForSpecialSymbol(
- zone_, kModuleParamInfo[ii].standard_member);
-
- if (!AddGlobal(param, param_info)) {
- FAIL(fun, "Redeclared identifier in module parameter.");
- }
- }
-
- FlattenedStatements iter(zone_, fun->body());
- auto* use_asm_directive = iter.Next();
- if (use_asm_directive == nullptr) {
- FAIL(fun, "Missing \"use asm\".");
- }
- // Check for extra assignment inserted by the parser when in this form:
- // (function Module(a, b, c) {... })
- ExpressionStatement* estatement = use_asm_directive->AsExpressionStatement();
- if (estatement != nullptr) {
- Assignment* assignment = estatement->expression()->AsAssignment();
- if (assignment != nullptr && assignment->target()->IsVariableProxy() &&
- assignment->target()
- ->AsVariableProxy()
- ->var()
- ->is_sloppy_function_name()) {
- use_asm_directive = iter.Next();
- }
- }
- if (!IsUseAsmDirective(use_asm_directive)) {
- FAIL(fun, "Missing \"use asm\".");
- }
- source_layout_.AddUseAsm(*use_asm_directive);
- module_return_ = nullptr;
-
- // *VIOLATION* The spec states that globals should be followed by function
- // declarations, which should be followed by function pointer tables, followed
-  // by the module export (return) statement. Our AST might be rearranged by the
- // parser, so we can't rely on it being in source code order.
- while (Statement* current = iter.Next()) {
- if (auto* assign = ExtractInitializerExpression(current)) {
- if (assign->value()->IsArrayLiteral()) {
- // Save function tables for later validation.
- function_pointer_tables_.push_back(assign);
- } else {
- RECURSE(ValidateGlobalDeclaration(assign));
- source_layout_.AddGlobal(*assign);
- }
- continue;
- }
-
- if (auto* current_as_return = current->AsReturnStatement()) {
- if (module_return_ != nullptr) {
- FAIL(fun, "Multiple export statements.");
- }
- module_return_ = current_as_return;
- source_layout_.AddExport(*module_return_);
- continue;
- }
-
- FAIL(current, "Invalid top-level statement in asm.js module.");
- }
-
- return AsmType::Int(); // Any type that is not AsmType::None();
-}
-
-AsmType* AsmTyper::ValidateModuleFunction(FunctionDeclaration* fun_decl) {
- RECURSE(ValidateFunction(fun_decl));
- source_layout_.AddFunction(*fun_decl);
-
- return AsmType::Int(); // Any type that is not AsmType::None();
-}
-
-AsmType* AsmTyper::ValidateModuleFunctions(FunctionLiteral* fun) {
- DeclarationScope* scope = fun->scope();
- Declaration::List* decls = scope->declarations();
- for (Declaration* decl : *decls) {
- if (FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration()) {
- RECURSE(ValidateModuleFunction(fun_decl));
- continue;
- }
- }
-
- return AsmType::Int(); // Any type that is not AsmType::None();
-}
-
-AsmType* AsmTyper::ValidateModuleAfterFunctionsPhase(FunctionLiteral* fun) {
- for (auto* function_table : function_pointer_tables_) {
- RECURSE(ValidateFunctionTable(function_table));
- source_layout_.AddTable(*function_table);
- }
-
- DeclarationScope* scope = fun->scope();
- Declaration::List* decls = scope->declarations();
- for (Declaration* decl : *decls) {
- if (decl->IsFunctionDeclaration()) {
- continue;
- }
-
- VariableDeclaration* var_decl = decl->AsVariableDeclaration();
- if (var_decl == nullptr) {
- FAIL(decl, "Invalid asm.js declaration.");
- }
-
- auto* var_proxy = var_decl->proxy();
- if (var_proxy == nullptr) {
- FAIL(decl, "Invalid asm.js declaration.");
- }
-
- if (Lookup(var_proxy->var()) == nullptr) {
- FAIL(decl, "Global variable missing initializer in asm.js module.");
- }
- }
-
- // 6.2 ValidateExport
- if (module_return_ == nullptr) {
- FAIL(fun, "Missing asm.js module export.");
- }
-
- for (auto* forward_def : forward_definitions_) {
- if (forward_def->missing_definition()) {
- FAIL_LOCATION(forward_def->source_location(),
- "Missing definition for forward declared identifier.");
- }
- }
-
- RECURSE(ValidateExport(module_return_));
-
- if (!source_layout_.IsValid()) {
- FAIL(fun, "Invalid asm.js source code layout.");
- }
-
- return AsmType::Int(); // Any type that is not AsmType::None();
-}
-
-namespace {
-bool IsDoubleAnnotation(BinaryOperation* binop) {
- // *VIOLATION* The parser replaces uses of +x with x*1.0.
- if (binop->op() != Token::MUL) {
- return false;
- }
-
- auto* right_as_literal = binop->right()->AsLiteral();
- if (right_as_literal == nullptr) {
- return false;
- }
-
- return IsLiteral1Dot0(right_as_literal);
-}
-
-bool IsIntAnnotation(BinaryOperation* binop) {
- if (binop->op() != Token::BIT_OR) {
- return false;
- }
-
- auto* right_as_literal = binop->right()->AsLiteral();
- if (right_as_literal == nullptr) {
- return false;
- }
-
- return IsLiteral0(right_as_literal);
-}
-} // namespace
-
-AsmType* AsmTyper::ValidateGlobalDeclaration(Assignment* assign) {
- DCHECK(!assign->is_compound());
- if (assign->is_compound()) {
- FAIL(assign,
- "Compound assignment not supported when declaring global variables.");
- }
-
- auto* target = assign->target();
- if (!target->IsVariableProxy()) {
- FAIL(target, "Module assignments may only assign to globals.");
- }
- auto* target_variable = target->AsVariableProxy()->var();
- auto* target_info = Lookup(target_variable);
-
- if (target_info != nullptr) {
- FAIL(target, "Redefined global variable.");
- }
-
- auto* value = assign->value();
- // Not all types of assignment are allowed by asm.js. See
- // 5.5 Global Variable Type Annotations.
- bool global_variable = false;
- if (value->IsLiteral() || value->IsCall()) {
- AsmType* type = nullptr;
- VariableInfo::Mutability mutability;
- if (target_variable->mode() == CONST) {
- mutability = VariableInfo::kConstGlobal;
- } else {
- mutability = VariableInfo::kMutableGlobal;
- }
- RECURSE(type = VariableTypeAnnotations(value, mutability));
- target_info = new (zone_) VariableInfo(type);
- target_info->set_mutability(mutability);
- global_variable = true;
- } else if (value->IsProperty()) {
- target_info = ImportLookup(value->AsProperty());
- if (target_info == nullptr) {
- FAIL(assign, "Invalid import.");
- }
- CHECK(target_info->mutability() == VariableInfo::kImmutableGlobal);
- if (target_info->IsFFI()) {
- // create a new target info that represents a foreign variable.
- target_info = new (zone_) VariableInfo(ffi_type_);
- target_info->set_mutability(VariableInfo::kImmutableGlobal);
- } else if (target_info->type()->IsA(AsmType::Heap())) {
- FAIL(assign, "Heap view types can not be aliased.");
- } else {
- target_info = target_info->Clone(zone_);
- }
- } else if (value->IsBinaryOperation()) {
- // This should either be:
- //
- // var <> = ffi.<>|0
- //
- // or
- //
- // var <> = +ffi.<>
- auto* value_binop = value->AsBinaryOperation();
- auto* left = value_binop->left();
- AsmType* import_type = nullptr;
-
- if (IsDoubleAnnotation(value_binop)) {
- import_type = AsmType::Double();
- } else if (IsIntAnnotation(value_binop)) {
- import_type = AsmType::Int();
- } else {
- FAIL(value,
- "Invalid initializer for foreign import - unrecognized annotation.");
- }
-
- if (!left->IsProperty()) {
- FAIL(value,
- "Invalid initializer for foreign import - must import member.");
- }
- target_info = ImportLookup(left->AsProperty());
- if (target_info == nullptr) {
-      // TODO(jpp): this error message is inaccurate: this may fail if the
- // object lookup fails, or if the property lookup fails, or even if the
- // import is bogus like a().c.
- FAIL(value,
- "Invalid initializer for foreign import - object lookup failed.");
- }
- CHECK(target_info->mutability() == VariableInfo::kImmutableGlobal);
- if (!target_info->IsFFI()) {
- FAIL(value,
- "Invalid initializer for foreign import - object is not the ffi.");
- }
-
- // Create a new target info that represents the foreign import.
- target_info = new (zone_) VariableInfo(import_type);
- target_info->set_mutability(VariableInfo::kMutableGlobal);
- } else if (value->IsCallNew()) {
- AsmType* type = nullptr;
- RECURSE(type = NewHeapView(value->AsCallNew()));
- target_info = new (zone_) VariableInfo(type);
- target_info->set_mutability(VariableInfo::kImmutableGlobal);
- } else if (auto* proxy = value->AsVariableProxy()) {
- auto* var_info = Lookup(proxy->var());
-
- if (var_info == nullptr) {
- FAIL(value, "Undeclared identifier in global initializer");
- }
-
- if (var_info->mutability() != VariableInfo::kConstGlobal) {
- FAIL(value, "Identifier used to initialize a global must be a const");
- }
-
- target_info = new (zone_) VariableInfo(var_info->type());
- if (target_variable->mode() == CONST) {
- target_info->set_mutability(VariableInfo::kConstGlobal);
- } else {
- target_info->set_mutability(VariableInfo::kMutableGlobal);
- }
- }
-
- if (target_info == nullptr) {
- FAIL(assign, "Invalid global variable initializer.");
- }
-
- if (!ValidAsmIdentifier(target_variable->name())) {
- FAIL(target, "Invalid asm.js identifier in global variable.");
- }
-
- if (!AddGlobal(target_variable, target_info)) {
- FAIL(assign, "Redeclared global identifier.");
- }
-
- DCHECK(target_info->type() != AsmType::None());
- if (!global_variable) {
- // Global variables have their types set in VariableTypeAnnotations.
- SetTypeOf(value, target_info->type());
- }
- SetTypeOf(assign, target_info->type());
- SetTypeOf(target, target_info->type());
- return target_info->type();
-}
-
-// 6.2 ValidateExport
-AsmType* AsmTyper::ExportType(VariableProxy* fun_export) {
- auto* fun_info = Lookup(fun_export->var());
- if (fun_info == nullptr) {
- FAIL(fun_export, "Undefined identifier in asm.js module export.");
- }
-
- if (fun_info->standard_member() != kNone) {
- FAIL(fun_export, "Module cannot export standard library functions.");
- }
-
- auto* type = fun_info->type();
- if (type->AsFFIType() != nullptr) {
- FAIL(fun_export, "Module cannot export foreign functions.");
- }
-
- if (type->AsFunctionTableType() != nullptr) {
- FAIL(fun_export, "Module cannot export function tables.");
- }
-
- if (fun_info->type()->AsFunctionType() == nullptr) {
- FAIL(fun_export, "Module export is not an asm.js function.");
- }
-
- if (!fun_export->var()->is_function()) {
- FAIL(fun_export, "Module exports must be function declarations.");
- }
-
- return type;
-}
-
-AsmType* AsmTyper::ValidateExport(ReturnStatement* exports) {
- // asm.js modules can export single functions, or multiple functions in an
- // object literal.
- if (auto* fun_export = exports->expression()->AsVariableProxy()) {
- // Exporting single function.
- AsmType* export_type;
- RECURSE(export_type = ExportType(fun_export));
- return export_type;
- }
-
- if (auto* obj_export = exports->expression()->AsObjectLiteral()) {
- // Exporting object literal.
- for (auto* prop : *obj_export->properties()) {
- if (!prop->key()->IsLiteral()) {
- FAIL(prop->key(),
- "Only normal object properties may be used in the export object "
- "literal.");
- }
- if (!prop->key()->AsLiteral()->IsPropertyName()) {
- FAIL(prop->key(),
- "Exported functions must have valid identifier names.");
- }
-
- auto* export_obj = prop->value()->AsVariableProxy();
- if (export_obj == nullptr) {
- FAIL(prop->value(), "Exported value must be an asm.js function name.");
- }
-
- RECURSE(ExportType(export_obj));
- }
-
- return AsmType::Int();
- }
-
- FAIL(exports, "Unrecognized expression in asm.js module export expression.");
-}
-
-// 6.3 ValidateFunctionTable
-AsmType* AsmTyper::ValidateFunctionTable(Assignment* assign) {
- if (assign->is_compound()) {
- FAIL(assign,
- "Compound assignment not supported when declaring global variables.");
- }
-
- auto* target = assign->target();
- if (!target->IsVariableProxy()) {
- FAIL(target, "Module assignments may only assign to globals.");
- }
- auto* target_variable = target->AsVariableProxy()->var();
-
- auto* value = assign->value()->AsArrayLiteral();
- CHECK(value != nullptr);
- ZoneList<Expression*>* pointers = value->values();
-
- // The function table size must be n = 2 ** m, for m >= 0;
- // TODO(jpp): should this be capped?
- if (!base::bits::IsPowerOfTwo32(pointers->length())) {
- FAIL(assign, "Invalid length for function pointer table.");
- }
-
- AsmType* table_element_type = nullptr;
- for (auto* initializer : *pointers) {
- auto* var_proxy = initializer->AsVariableProxy();
- if (var_proxy == nullptr) {
- FAIL(initializer,
- "Function pointer table initializer must be a function name.");
- }
-
- auto* var_info = Lookup(var_proxy->var());
- if (var_info == nullptr) {
- FAIL(var_proxy,
- "Undefined identifier in function pointer table initializer.");
- }
-
- if (var_info->standard_member() != kNone) {
- FAIL(initializer,
- "Function pointer table must not be a member of the standard "
- "library.");
- }
-
- auto* initializer_type = var_info->type();
- if (initializer_type->AsFunctionType() == nullptr) {
- FAIL(initializer,
- "Function pointer table initializer must be an asm.js function.");
- }
-
- DCHECK(var_info->type()->AsFFIType() == nullptr);
- DCHECK(var_info->type()->AsFunctionTableType() == nullptr);
-
- if (table_element_type == nullptr) {
- table_element_type = initializer_type;
- } else if (!initializer_type->IsA(table_element_type)) {
- FAIL(initializer, "Type mismatch in function pointer table initializer.");
- }
- }
-
- auto* target_info = Lookup(target_variable);
- if (target_info == nullptr) {
-    // Function pointer tables are the last entities to be validated, so this is
- // unlikely to happen: only unreferenced function tables will not already
- // have an entry in the global scope.
- target_info = new (zone_) VariableInfo(AsmType::FunctionTableType(
- zone_, pointers->length(), table_element_type));
- target_info->set_mutability(VariableInfo::kImmutableGlobal);
- if (!ValidAsmIdentifier(target_variable->name())) {
- FAIL(target, "Invalid asm.js identifier in function table name.");
- }
- if (!AddGlobal(target_variable, target_info)) {
- DCHECK(false);
- FAIL(assign, "Redeclared global identifier in function table name.");
- }
- SetTypeOf(value, target_info->type());
- return target_info->type();
- }
-
- auto* target_info_table = target_info->type()->AsFunctionTableType();
- if (target_info_table == nullptr) {
- FAIL(assign, "Identifier redefined as function pointer table.");
- }
-
- if (!target_info->missing_definition()) {
- FAIL(assign, "Identifier redefined (function table name).");
- }
-
- if (static_cast<int>(target_info_table->length()) != pointers->length()) {
- FAIL(assign, "Function table size mismatch.");
- }
-
- DCHECK(target_info_table->signature()->AsFunctionType());
- if (!table_element_type->IsA(target_info_table->signature())) {
- FAIL(assign, "Function table initializer does not match previous type.");
- }
-
- target_info->MarkDefined();
- DCHECK(target_info->type() != AsmType::None());
- SetTypeOf(value, target_info->type());
-
- return target_info->type();
-}
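For reference, the power-of-two requirement enforced through base::bits::IsPowerOfTwo32 in the deleted code above is the usual bit trick; a table of length n = 2^m lets asm.js call sites mask the index (table[expr & (n - 1)]) instead of bounds-checking it. A minimal equivalent:

    #include <cstdint>

    // n is a power of two exactly when it has a single set bit; n & (n - 1)
    // clears the lowest set bit, so the result is zero only for powers of two.
    bool IsPowerOfTwo32(uint32_t n) {
      return n != 0 && (n & (n - 1)) == 0;
    }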
-
-// 6.4 ValidateFunction
-AsmType* AsmTyper::ValidateFunction(FunctionDeclaration* fun_decl) {
- FunctionScope _(this);
-
- // Extract parameter types.
- auto* fun = fun_decl->fun();
-
- auto* fun_decl_proxy = fun_decl->proxy();
- if (fun_decl_proxy == nullptr) {
- FAIL(fun_decl, "Anonymous functions are not support in asm.js.");
- }
-
- Statement* current;
- FlattenedStatements iter(zone_, fun->body());
-
- size_t annotated_parameters = 0;
-
- // 5.3 Function type annotations
- // * parameters
- ZoneVector<AsmType*> parameter_types(zone_);
- for (; (current = iter.Next()) != nullptr; ++annotated_parameters) {
- auto* stmt = current->AsExpressionStatement();
- if (stmt == nullptr) {
- // Done with parameters.
- break;
- }
- auto* expr = stmt->expression()->AsAssignment();
- if (expr == nullptr || expr->is_compound()) {
- // Done with parameters.
- break;
- }
- auto* proxy = expr->target()->AsVariableProxy();
- if (proxy == nullptr) {
- // Done with parameters.
- break;
- }
- auto* param = proxy->var();
- if (param->location() != VariableLocation::PARAMETER ||
- param->index() != static_cast<int>(annotated_parameters)) {
- // Done with parameters.
- break;
- }
-
- AsmType* type;
- RECURSE(type = ParameterTypeAnnotations(param, expr->value()));
- DCHECK(type->IsParameterType());
- auto* param_info = new (zone_) VariableInfo(type);
- param_info->set_mutability(VariableInfo::kLocal);
- if (!ValidAsmIdentifier(proxy->name())) {
- FAIL(proxy, "Invalid asm.js identifier in parameter name.");
- }
-
- if (!AddLocal(param, param_info)) {
- FAIL(proxy, "Redeclared parameter.");
- }
- parameter_types.push_back(type);
- SetTypeOf(proxy, type);
- SetTypeOf(expr, type);
- SetTypeOf(expr->value(), type);
- }
-
- if (static_cast<int>(annotated_parameters) != fun->parameter_count()) {
- FAIL(fun_decl, "Incorrect parameter type annotations.");
- }
-
- // 5.3 Function type annotations
- // * locals
- for (; current; current = iter.Next()) {
- auto* initializer = ExtractInitializerExpression(current);
- if (initializer == nullptr) {
- // Done with locals.
- break;
- }
-
- auto* local = initializer->target()->AsVariableProxy();
- if (local == nullptr) {
-      // Done with locals. This should never happen. Even if it does, the asm.js
-      // code should not declare any other locals after this point, so we assume
-      // this is OK. If any other variable declaration is found, we report a
-      // validation error.
- DCHECK(false);
- break;
- }
-
- AsmType* type;
- RECURSE(type = VariableTypeAnnotations(initializer->value()));
- auto* local_info = new (zone_) VariableInfo(type);
- local_info->set_mutability(VariableInfo::kLocal);
- if (!ValidAsmIdentifier(local->name())) {
- FAIL(local, "Invalid asm.js identifier in local variable.");
- }
-
- if (!AddLocal(local->var(), local_info)) {
- FAIL(initializer, "Redeclared local.");
- }
-
- SetTypeOf(local, type);
- SetTypeOf(initializer, type);
- }
-
- // 5.2 Return Type Annotations
- // *VIOLATION* we peel blocks to find the last statement in the asm module
- // because the parser may introduce synthetic blocks.
- ZoneList<Statement*>* statements = fun->body();
-
- do {
- if (statements->length() == 0) {
- return_type_ = AsmType::Void();
- } else {
- auto* last_statement = statements->last();
- auto* as_block = last_statement->AsBlock();
- if (as_block != nullptr) {
- statements = as_block->statements();
- } else {
- if (auto* ret_statement = last_statement->AsReturnStatement()) {
- RECURSE(return_type_ =
- ReturnTypeAnnotations(ret_statement->expression()));
- } else {
- return_type_ = AsmType::Void();
- }
- }
- }
- } while (return_type_ == AsmType::None());
-
- DCHECK(return_type_->IsReturnType());
-
- for (Declaration* decl : *fun->scope()->declarations()) {
- auto* var_decl = decl->AsVariableDeclaration();
- if (var_decl == nullptr) {
- FAIL(decl, "Functions may only define inner variables.");
- }
-
- auto* var_proxy = var_decl->proxy();
- if (var_proxy == nullptr) {
- FAIL(decl, "Invalid local declaration declaration.");
- }
-
- auto* var_info = Lookup(var_proxy->var());
- if (var_info == nullptr || var_info->IsGlobal()) {
- FAIL(decl, "Local variable missing initializer in asm.js module.");
- }
- }
-
- for (; current; current = iter.Next()) {
- AsmType* current_type;
- RECURSE(current_type = ValidateStatement(current));
- }
-
- auto* fun_type = AsmType::Function(zone_, return_type_);
- auto* fun_type_as_function = fun_type->AsFunctionType();
- for (auto* param_type : parameter_types) {
- fun_type_as_function->AddArgument(param_type);
- }
-
- auto* fun_var = fun_decl_proxy->var();
- auto* fun_info = new (zone_) VariableInfo(fun_type);
- fun_info->set_mutability(VariableInfo::kImmutableGlobal);
- auto* old_fun_info = Lookup(fun_var);
- if (old_fun_info == nullptr) {
- if (!ValidAsmIdentifier(fun_var->name())) {
- FAIL(fun_decl_proxy, "Invalid asm.js identifier in function name.");
- }
- if (!AddGlobal(fun_var, fun_info)) {
- DCHECK(false);
- FAIL(fun_decl, "Redeclared global identifier.");
- }
-
- SetTypeOf(fun, fun_type);
- return fun_type;
- }
-
- // Not necessarily an error -- fun_decl might have been used before being
- // defined. If that's the case, then the type in the global environment must
- // be the same as the type inferred by the parameter/return type annotations.
- auto* old_fun_type = old_fun_info->type();
- if (old_fun_type->AsFunctionType() == nullptr) {
- FAIL(fun_decl, "Identifier redefined as function.");
- }
-
- if (!old_fun_info->missing_definition()) {
- FAIL(fun_decl, "Identifier redefined (function name).");
- }
-
- if (!fun_type->IsA(old_fun_type)) {
- FAIL(fun_decl, "Signature mismatch when defining function.");
- }
-
- old_fun_info->MarkDefined();
- SetTypeOf(fun, fun_type);
-
- return fun_type;
-}
-
-// 6.5 ValidateStatement
-AsmType* AsmTyper::ValidateStatement(Statement* statement) {
- switch (statement->node_type()) {
- default:
- FAIL(statement, "Statement type invalid for asm.js.");
- case AstNode::kBlock:
- return ValidateBlockStatement(statement->AsBlock());
- case AstNode::kExpressionStatement:
- return ValidateExpressionStatement(statement->AsExpressionStatement());
- case AstNode::kEmptyStatement:
- return ValidateEmptyStatement(statement->AsEmptyStatement());
- case AstNode::kIfStatement:
- return ValidateIfStatement(statement->AsIfStatement());
- case AstNode::kReturnStatement:
- return ValidateReturnStatement(statement->AsReturnStatement());
- case AstNode::kWhileStatement:
- return ValidateWhileStatement(statement->AsWhileStatement());
- case AstNode::kDoWhileStatement:
- return ValidateDoWhileStatement(statement->AsDoWhileStatement());
- case AstNode::kForStatement:
- return ValidateForStatement(statement->AsForStatement());
- case AstNode::kBreakStatement:
- return ValidateBreakStatement(statement->AsBreakStatement());
- case AstNode::kContinueStatement:
- return ValidateContinueStatement(statement->AsContinueStatement());
- case AstNode::kSwitchStatement:
- return ValidateSwitchStatement(statement->AsSwitchStatement());
- }
-
- return AsmType::Void();
-}
-
-// 6.5.1 BlockStatement
-AsmType* AsmTyper::ValidateBlockStatement(Block* block) {
- FlattenedStatements iter(zone_, block->statements());
-
- while (auto* current = iter.Next()) {
- RECURSE(ValidateStatement(current));
- }
-
- return AsmType::Void();
-}
-
-// 6.5.2 ExpressionStatement
-AsmType* AsmTyper::ValidateExpressionStatement(ExpressionStatement* expr) {
- auto* expression = expr->expression();
- if (auto* call = expression->AsCall()) {
- RECURSE(ValidateCall(AsmType::Void(), call));
- } else {
- RECURSE(ValidateExpression(expression));
- }
-
- return AsmType::Void();
-}
-
-// 6.5.3 EmptyStatement
-AsmType* AsmTyper::ValidateEmptyStatement(EmptyStatement* empty) {
- return AsmType::Void();
-}
-
-// 6.5.4 IfStatement
-AsmType* AsmTyper::ValidateIfStatement(IfStatement* if_stmt) {
- AsmType* cond_type;
- RECURSE(cond_type = ValidateExpression(if_stmt->condition()));
- if (!cond_type->IsA(AsmType::Int())) {
- FAIL(if_stmt->condition(), "If condition must be type int.");
- }
- RECURSE(ValidateStatement(if_stmt->then_statement()));
- RECURSE(ValidateStatement(if_stmt->else_statement()));
- return AsmType::Void();
-}
-
-// 6.5.5 ReturnStatement
-AsmType* AsmTyper::ValidateReturnStatement(ReturnStatement* ret_stmt) {
- AsmType* ret_expr_type = AsmType::Void();
- if (auto* ret_expr = ret_stmt->expression()) {
- RECURSE(ret_expr_type = ValidateExpression(ret_expr));
- if (ret_expr_type == AsmType::Void()) {
- // *VIOLATION* The parser modifies the source code so that expressionless
- // returns will return undefined, so we need to allow that.
- if (!ret_expr->IsUndefinedLiteral()) {
- FAIL(ret_stmt, "Return statement expression can't be void.");
- }
- }
- }
-
- if (!ret_expr_type->IsA(return_type_)) {
- FAIL(ret_stmt, "Type mismatch in return statement.");
- }
-
- return ret_expr_type;
-}
-
-// 6.5.6 IterationStatement
-// 6.5.6.a WhileStatement
-AsmType* AsmTyper::ValidateWhileStatement(WhileStatement* while_stmt) {
- AsmType* cond_type;
- RECURSE(cond_type = ValidateExpression(while_stmt->cond()));
- if (!cond_type->IsA(AsmType::Int())) {
- FAIL(while_stmt->cond(), "While condition must be type int.");
- }
-
- if (auto* body = while_stmt->body()) {
- RECURSE(ValidateStatement(body));
- }
- return AsmType::Void();
-}
-
-// 6.5.6.b DoWhileStatement
-AsmType* AsmTyper::ValidateDoWhileStatement(DoWhileStatement* do_while) {
- AsmType* cond_type;
- RECURSE(cond_type = ValidateExpression(do_while->cond()));
- if (!cond_type->IsA(AsmType::Int())) {
- FAIL(do_while->cond(), "Do {} While condition must be type int.");
- }
-
- if (auto* body = do_while->body()) {
- RECURSE(ValidateStatement(body));
- }
- return AsmType::Void();
-}
-
-// 6.5.6.c ForStatement
-AsmType* AsmTyper::ValidateForStatement(ForStatement* for_stmt) {
- if (auto* init = for_stmt->init()) {
- RECURSE(ValidateStatement(init));
- }
-
- if (auto* cond = for_stmt->cond()) {
- AsmType* cond_type;
- RECURSE(cond_type = ValidateExpression(cond));
- if (!cond_type->IsA(AsmType::Int())) {
- FAIL(cond, "For condition must be type int.");
- }
- }
-
- if (auto* next = for_stmt->next()) {
- RECURSE(ValidateStatement(next));
- }
-
- if (auto* body = for_stmt->body()) {
- RECURSE(ValidateStatement(body));
- }
-
- return AsmType::Void();
-}
-
-// 6.5.7 BreakStatement
-AsmType* AsmTyper::ValidateBreakStatement(BreakStatement* brk_stmt) {
- return AsmType::Void();
-}
-
-// 6.5.8 ContinueStatement
-AsmType* AsmTyper::ValidateContinueStatement(ContinueStatement* cont_stmt) {
- return AsmType::Void();
-}
-
-// 6.5.9 LabelledStatement
-// No need to handle these here -- see the AsmTyper's definition.
-
-// 6.5.10 SwitchStatement
-AsmType* AsmTyper::ValidateSwitchStatement(SwitchStatement* stmt) {
- AsmType* cond_type;
- RECURSE(cond_type = ValidateExpression(stmt->tag()));
- if (!cond_type->IsA(AsmType::Signed())) {
- FAIL(stmt, "Switch tag must be signed.");
- }
-
- int default_pos = kNoSourcePosition;
- int last_case_pos = kNoSourcePosition;
- ZoneSet<int32_t> cases_seen(zone_);
- for (auto* a_case : *stmt->cases()) {
- if (a_case->is_default()) {
- CHECK(default_pos == kNoSourcePosition);
- RECURSE(ValidateDefault(a_case));
- default_pos = a_case->position();
- continue;
- }
-
- if (last_case_pos == kNoSourcePosition) {
- last_case_pos = a_case->position();
- } else {
- last_case_pos = std::max(last_case_pos, a_case->position());
- }
-
- int32_t case_lbl;
- RECURSE(ValidateCase(a_case, &case_lbl));
- auto case_lbl_pos = cases_seen.find(case_lbl);
- if (case_lbl_pos != cases_seen.end() && *case_lbl_pos == case_lbl) {
- FAIL(a_case, "Duplicated case label.");
- }
- cases_seen.insert(case_lbl);
- }
-
- if (!cases_seen.empty()) {
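- // The span between the smallest and the largest case label must fit in
- // 32 bits; e.g. a switch containing both `case -2147483648:` and
- // `case 2147483647:` is rejected below.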
- const int64_t max_lbl = *cases_seen.rbegin();
- const int64_t min_lbl = *cases_seen.begin();
- if (max_lbl - min_lbl > std::numeric_limits<int32_t>::max()) {
- FAIL(stmt, "Out-of-bounds case label range.");
- }
- }
-
- if (last_case_pos != kNoSourcePosition && default_pos != kNoSourcePosition &&
- default_pos < last_case_pos) {
- FAIL(stmt, "Switch default must appear last.");
- }
-
- return AsmType::Void();
-}
-
-// 6.6 ValidateCase
-namespace {
-bool ExtractInt32CaseLabel(CaseClause* clause, int32_t* lbl) {
- auto* lbl_expr = clause->label()->AsLiteral();
-
- if (lbl_expr == nullptr) {
- return false;
- }
-
- if (!IsLiteralInt(lbl_expr)) {
- return false;
- }
-
- return lbl_expr->value()->ToInt32(lbl);
-}
-} // namespace
-
-AsmType* AsmTyper::ValidateCase(CaseClause* label, int32_t* case_lbl) {
- if (!ExtractInt32CaseLabel(label, case_lbl)) {
- FAIL(label, "Case label must be a 32-bit signed integer.");
- }
-
- FlattenedStatements iter(zone_, label->statements());
- while (auto* current = iter.Next()) {
- RECURSE(ValidateStatement(current));
- }
- return AsmType::Void();
-}
-
-// 6.7 ValidateDefault
-AsmType* AsmTyper::ValidateDefault(CaseClause* label) {
- FlattenedStatements iter(zone_, label->statements());
- while (auto* current = iter.Next()) {
- RECURSE(ValidateStatement(current));
- }
- return AsmType::Void();
-}
-
-// 6.8 ValidateExpression
-AsmType* AsmTyper::ValidateExpression(Expression* expr) {
- AsmType* expr_ty = AsmType::None();
-
- switch (expr->node_type()) {
- default:
- FAIL(expr, "Invalid asm.js expression.");
- case AstNode::kLiteral:
- RECURSE(expr_ty = ValidateNumericLiteral(expr->AsLiteral()));
- break;
- case AstNode::kVariableProxy:
- RECURSE(expr_ty = ValidateIdentifier(expr->AsVariableProxy()));
- break;
- case AstNode::kCall:
- RECURSE(expr_ty = ValidateCallExpression(expr->AsCall()));
- break;
- case AstNode::kProperty:
- RECURSE(expr_ty = ValidateMemberExpression(expr->AsProperty()));
- break;
- case AstNode::kAssignment:
- RECURSE(expr_ty = ValidateAssignmentExpression(expr->AsAssignment()));
- break;
- case AstNode::kUnaryOperation:
- RECURSE(expr_ty = ValidateUnaryExpression(expr->AsUnaryOperation()));
- break;
- case AstNode::kConditional:
- RECURSE(expr_ty = ValidateConditionalExpression(expr->AsConditional()));
- break;
- case AstNode::kCompareOperation:
- RECURSE(expr_ty = ValidateCompareOperation(expr->AsCompareOperation()));
- break;
- case AstNode::kBinaryOperation:
- RECURSE(expr_ty = ValidateBinaryOperation(expr->AsBinaryOperation()));
- break;
- }
-
- SetTypeOf(expr, expr_ty);
- return expr_ty;
-}
-
-AsmType* AsmTyper::ValidateCompareOperation(CompareOperation* cmp) {
- switch (cmp->op()) {
- default:
- FAIL(cmp, "Invalid asm.js comparison operator.");
- case Token::LT:
- case Token::LTE:
- case Token::GT:
- case Token::GTE:
- return ValidateRelationalExpression(cmp);
- case Token::EQ:
- case Token::NE:
- return ValidateEqualityExpression(cmp);
- }
-
- UNREACHABLE();
-}
-
-namespace {
-bool IsInvert(BinaryOperation* binop) {
- if (binop->op() != Token::BIT_XOR) {
- return false;
- }
-
- auto* right_as_literal = binop->right()->AsLiteral();
- if (right_as_literal == nullptr) {
- return false;
- }
-
- return IsLiteralMinus1(right_as_literal);
-}
-
-bool IsUnaryMinus(BinaryOperation* binop) {
- // *VIOLATION* The parser replaces uses of -x with x*-1.
- if (binop->op() != Token::MUL) {
- return false;
- }
-
- auto* right_as_literal = binop->right()->AsLiteral();
- if (right_as_literal == nullptr) {
- return false;
- }
-
- return IsLiteralMinus1(right_as_literal);
-}
-} // namespace
-
-AsmType* AsmTyper::ValidateBinaryOperation(BinaryOperation* expr) {
-#define UNOP_OVERLOAD(Src, Dest) \
- do { \
- if (left_type->IsA(AsmType::Src())) { \
- return AsmType::Dest(); \
- } \
- } while (0)
-
- switch (expr->op()) {
- default:
- FAIL(expr, "Invalid asm.js binary expression.");
- case Token::COMMA:
- return ValidateCommaExpression(expr);
- case Token::MUL:
- if (IsDoubleAnnotation(expr)) {
- // *VIOLATION* We can't be 100% sure this really IS a unary + in the asm
- // source so we have to be lenient, and treat this as a unary +.
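- // For example, `+f()` in the original source reaches this point as
- // `f() * 1.0`, and is validated as a call whose result is coerced to
- // double.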
- if (auto* Call = expr->left()->AsCall()) {
- return ValidateCall(AsmType::Double(), Call);
- }
- AsmType* left_type;
- RECURSE(left_type = ValidateExpression(expr->left()));
- SetTypeOf(expr->right(), AsmType::Double());
- UNOP_OVERLOAD(Signed, Double);
- UNOP_OVERLOAD(Unsigned, Double);
- UNOP_OVERLOAD(DoubleQ, Double);
- UNOP_OVERLOAD(FloatQ, Double);
- FAIL(expr, "Invalid type for conversion to double.");
- }
-
- if (IsUnaryMinus(expr)) {
- // *VIOLATION* the parser converts -x to x * -1.
- AsmType* left_type;
- RECURSE(left_type = ValidateExpression(expr->left()));
- SetTypeOf(expr->right(), left_type);
- UNOP_OVERLOAD(Int, Intish);
- UNOP_OVERLOAD(DoubleQ, Double);
- UNOP_OVERLOAD(FloatQ, Floatish);
- FAIL(expr, "Invalid type for unary -.");
- }
- // FALLTHROUGH
- case Token::DIV:
- case Token::MOD:
- return ValidateMultiplicativeExpression(expr);
- case Token::ADD:
- case Token::SUB: {
- static const uint32_t kInitialIntishCount = 0;
- return ValidateAdditiveExpression(expr, kInitialIntishCount);
- }
- case Token::SAR:
- case Token::SHL:
- case Token::SHR:
- return ValidateShiftExpression(expr);
- case Token::BIT_AND:
- return ValidateBitwiseANDExpression(expr);
- case Token::BIT_XOR:
- if (IsInvert(expr)) {
- auto* left = expr->left();
- auto* left_as_binop = left->AsBinaryOperation();
-
- if (left_as_binop != nullptr && IsInvert(left_as_binop)) {
- // This is the special ~~ operator.
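- // For example, `i = ~~d;` truncates the double `d` to a signed
- // integer.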
- AsmType* left_type;
- RECURSE(left_type = ValidateExpression(left_as_binop->left()));
- SetTypeOf(left_as_binop->right(), AsmType::FixNum());
- SetTypeOf(left_as_binop, AsmType::Signed());
- SetTypeOf(expr->right(), AsmType::FixNum());
- UNOP_OVERLOAD(Double, Signed);
- UNOP_OVERLOAD(FloatQ, Signed);
- FAIL(left_as_binop, "Invalid type for conversion to signed.");
- }
-
- AsmType* left_type;
- RECURSE(left_type = ValidateExpression(left));
- UNOP_OVERLOAD(Intish, Signed);
- FAIL(left, "Invalid type for ~.");
- }
-
- return ValidateBitwiseXORExpression(expr);
- case Token::BIT_OR:
- return ValidateBitwiseORExpression(expr);
- }
-#undef UNOP_OVERLOAD
- UNREACHABLE();
-}
-
-// 6.8.1 Expression
-AsmType* AsmTyper::ValidateCommaExpression(BinaryOperation* comma) {
- // The AST looks like:
- // (expr COMMA (expr COMMA (expr COMMA (... ))))
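- // For example, in `(f(), g(), x | 0)` only the type of the final
- // expression escapes; the earlier calls are validated as void calls.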
-
- auto* left = comma->left();
- if (auto* left_as_call = left->AsCall()) {
- RECURSE(ValidateCall(AsmType::Void(), left_as_call));
- } else {
- RECURSE(ValidateExpression(left));
- }
-
- auto* right = comma->right();
- AsmType* right_type = nullptr;
- if (auto* right_as_call = right->AsCall()) {
- RECURSE(right_type = ValidateFloatCoercion(right_as_call));
- if (right_type != AsmType::Float()) {
- // right_type == nullptr <-> right_as_call is not a call to fround.
- DCHECK(right_type == nullptr);
- RECURSE(right_type = ValidateCall(AsmType::Void(), right_as_call));
- // An unannotated function call to something that's not fround must be a
- // call to a void function.
- DCHECK_EQ(right_type, AsmType::Void());
- }
- } else {
- RECURSE(right_type = ValidateExpression(right));
- }
-
- return right_type;
-}
-
-// 6.8.2 NumericLiteral
-AsmType* AsmTyper::ValidateNumericLiteral(Literal* literal) {
- // *VIOLATION* asm.js does not allow the use of undefined, but our parser
- // inserts them, so we have to handle them.
- if (literal->IsUndefinedLiteral()) {
- return AsmType::Void();
- }
-
- if (IsLiteralDouble(literal)) {
- return AsmType::Double();
- }
-
- // The parser collapses expressions like !0 and !123 to true/false.
- // We therefore need to permit these as alternate versions of 0 / 1.
- if (literal->raw_value()->IsTrue() || literal->raw_value()->IsFalse()) {
- return AsmType::Int();
- }
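- // For example, `5` validates as fixnum, `0x80000000` as unsigned, and
- // `-5` as signed below.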
-
- uint32_t value;
- if (!literal->value()->ToUint32(&value)) {
- int32_t value;
- if (!literal->value()->ToInt32(&value)) {
- FAIL(literal, "Integer literal is out of range.");
- }
- // *VIOLATION* Not really a violation, but rather a difference in
- // validation. The spec handles -NumericLiteral in ValidateUnaryExpression,
- // but V8's AST represents the negative literals as Literals.
- return AsmType::Signed();
- }
-
- if (value <= LargestFixNum) {
- return AsmType::FixNum();
- }
-
- return AsmType::Unsigned();
-}
-
-// 6.8.3 Identifier
-AsmType* AsmTyper::ValidateIdentifier(VariableProxy* proxy) {
- auto* proxy_info = Lookup(proxy->var());
- if (proxy_info == nullptr) {
- FAIL(proxy, "Undeclared identifier.");
- }
- auto* type = proxy_info->type();
- if (type->IsA(AsmType::None()) || type->AsCallableType() != nullptr) {
- FAIL(proxy, "Identifier may not be accessed by ordinary expressions.");
- }
- return type;
-}
-
-// 6.8.4 CallExpression
-AsmType* AsmTyper::ValidateCallExpression(Call* call) {
- AsmType* return_type;
- RECURSE(return_type = ValidateFloatCoercion(call));
- if (return_type == nullptr) {
- FAIL(call, "Unanotated call to a function must be a call to fround.");
- }
- return return_type;
-}
-
-// 6.8.5 MemberExpression
-AsmType* AsmTyper::ValidateMemberExpression(Property* prop) {
- AsmType* return_type;
- RECURSE(return_type = ValidateHeapAccess(prop, LoadFromHeap));
- return return_type;
-}
-
-// 6.8.6 AssignmentExpression
-AsmType* AsmTyper::ValidateAssignmentExpression(Assignment* assignment) {
- AsmType* value_type;
- RECURSE(value_type = ValidateExpression(assignment->value()));
-
- if (assignment->op() == Token::INIT) {
- FAIL(assignment,
- "Local variable declaration must be at the top of the function.");
- }
-
- if (auto* target_as_proxy = assignment->target()->AsVariableProxy()) {
- auto* var = target_as_proxy->var();
- auto* target_info = Lookup(var);
-
- if (target_info == nullptr) {
- if (var->mode() != TEMPORARY) {
- FAIL(target_as_proxy, "Undeclared identifier.");
- }
- // Temporary variables are special: we add them to the local symbol table
- // as we see them, with the exact type of the variable's initializer. This
- // means that temporary variables might have nonsensical types (i.e.,
- // intish, float?, fixnum, and not just the "canonical" types).
- auto* var_info = new (zone_) VariableInfo(value_type);
- var_info->set_mutability(VariableInfo::kLocal);
- if (!ValidAsmIdentifier(target_as_proxy->name())) {
- FAIL(target_as_proxy,
- "Invalid asm.js identifier in temporary variable.");
- }
-
- if (!AddLocal(var, var_info)) {
- FAIL(assignment, "Failed to add temporary variable to symbol table.");
- }
- return value_type;
- }
-
- if (!target_info->IsMutable()) {
- FAIL(assignment, "Can't assign to immutable symbol.");
- }
-
- DCHECK_NE(AsmType::None(), target_info->type());
- if (!value_type->IsA(target_info->type())) {
- FAIL(assignment, "Type mismatch in assignment.");
- }
-
- return value_type;
- }
-
- if (auto* target_as_property = assignment->target()->AsProperty()) {
- AsmType* allowed_store_types;
- RECURSE(allowed_store_types =
- ValidateHeapAccess(target_as_property, StoreToHeap));
-
- if (!value_type->IsA(allowed_store_types)) {
- FAIL(assignment, "Type mismatch in heap assignment.");
- }
-
- return value_type;
- }
-
- FAIL(assignment, "Invalid asm.js assignment.");
-}
-
-// 6.8.7 UnaryExpression
-AsmType* AsmTyper::ValidateUnaryExpression(UnaryOperation* unop) {
- // *VIOLATION* -NumericLiteral is validated in ValidateLiteral.
- // *VIOLATION* +UnaryExpression is validated in ValidateBinaryOperation.
- // *VIOLATION* ~UnaryOperation is validated in ValidateBinaryOperation.
- // *VIOLATION* ~~UnaryOperation is validated in ValidateBinaryOperation.
- DCHECK(unop->op() != Token::BIT_NOT);
- DCHECK(unop->op() != Token::ADD);
- AsmType* exp_type;
- RECURSE(exp_type = ValidateExpression(unop->expression()));
-#define UNOP_OVERLOAD(Src, Dest) \
- do { \
- if (exp_type->IsA(AsmType::Src())) { \
- return AsmType::Dest(); \
- } \
- } while (0)
-
- // 8.1 Unary Operators
- switch (unop->op()) {
- default:
- FAIL(unop, "Invalid unary operator.");
- case Token::ADD:
- // We can't test this because of the +x -> x * 1.0 transformation.
- DCHECK(false);
- UNOP_OVERLOAD(Signed, Double);
- UNOP_OVERLOAD(Unsigned, Double);
- UNOP_OVERLOAD(DoubleQ, Double);
- UNOP_OVERLOAD(FloatQ, Double);
- FAIL(unop, "Invalid type for unary +.");
- case Token::SUB:
- // We can't test this because of the -x -> x * -1.0 transformation.
- DCHECK(false);
- UNOP_OVERLOAD(Int, Intish);
- UNOP_OVERLOAD(DoubleQ, Double);
- UNOP_OVERLOAD(FloatQ, Floatish);
- FAIL(unop, "Invalid type for unary -.");
- case Token::BIT_NOT:
- // We can't test this because of the ~x -> x ^ -1 transformation.
- DCHECK(false);
- UNOP_OVERLOAD(Intish, Signed);
- FAIL(unop, "Invalid type for ~.");
- case Token::NOT:
- UNOP_OVERLOAD(Int, Int);
- FAIL(unop, "Invalid type for !.");
- }
-
-#undef UNOP_OVERLOAD
-
- UNREACHABLE();
-}
-
-// 6.8.8 MultiplicativeExpression
-namespace {
-bool IsIntishLiteralFactor(Expression* expr, int32_t* factor) {
- auto* literal = expr->AsLiteral();
- if (literal == nullptr) {
- return false;
- }
-
- if (!IsLiteralInt(literal)) {
- return false;
- }
-
- if (!literal->value()->ToInt32(factor)) {
- return false;
- }
- static const int32_t kIntishBound = 1 << 20;
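- // Per the asm.js spec, an int multiplied by a literal stays intish only
- // when the literal's magnitude is below 2^20 (e.g. `(x * 3) | 0`), so
- // that the intermediate product still fits exactly in a double.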
- return -kIntishBound < *factor && *factor < kIntishBound;
-}
-} // namespace
-
-AsmType* AsmTyper::ValidateMultiplicativeExpression(BinaryOperation* binop) {
- DCHECK(!IsDoubleAnnotation(binop));
-
- auto* left = binop->left();
- auto* right = binop->right();
-
- bool intish_mul_failed = false;
- if (binop->op() == Token::MUL) {
- int32_t factor;
- if (IsIntishLiteralFactor(left, &factor)) {
- AsmType* right_type;
- RECURSE(right_type = ValidateExpression(right));
- if (right_type->IsA(AsmType::Int())) {
- return AsmType::Intish();
- }
- // Can't fail here, because the rhs might contain a valid intish factor.
- //
- // The solution is to flag that there was an error, and later on -- when
- // both lhs and rhs are evaluated -- complain.
- intish_mul_failed = true;
- }
-
- if (IsIntishLiteralFactor(right, &factor)) {
- AsmType* left_type;
- RECURSE(left_type = ValidateExpression(left));
- if (left_type->IsA(AsmType::Int())) {
- // *VIOLATION* This will also (and correctly) handle -X, when X is an
- // integer. Therefore, we don't need to handle this case within the if
- // block below.
- return AsmType::Intish();
- }
- intish_mul_failed = true;
-
- if (factor == -1) {
- // *VIOLATION* The frontend transforms -x into x * -1 (not -1.0, because
- // consistency is overrated.)
- if (left_type->IsA(AsmType::DoubleQ())) {
- return AsmType::Double();
- } else if (left_type->IsA(AsmType::FloatQ())) {
- return AsmType::Floatish();
- }
- }
- }
- }
-
- if (intish_mul_failed) {
- FAIL(binop, "Invalid types for intish * (or unary -).");
- }
-
- AsmType* left_type;
- AsmType* right_type;
- RECURSE(left_type = ValidateExpression(left));
- RECURSE(right_type = ValidateExpression(right));
-
-#define BINOP_OVERLOAD(Src0, Src1, Dest) \
- do { \
- if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
- return AsmType::Dest(); \
- } \
- } while (0)
- switch (binop->op()) {
- default:
- FAIL(binop, "Invalid multiplicative expression.");
- case Token::MUL:
- BINOP_OVERLOAD(DoubleQ, DoubleQ, Double);
- BINOP_OVERLOAD(FloatQ, FloatQ, Floatish);
- FAIL(binop, "Invalid operands for *.");
- case Token::DIV:
- BINOP_OVERLOAD(Signed, Signed, Intish);
- BINOP_OVERLOAD(Unsigned, Unsigned, Intish);
- BINOP_OVERLOAD(DoubleQ, DoubleQ, Double);
- BINOP_OVERLOAD(FloatQ, FloatQ, Floatish);
- FAIL(binop, "Invalid operands for /.");
- case Token::MOD:
- BINOP_OVERLOAD(Signed, Signed, Intish);
- BINOP_OVERLOAD(Unsigned, Unsigned, Intish);
- BINOP_OVERLOAD(DoubleQ, DoubleQ, Double);
- FAIL(binop, "Invalid operands for %.");
- }
-#undef BINOP_OVERLOAD
-
- UNREACHABLE();
-}
-
-// 6.8.9 AdditiveExpression
-AsmType* AsmTyper::ValidateAdditiveExpression(BinaryOperation* binop,
- uint32_t intish_count) {
- static const uint32_t kMaxIntish = 1 << 20;
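- // Uncoerced additive chains such as `(a + b + c + d) | 0` are allowed
- // to nest up to 2^20 additions before an explicit coercion is required.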
-
- auto* left = binop->left();
- auto* left_as_binop = left->AsBinaryOperation();
- AsmType* left_type;
-
- // TODO(jpp): maybe use an iterative approach instead of the recursion to
- // ValidateAdditiveExpression.
- if (left_as_binop != nullptr && (left_as_binop->op() == Token::ADD ||
- left_as_binop->op() == Token::SUB)) {
- RECURSE(left_type =
- ValidateAdditiveExpression(left_as_binop, intish_count + 1));
- SetTypeOf(left_as_binop, left_type);
- } else {
- RECURSE(left_type = ValidateExpression(left));
- }
-
- auto* right = binop->right();
- auto* right_as_binop = right->AsBinaryOperation();
- AsmType* right_type;
-
- if (right_as_binop != nullptr && (right_as_binop->op() == Token::ADD ||
- right_as_binop->op() == Token::SUB)) {
- RECURSE(right_type =
- ValidateAdditiveExpression(right_as_binop, intish_count + 1));
- SetTypeOf(right_as_binop, right_type);
- } else {
- RECURSE(right_type = ValidateExpression(right));
- }
-
- if (left_type->IsA(AsmType::FloatQ()) && right_type->IsA(AsmType::FloatQ())) {
- return AsmType::Floatish();
- }
-
- if (left_type->IsA(AsmType::Int()) && right_type->IsA(AsmType::Int())) {
- if (intish_count == 0) {
- return AsmType::Intish();
- }
- if (intish_count < kMaxIntish) {
- return AsmType::Int();
- }
- FAIL(binop, "Too many uncoerced integer additive expressions.");
- }
-
- if (left_type->IsA(AsmType::Double()) && right_type->IsA(AsmType::Double())) {
- return AsmType::Double();
- }
-
- if (binop->op() == Token::SUB) {
- if (left_type->IsA(AsmType::DoubleQ()) &&
- right_type->IsA(AsmType::DoubleQ())) {
- return AsmType::Double();
- }
- }
-
- FAIL(binop, "Invalid operands for additive expression.");
-}
-
-// 6.8.10 ShiftExpression
-AsmType* AsmTyper::ValidateShiftExpression(BinaryOperation* binop) {
- auto* left = binop->left();
- auto* right = binop->right();
-
- AsmType* left_type;
- AsmType* right_type;
- RECURSE(left_type = ValidateExpression(left));
- RECURSE(right_type = ValidateExpression(right));
-
-#define BINOP_OVERLOAD(Src0, Src1, Dest) \
- do { \
- if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
- return AsmType::Dest(); \
- } \
- } while (0)
- switch (binop->op()) {
- default:
- FAIL(binop, "Invalid shift expression.");
- case Token::SHL:
- BINOP_OVERLOAD(Intish, Intish, Signed);
- FAIL(binop, "Invalid operands for <<.");
- case Token::SAR:
- BINOP_OVERLOAD(Intish, Intish, Signed);
- FAIL(binop, "Invalid operands for >>.");
- case Token::SHR:
- BINOP_OVERLOAD(Intish, Intish, Unsigned);
- FAIL(binop, "Invalid operands for >>>.");
- }
-#undef BINOP_OVERLOAD
-
- UNREACHABLE();
-}
-
-// 6.8.11 RelationalExpression
-AsmType* AsmTyper::ValidateRelationalExpression(CompareOperation* cmpop) {
- auto* left = cmpop->left();
- auto* right = cmpop->right();
-
- AsmType* left_type;
- AsmType* right_type;
- RECURSE(left_type = ValidateExpression(left));
- RECURSE(right_type = ValidateExpression(right));
-
-#define CMPOP_OVERLOAD(Src0, Src1, Dest) \
- do { \
- if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
- return AsmType::Dest(); \
- } \
- } while (0)
- switch (cmpop->op()) {
- default:
- FAIL(cmpop, "Invalid relational expression.");
- case Token::LT:
- CMPOP_OVERLOAD(Signed, Signed, Int);
- CMPOP_OVERLOAD(Unsigned, Unsigned, Int);
- CMPOP_OVERLOAD(Float, Float, Int);
- CMPOP_OVERLOAD(Double, Double, Int);
- FAIL(cmpop, "Invalid operands for <.");
- case Token::GT:
- CMPOP_OVERLOAD(Signed, Signed, Int);
- CMPOP_OVERLOAD(Unsigned, Unsigned, Int);
- CMPOP_OVERLOAD(Float, Float, Int);
- CMPOP_OVERLOAD(Double, Double, Int);
- FAIL(cmpop, "Invalid operands for >.");
- case Token::LTE:
- CMPOP_OVERLOAD(Signed, Signed, Int);
- CMPOP_OVERLOAD(Unsigned, Unsigned, Int);
- CMPOP_OVERLOAD(Float, Float, Int);
- CMPOP_OVERLOAD(Double, Double, Int);
- FAIL(cmpop, "Invalid operands for <=.");
- case Token::GTE:
- CMPOP_OVERLOAD(Signed, Signed, Int);
- CMPOP_OVERLOAD(Unsigned, Unsigned, Int);
- CMPOP_OVERLOAD(Float, Float, Int);
- CMPOP_OVERLOAD(Double, Double, Int);
- FAIL(cmpop, "Invalid operands for >=.");
- }
-#undef CMPOP_OVERLOAD
-
- UNREACHABLE();
-}
-
-// 6.8.12 EqualityExpression
-AsmType* AsmTyper::ValidateEqualityExpression(CompareOperation* cmpop) {
- auto* left = cmpop->left();
- auto* right = cmpop->right();
-
- AsmType* left_type;
- AsmType* right_type;
- RECURSE(left_type = ValidateExpression(left));
- RECURSE(right_type = ValidateExpression(right));
-
-#define CMPOP_OVERLOAD(Src0, Src1, Dest) \
- do { \
- if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
- return AsmType::Dest(); \
- } \
- } while (0)
- switch (cmpop->op()) {
- default:
- FAIL(cmpop, "Invalid equality expression.");
- case Token::EQ:
- CMPOP_OVERLOAD(Signed, Signed, Int);
- CMPOP_OVERLOAD(Unsigned, Unsigned, Int);
- CMPOP_OVERLOAD(Float, Float, Int);
- CMPOP_OVERLOAD(Double, Double, Int);
- FAIL(cmpop, "Invalid operands for ==.");
- case Token::NE:
- CMPOP_OVERLOAD(Signed, Signed, Int);
- CMPOP_OVERLOAD(Unsigned, Unsigned, Int);
- CMPOP_OVERLOAD(Float, Float, Int);
- CMPOP_OVERLOAD(Double, Double, Int);
- FAIL(cmpop, "Invalid operands for !=.");
- }
-#undef CMPOP_OVERLOAD
-
- UNREACHABLE();
-}
-
-// 6.8.13 BitwiseANDExpression
-AsmType* AsmTyper::ValidateBitwiseANDExpression(BinaryOperation* binop) {
- auto* left = binop->left();
- auto* right = binop->right();
-
- AsmType* left_type;
- AsmType* right_type;
- RECURSE(left_type = ValidateExpression(left));
- RECURSE(right_type = ValidateExpression(right));
-
- if (binop->op() != Token::BIT_AND) {
- FAIL(binop, "Invalid & expression.");
- }
-
-#define BINOP_OVERLOAD(Src0, Src1, Dest) \
- do { \
- if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
- return AsmType::Dest(); \
- } \
- } while (0)
- BINOP_OVERLOAD(Intish, Intish, Signed);
- FAIL(binop, "Invalid operands for &.");
-#undef BINOP_OVERLOAD
-
- UNREACHABLE();
-}
-
-// 6.8.14 BitwiseXORExpression
-AsmType* AsmTyper::ValidateBitwiseXORExpression(BinaryOperation* binop) {
- auto* left = binop->left();
- auto* right = binop->right();
-
- AsmType* left_type;
- AsmType* right_type;
- RECURSE(left_type = ValidateExpression(left));
- RECURSE(right_type = ValidateExpression(right));
-
- if (binop->op() != Token::BIT_XOR) {
- FAIL(binop, "Invalid ^ expression.");
- }
-
-#define BINOP_OVERLOAD(Src0, Src1, Dest) \
- do { \
- if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
- return AsmType::Dest(); \
- } \
- } while (0)
- BINOP_OVERLOAD(Intish, Intish, Signed);
- FAIL(binop, "Invalid operands for ^.");
-#undef BINOP_OVERLOAD
-
- UNREACHABLE();
-}
-
-// 6.8.15 BitwiseORExpression
-AsmType* AsmTyper::ValidateBitwiseORExpression(BinaryOperation* binop) {
- auto* left = binop->left();
- if (IsIntAnnotation(binop)) {
- if (auto* left_as_call = left->AsCall()) {
- AsmType* type;
- RECURSE(type = ValidateCall(AsmType::Signed(), left_as_call));
- return type;
- }
- AsmType* left_type;
- RECURSE(left_type = ValidateExpression(left));
- if (!left_type->IsA(AsmType::Intish())) {
- FAIL(left, "Left side of |0 annotation must be intish.");
- }
- return AsmType::Signed();
- }
-
- auto* right = binop->right();
- AsmType* left_type;
- AsmType* right_type;
- RECURSE(left_type = ValidateExpression(left));
- RECURSE(right_type = ValidateExpression(right));
-
- if (binop->op() != Token::BIT_OR) {
- FAIL(binop, "Invalid | expression.");
- }
-
-#define BINOP_OVERLOAD(Src0, Src1, Dest) \
- do { \
- if (left_type->IsA(AsmType::Src0()) && right_type->IsA(AsmType::Src1())) { \
- return AsmType::Dest(); \
- } \
- } while (0)
- BINOP_OVERLOAD(Intish, Intish, Signed);
- FAIL(binop, "Invalid operands for |.");
-#undef BINOP_OVERLOAD
-
- UNREACHABLE();
-}
-
-// 6.8.16 ConditionalExpression
-AsmType* AsmTyper::ValidateConditionalExpression(Conditional* cond) {
- AsmType* cond_type;
- RECURSE(cond_type = ValidateExpression(cond->condition()));
- if (!cond_type->IsA(AsmType::Int())) {
- FAIL(cond, "Ternary operation condition should be int.");
- }
-
- AsmType* then_type;
- RECURSE(then_type = ValidateExpression(cond->then_expression()));
- AsmType* else_type;
- RECURSE(else_type = ValidateExpression(cond->else_expression()));
-
-#define SUCCEED_IF_BOTH_ARE(type) \
- do { \
- if (then_type->IsA(AsmType::type())) { \
- if (!else_type->IsA(AsmType::type())) { \
- FAIL(cond, "Type mismatch for ternary operation result type."); \
- } \
- return AsmType::type(); \
- } \
- } while (0)
- SUCCEED_IF_BOTH_ARE(Int);
- SUCCEED_IF_BOTH_ARE(Float);
- SUCCEED_IF_BOTH_ARE(Double);
-#undef SUCCEED_IF_BOTH_ARE
-
- FAIL(cond, "Ternary operator must return int, float, or double.");
-}
-
-// 6.9 ValidateCall
-namespace {
-bool ExtractIndirectCallMask(Expression* expr, uint32_t* value) {
- auto* as_literal = expr->AsLiteral();
- if (as_literal == nullptr) {
- return false;
- }
-
- if (!IsLiteralInt(as_literal)) {
- return false;
- }
-
- if (!as_literal->value()->ToUint32(value)) {
- return false;
- }
-
- return base::bits::IsPowerOfTwo32(1 + *value);
-}
-} // namespace
-
-AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
- AsmType* float_coercion_type;
- RECURSE(float_coercion_type = ValidateFloatCoercion(call));
- if (float_coercion_type == AsmType::Float()) {
- SetTypeOf(call, AsmType::Float());
- return return_type;
- }
-
- // TODO(jpp): we should be able to reuse the args vector's storage space.
- ZoneVector<AsmType*> args(zone_);
- args.reserve(call->arguments()->length());
-
- for (auto* arg : *call->arguments()) {
- AsmType* arg_type;
- RECURSE(arg_type = ValidateExpression(arg));
- args.emplace_back(arg_type);
- }
-
- auto* call_expr = call->expression();
-
- // identifier(Expression...)
- if (auto* call_var_proxy = call_expr->AsVariableProxy()) {
- auto* call_var_info = Lookup(call_var_proxy->var());
-
- if (call_var_info == nullptr) {
- // We can't fail here: the validator performs a single pass over the AST,
- // so it is possible for some calls to be currently unresolved. We eagerly
- // add the function to the table of globals.
- auto* call_type = AsmType::Function(zone_, return_type)->AsFunctionType();
- for (auto* arg : args) {
- call_type->AddArgument(arg->ToParameterType());
- }
- auto* fun_info =
- new (zone_) VariableInfo(reinterpret_cast<AsmType*>(call_type));
- fun_info->set_mutability(VariableInfo::kImmutableGlobal);
- AddForwardReference(call_var_proxy, fun_info);
- if (!ValidAsmIdentifier(call_var_proxy->name())) {
- FAIL(call_var_proxy,
- "Invalid asm.js identifier in (forward) function name.");
- }
- if (!AddGlobal(call_var_proxy->var(), fun_info)) {
- DCHECK(false);
- FAIL(call, "Redeclared global identifier.");
- }
- if (call->GetCallType() != Call::OTHER_CALL) {
- FAIL(call, "Invalid call of existing global function.");
- }
- SetTypeOf(call_var_proxy, reinterpret_cast<AsmType*>(call_type));
- SetTypeOf(call, return_type);
- return return_type;
- }
-
- auto* callee_type = call_var_info->type()->AsCallableType();
- if (callee_type == nullptr) {
- FAIL(call, "Calling something that's not a function.");
- }
-
- if (callee_type->AsFFIType() != nullptr) {
- if (return_type == AsmType::Float()) {
- FAIL(call, "Foreign functions can't return float.");
- }
- // Record FFI use signature, since the asm->wasm translator must know
- // all uses up-front.
- ffi_use_signatures_.emplace_back(
- FFIUseSignature(call_var_proxy->var(), zone_));
- FFIUseSignature* sig = &ffi_use_signatures_.back();
- sig->return_type_ = return_type;
- sig->arg_types_.reserve(args.size());
- for (size_t i = 0; i < args.size(); ++i) {
- sig->arg_types_.emplace_back(args[i]);
- }
- }
-
- if (!callee_type->CanBeInvokedWith(return_type, args)) {
- FAIL(call, "Function invocation does not match function type.");
- }
-
- if (call->GetCallType() != Call::OTHER_CALL) {
- FAIL(call, "Invalid forward call of global function.");
- }
-
- SetTypeOf(call_var_proxy, call_var_info->type());
- SetTypeOf(call, return_type);
- return return_type;
- }
-
- // identifier[expr & n](Expression...)
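- // e.g. `ftable[i & 3](x | 0) | 0` calls into a 4-entry function table.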
- if (auto* call_property = call_expr->AsProperty()) {
- auto* index = call_property->key()->AsBinaryOperation();
- if (index == nullptr || index->op() != Token::BIT_AND) {
- FAIL(call_property->key(),
- "Indirect call index must be in the expr & mask form.");
- }
-
- auto* left = index->left();
- auto* right = index->right();
- uint32_t mask;
- if (!ExtractIndirectCallMask(right, &mask)) {
- if (!ExtractIndirectCallMask(left, &mask)) {
- FAIL(right, "Invalid indirect call mask.");
- } else {
- left = right;
- }
- }
- const uint32_t table_length = mask + 1;
-
- AsmType* left_type;
- RECURSE(left_type = ValidateExpression(left));
- if (!left_type->IsA(AsmType::Intish())) {
- FAIL(left, "Indirect call index should be an intish.");
- }
-
- auto* name_var = call_property->obj()->AsVariableProxy();
-
- if (name_var == nullptr) {
- FAIL(call_property, "Invalid call.");
- }
-
- auto* name_info = Lookup(name_var->var());
- if (name_info == nullptr) {
- // We can't fail here -- just like above.
- auto* call_type = AsmType::Function(zone_, return_type)->AsFunctionType();
- for (auto* arg : args) {
- call_type->AddArgument(arg->ToParameterType());
- }
- auto* table_type = AsmType::FunctionTableType(
- zone_, table_length, reinterpret_cast<AsmType*>(call_type));
- auto* fun_info =
- new (zone_) VariableInfo(reinterpret_cast<AsmType*>(table_type));
- fun_info->set_mutability(VariableInfo::kImmutableGlobal);
- AddForwardReference(name_var, fun_info);
- if (!ValidAsmIdentifier(name_var->name())) {
- FAIL(name_var,
- "Invalid asm.js identifier in (forward) function table name.");
- }
- if (!AddGlobal(name_var->var(), fun_info)) {
- DCHECK(false);
- FAIL(call, "Redeclared global identifier.");
- }
- if (call->GetCallType() != Call::KEYED_PROPERTY_CALL) {
- FAIL(call, "Invalid call of existing function table.");
- }
- SetTypeOf(call_property, reinterpret_cast<AsmType*>(call_type));
- SetTypeOf(call, return_type);
- return return_type;
- }
-
- auto* previous_type = name_info->type()->AsFunctionTableType();
- if (previous_type == nullptr) {
- FAIL(call, "Identifier does not name a function table.");
- }
-
- if (table_length != previous_type->length()) {
- FAIL(call, "Function table size does not match expected size.");
- }
-
- auto* previous_type_signature =
- previous_type->signature()->AsFunctionType();
- DCHECK(previous_type_signature != nullptr);
- if (!previous_type_signature->CanBeInvokedWith(return_type, args)) {
- // TODO(jpp): better error messages.
- FAIL(call,
- "Function pointer table signature does not match previous "
- "signature.");
- }
-
- if (call->GetCallType() != Call::KEYED_PROPERTY_CALL) {
- FAIL(call, "Invalid forward call of function table.");
- }
- SetTypeOf(call_property, previous_type->signature());
- SetTypeOf(call, return_type);
- return return_type;
- }
-
- FAIL(call, "Invalid call.");
-}
-
-// 6.10 ValidateHeapAccess
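-// Typical heap accesses look like `HEAP32[p >> 2]` or `HEAPF64[p >> 3]`
-// (the view names are conventional), where the shift amount must match
-// the element size of the heap view.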
-namespace {
-bool ExtractHeapAccessShift(Expression* expr, uint32_t* value) {
- auto* as_literal = expr->AsLiteral();
- if (as_literal == nullptr) {
- return false;
- }
-
- if (!IsLiteralInt(as_literal)) {
- return false;
- }
-
- return as_literal->value()->ToUint32(value);
-}
-
-// Returns whether index is too large to access a heap with the given type.
-bool LiteralIndexOutOfBounds(AsmType* obj_type, uint32_t index) {
- switch (obj_type->ElementSizeInBytes()) {
- case 1:
- return false;
- case 2:
- return (index & 0x80000000u) != 0;
- case 4:
- return (index & 0xC0000000u) != 0;
- case 8:
- return (index & 0xE0000000u) != 0;
- }
- UNREACHABLE();
- return true;
-}
-
-} // namespace
-
-AsmType* AsmTyper::ValidateHeapAccess(Property* heap,
- HeapAccessType access_type) {
- auto* obj = heap->obj()->AsVariableProxy();
- if (obj == nullptr) {
- FAIL(heap, "Invalid heap access.");
- }
-
- auto* obj_info = Lookup(obj->var());
- if (obj_info == nullptr) {
- FAIL(heap, "Undeclared identifier in heap access.");
- }
-
- auto* obj_type = obj_info->type();
- if (!obj_type->IsA(AsmType::Heap())) {
- FAIL(heap, "Identifier does not represent a heap view.");
- }
- SetTypeOf(obj, obj_type);
-
- if (auto* key_as_literal = heap->key()->AsLiteral()) {
- if (!IsLiteralInt(key_as_literal)) {
- FAIL(key_as_literal, "Heap access index must be int.");
- }
-
- uint32_t index;
- if (!key_as_literal->value()->ToUint32(&index)) {
- FAIL(key_as_literal,
- "Heap access index must be a 32-bit unsigned integer.");
- }
-
- if (LiteralIndexOutOfBounds(obj_type, index)) {
- FAIL(key_as_literal, "Heap access index is out of bounds");
- }
-
- if (access_type == LoadFromHeap) {
- return obj_type->LoadType();
- }
- return obj_type->StoreType();
- }
-
- if (auto* key_as_binop = heap->key()->AsBinaryOperation()) {
- uint32_t shift;
- if (key_as_binop->op() == Token::SAR &&
- ExtractHeapAccessShift(key_as_binop->right(), &shift) &&
- (1 << shift) == obj_type->ElementSizeInBytes()) {
- AsmType* type;
- RECURSE(type = ValidateExpression(key_as_binop->left()));
- if (type->IsA(AsmType::Intish())) {
- if (access_type == LoadFromHeap) {
- return obj_type->LoadType();
- }
- return obj_type->StoreType();
- }
- FAIL(key_as_binop, "Invalid heap access index.");
- }
- }
-
- if (obj_type->ElementSizeInBytes() == 1) {
- // Leniency: if this is a byte array, we don't require the shift operation
- // to be present.
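- // e.g. a plain `HEAP8[i]` load is accepted even without a `>> 0` shift.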
- AsmType* index_type;
- RECURSE(index_type = ValidateExpression(heap->key()));
- if (!index_type->IsA(AsmType::Int())) {
- FAIL(heap, "Invalid heap access index for byte array.");
- }
- if (access_type == LoadFromHeap) {
- return obj_type->LoadType();
- }
- return obj_type->StoreType();
- }
-
- FAIL(heap, "Invalid heap access index.");
-}
-
-// 6.11 ValidateFloatCoercion
-bool AsmTyper::IsCallToFround(Call* call) {
- if (call->arguments()->length() != 1) {
- return false;
- }
-
- auto* call_var_proxy = call->expression()->AsVariableProxy();
- if (call_var_proxy == nullptr) {
- return false;
- }
-
- auto* call_var_info = Lookup(call_var_proxy->var());
- if (call_var_info == nullptr) {
- return false;
- }
-
- return call_var_info->standard_member() == kMathFround;
-}
-
-AsmType* AsmTyper::ValidateFloatCoercion(Call* call) {
- if (!IsCallToFround(call)) {
- return nullptr;
- }
-
- auto* arg = call->arguments()->at(0);
- // call is a fround() node. From now, there can be two possible outcomes:
- // 1. fround is used as a return type annotation.
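- // e.g. `fround(g(x))`, which validates `g` as returning float.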
- if (auto* arg_as_call = arg->AsCall()) {
- RECURSE(ValidateCall(AsmType::Float(), arg_as_call));
- return AsmType::Float();
- }
-
- // 2. fround is used for converting to float.
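- // e.g. `fround(d)`, which converts the double `d` to float.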
- AsmType* arg_type;
- RECURSE(arg_type = ValidateExpression(arg));
- if (arg_type->IsA(AsmType::Floatish()) || arg_type->IsA(AsmType::DoubleQ()) ||
- arg_type->IsA(AsmType::Signed()) || arg_type->IsA(AsmType::Unsigned())) {
- SetTypeOf(call->expression(), fround_type_);
- return AsmType::Float();
- }
-
- FAIL(call, "Invalid argument type to fround.");
-}
-
-// 5.1 ParameterTypeAnnotations
-AsmType* AsmTyper::ParameterTypeAnnotations(Variable* parameter,
- Expression* annotation) {
- if (auto* binop = annotation->AsBinaryOperation()) {
- // Must be:
- // * x|0
- // * x*1 (*VIOLATION* i.e., +x)
- auto* left = binop->left()->AsVariableProxy();
- if (left == nullptr) {
- FAIL(
- binop->left(),
- "Invalid parameter type annotation - should annotate an identifier.");
- }
- if (left->var() != parameter) {
- FAIL(binop->left(),
- "Invalid parameter type annotation - should annotate a parameter.");
- }
- if (IsDoubleAnnotation(binop)) {
- SetTypeOf(left, AsmType::Double());
- return AsmType::Double();
- }
- if (IsIntAnnotation(binop)) {
- SetTypeOf(left, AsmType::Int());
- return AsmType::Int();
- }
- FAIL(binop, "Invalid parameter type annotation.");
- }
-
- auto* call = annotation->AsCall();
- if (call == nullptr) {
- FAIL(
- annotation,
- "Invalid float parameter type annotation - must be fround(parameter).");
- }
-
- if (!IsCallToFround(call)) {
- FAIL(annotation,
- "Invalid float parameter type annotation - must be call to fround.");
- }
-
- auto* src_expr = call->arguments()->at(0)->AsVariableProxy();
- if (src_expr == nullptr) {
- FAIL(annotation,
- "Invalid float parameter type annotation - argument to fround is not "
- "an identifier.");
- }
-
- if (src_expr->var() != parameter) {
- FAIL(annotation,
- "Invalid float parameter type annotation - argument to fround is not "
- "a parameter.");
- }
-
- SetTypeOf(src_expr, AsmType::Float());
- return AsmType::Float();
-}
-
-// 5.2 ReturnTypeAnnotations
-AsmType* AsmTyper::ReturnTypeAnnotations(Expression* ret_expr) {
- DCHECK_NOT_NULL(ret_expr);
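- // For example, `return (x | 0);` annotates a signed return type,
- // `return +x;` a double, and `return fround(x);` a float.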
-
- if (auto* binop = ret_expr->AsBinaryOperation()) {
- if (IsDoubleAnnotation(binop)) {
- return AsmType::Double();
- } else if (IsIntAnnotation(binop)) {
- return AsmType::Signed();
- }
- FAIL(ret_expr, "Invalid return type annotation.");
- }
-
- if (auto* call = ret_expr->AsCall()) {
- if (IsCallToFround(call)) {
- return AsmType::Float();
- }
- FAIL(ret_expr, "Invalid function call in return statement.");
- }
-
- if (auto* literal = ret_expr->AsLiteral()) {
- int32_t _;
- if (IsLiteralDouble(literal)) {
- return AsmType::Double();
- } else if (IsLiteralInt(literal) && literal->value()->ToInt32(&_)) {
- return AsmType::Signed();
- } else if (literal->IsUndefinedLiteral()) {
- // *VIOLATION* The parser changes
- //
- // return;
- //
- // into
- //
- // return undefined
- return AsmType::Void();
- }
- FAIL(ret_expr, "Invalid literal in return statement.");
- }
-
- if (auto* proxy = ret_expr->AsVariableProxy()) {
- auto* var_info = Lookup(proxy->var());
-
- if (var_info == nullptr) {
- FAIL(ret_expr, "Undeclared identifier in return statement.");
- }
-
- if (var_info->mutability() != VariableInfo::kConstGlobal) {
- FAIL(ret_expr, "Identifier in return statement is not const.");
- }
-
- if (!var_info->type()->IsReturnType()) {
- FAIL(ret_expr, "Constant in return must be signed, float, or double.");
- }
-
- return var_info->type();
- }
-
- // NOTE: This is not strictly valid asm.js, but is emitted by some versions of
- // Emscripten.
- if (auto* cond = ret_expr->AsConditional()) {
- AsmType* a = AsmType::None();
- AsmType* b = AsmType::None();
- RECURSE(a = ReturnTypeAnnotations(cond->then_expression()));
- if (a->IsA(AsmType::None())) {
- return a;
- }
- RECURSE(b = ReturnTypeAnnotations(cond->else_expression()));
- if (b->IsA(AsmType::None())) {
- return b;
- }
- if (a->IsExactly(b)) {
- return a;
- }
- }
-
- FAIL(ret_expr, "Invalid return type expression.");
-}
-
-// 5.4 VariableTypeAnnotations
-// Also used for 5.5 GlobalVariableTypeAnnotations
-AsmType* AsmTyper::VariableTypeAnnotations(
- Expression* initializer, VariableInfo::Mutability mutability_type) {
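- // For example, `var i = 0;` declares an int, `var d = 0.0;` a double,
- // and `var f = fround(0);` a float.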
- if (auto* literal = initializer->AsLiteral()) {
- if (IsLiteralDouble(literal)) {
- SetTypeOf(initializer, AsmType::Double());
- return AsmType::Double();
- }
- if (!IsLiteralInt(literal)) {
- FAIL(initializer, "Invalid type annotation - forbidden literal.");
- }
- int32_t i32;
- uint32_t u32;
- AsmType* initializer_type = nullptr;
- if (literal->value()->ToUint32(&u32)) {
- if (u32 > LargestFixNum) {
- initializer_type = AsmType::Unsigned();
- SetTypeOf(initializer, initializer_type);
- } else {
- initializer_type = AsmType::FixNum();
- SetTypeOf(initializer, initializer_type);
- initializer_type = AsmType::Signed();
- }
- } else if (literal->value()->ToInt32(&i32)) {
- initializer_type = AsmType::Signed();
- SetTypeOf(initializer, initializer_type);
- } else {
- FAIL(initializer, "Invalid type annotation - forbidden literal.");
- }
- if (mutability_type != VariableInfo::kConstGlobal) {
- return AsmType::Int();
- }
- return initializer_type;
- }
-
- if (auto* proxy = initializer->AsVariableProxy()) {
- auto* var_info = Lookup(proxy->var());
-
- if (var_info == nullptr) {
- FAIL(initializer,
- "Undeclared identifier in variable declaration initializer.");
- }
-
- if (var_info->mutability() != VariableInfo::kConstGlobal) {
- FAIL(initializer,
- "Identifier in variable declaration initializer must be const.");
- }
-
- SetTypeOf(initializer, var_info->type());
- return var_info->type();
- }
-
- auto* call = initializer->AsCall();
- if (call == nullptr) {
- FAIL(initializer,
- "Invalid variable initialization - it should be a literal, const, or "
- "fround(literal).");
- }
-
- if (!IsCallToFround(call)) {
- FAIL(initializer,
- "Invalid float coercion - expected call fround(literal).");
- }
-
- auto* src_expr = call->arguments()->at(0)->AsLiteral();
- if (src_expr == nullptr) {
- FAIL(initializer,
- "Invalid float type annotation - expected literal argument for call "
- "to fround.");
- }
-
- // ERRATA: 5.4
- // According to the spec, float constants must contain dots in locals,
- // but not in globals.
- // However, the errata doc (and actual programs) use integer values
- // with fround(..).
- // We therefore skip the check that would go here to enforce this, and
- // instead only check that the literal expression is a number.
- if (!src_expr->raw_value()->IsNumber()) {
- FAIL(initializer,
- "Invalid float type annotation - expected numeric literal for call "
- "to fround.");
- }
-
- return AsmType::Float();
-}
-
-// 5.5 GlobalVariableTypeAnnotations
-AsmType* AsmTyper::NewHeapView(CallNew* new_heap_view) {
- auto* heap_type = new_heap_view->expression()->AsProperty();
- if (heap_type == nullptr) {
- FAIL(new_heap_view, "Invalid type after new.");
- }
- auto* heap_view_info = ImportLookup(heap_type);
-
- if (heap_view_info == nullptr) {
- FAIL(new_heap_view, "Unknown stdlib member in heap view declaration.");
- }
-
- if (!heap_view_info->type()->IsA(AsmType::Heap())) {
- FAIL(new_heap_view, "Type is not a heap view type.");
- }
-
- if (new_heap_view->arguments()->length() != 1) {
- FAIL(new_heap_view, "Invalid number of arguments when creating heap view.");
- }
-
- auto* heap = new_heap_view->arguments()->at(0);
- auto* heap_var_proxy = heap->AsVariableProxy();
-
- if (heap_var_proxy == nullptr) {
- FAIL(heap,
- "Heap view creation parameter should be the module's heap parameter.");
- }
-
- auto* heap_var_info = Lookup(heap_var_proxy->var());
-
- if (heap_var_info == nullptr) {
- FAIL(heap, "Undeclared identifier instead of heap parameter.");
- }
-
- if (!heap_var_info->IsHeap()) {
- FAIL(heap,
- "Heap view creation parameter should be the module's heap parameter.");
- }
-
- DCHECK(heap_view_info->type()->IsA(AsmType::Heap()));
- return heap_view_info->type();
-}
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/src/asmjs/asm-typer.h b/deps/v8/src/asmjs/asm-typer.h
deleted file mode 100644
index 965137383e..0000000000
--- a/deps/v8/src/asmjs/asm-typer.h
+++ /dev/null
@@ -1,420 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef SRC_ASMJS_ASM_TYPER_H_
-#define SRC_ASMJS_ASM_TYPER_H_
-
-#include <cstdint>
-#include <string>
-#include <unordered_map>
-#include <unordered_set>
-
-#include "src/allocation.h"
-#include "src/asmjs/asm-types.h"
-#include "src/ast/ast-type-bounds.h"
-#include "src/ast/ast-types.h"
-#include "src/ast/ast.h"
-#include "src/effects.h"
-#include "src/messages.h"
-#include "src/type-info.h"
-#include "src/zone/zone-containers.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-class AsmType;
-class AsmTyperHarnessBuilder;
-class SourceLayoutTracker;
-
-class AsmTyper final {
- public:
- enum StandardMember {
- kHeap = -4,
- kFFI = -3,
- kStdlib = -2,
- kModule = -1,
- kNone = 0,
- kInfinity,
- kNaN,
- kMathAcos,
- kMathAsin,
- kMathAtan,
- kMathCos,
- kMathSin,
- kMathTan,
- kMathExp,
- kMathLog,
- kMathCeil,
- kMathFloor,
- kMathSqrt,
- kMathAbs,
- kMathClz32,
- kMathMin,
- kMathMax,
- kMathAtan2,
- kMathPow,
- kMathImul,
- kMathFround,
- kMathE,
- kMathLN10,
- kMathLN2,
- kMathLOG2E,
- kMathLOG10E,
- kMathPI,
- kMathSQRT1_2,
- kMathSQRT2,
- };
-
- ~AsmTyper() = default;
- AsmTyper(Isolate* isolate, Zone* zone, Handle<Script> script,
- FunctionLiteral* root);
-
- bool Validate();
- // Do asm.js validation in phases (to interleave with conversion to wasm).
- bool ValidateBeforeFunctionsPhase();
- bool ValidateInnerFunction(FunctionDeclaration* decl);
- bool ValidateAfterFunctionsPhase();
- void ClearFunctionNodeTypes();
-
- Handle<JSMessageObject> error_message() const { return error_message_; }
- const MessageLocation* message_location() const { return &message_location_; }
-
- AsmType* TriggerParsingError();
-
- AsmType* TypeOf(AstNode* node) const;
- AsmType* TypeOf(Variable* v) const;
- StandardMember VariableAsStandardMember(Variable* var);
-
- // Allow the asm-wasm-builder to trigger failures (for interleaved
- // validating).
- AsmType* FailWithMessage(const char* text);
-
- typedef std::unordered_set<StandardMember, std::hash<int> > StdlibSet;
-
- StdlibSet StdlibUses() const { return stdlib_uses_; }
-
- // Each FFI import has a usage-site signature associated with it.
- struct FFIUseSignature {
- Variable* var;
- ZoneVector<AsmType*> arg_types_;
- AsmType* return_type_;
- FFIUseSignature(Variable* v, Zone* zone)
- : var(v), arg_types_(zone), return_type_(nullptr) {}
- };
-
- const ZoneVector<FFIUseSignature>& FFIUseSignatures() {
- return ffi_use_signatures_;
- }
-
- private:
- friend class v8::internal::wasm::AsmTyperHarnessBuilder;
-
- class VariableInfo : public ZoneObject {
- public:
- enum Mutability {
- kInvalidMutability,
- kLocal,
- kMutableGlobal,
- // *VIOLATION* We support const variables in asm.js, as per the
- //
- // https://discourse.wicg.io/t/allow-const-global-variables/684
- //
- // Global const variables are treated as if they were numeric literals,
- // and can be used anywhere a literal can be used.
- kConstGlobal,
- kImmutableGlobal,
- };
-
- explicit VariableInfo(AsmType* t) : type_(t) {}
-
- VariableInfo* Clone(Zone* zone) const;
-
- bool IsMutable() const {
- return mutability_ == kLocal || mutability_ == kMutableGlobal;
- }
-
- bool IsGlobal() const {
- return mutability_ == kImmutableGlobal || mutability_ == kConstGlobal ||
- mutability_ == kMutableGlobal;
- }
-
- bool IsStdlib() const { return standard_member_ == kStdlib; }
- bool IsFFI() const { return standard_member_ == kFFI; }
- bool IsHeap() const { return standard_member_ == kHeap; }
-
- void MarkDefined() { missing_definition_ = false; }
- void SetFirstForwardUse(const MessageLocation& source_location);
-
- StandardMember standard_member() const { return standard_member_; }
- void set_standard_member(StandardMember standard_member) {
- standard_member_ = standard_member;
- }
-
- AsmType* type() const { return type_; }
- void set_type(AsmType* type) { type_ = type; }
-
- Mutability mutability() const { return mutability_; }
- void set_mutability(Mutability mutability) { mutability_ = mutability; }
-
- bool missing_definition() const { return missing_definition_; }
-
- const MessageLocation* source_location() { return &source_location_; }
-
- static VariableInfo* ForSpecialSymbol(Zone* zone,
- StandardMember standard_member);
-
- private:
- AsmType* type_;
- StandardMember standard_member_ = kNone;
- Mutability mutability_ = kInvalidMutability;
- // missing_definition_ is set to true for forward definition - i.e., use
- // before definition.
- bool missing_definition_ = false;
- // Used for error messages.
- MessageLocation source_location_;
- };
-
- // RAII-style manager for the in_function_ member variable.
- struct FunctionScope {
- explicit FunctionScope(AsmTyper* typer) : typer_(typer) {
- DCHECK(!typer_->in_function_);
- typer_->in_function_ = true;
- typer_->local_scope_.Clear();
- typer_->return_type_ = AsmType::None();
- }
-
- ~FunctionScope() {
- DCHECK(typer_->in_function_);
- typer_->in_function_ = false;
- }
-
- AsmTyper* typer_;
- };
-
- // FlattenedStatements is an iterator class for ZoneList<Statement*> that
- // flattens the Block construct in the AST. This is here because we need it in
- // the tests.
- class FlattenedStatements {
- public:
- explicit FlattenedStatements(Zone* zone, ZoneList<Statement*>* s);
- Statement* Next();
-
- private:
- struct Context {
- explicit Context(ZoneList<Statement*>* s) : statements_(s) {}
- ZoneList<Statement*>* statements_;
- int next_index_ = 0;
- };
-
- ZoneVector<Context> context_stack_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(FlattenedStatements);
- };
-
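- // A valid asm.js module is laid out in source order as: the "use asm"
- // directive, then global declarations, then function declarations, then
- // function tables, and finally the export statement.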
- class SourceLayoutTracker {
- public:
- SourceLayoutTracker() = default;
- bool IsValid() const;
- void AddUseAsm(const AstNode& node) { use_asm_.AddNewElement(node); }
- void AddGlobal(const AstNode& node) { globals_.AddNewElement(node); }
- void AddFunction(const AstNode& node) { functions_.AddNewElement(node); }
- void AddTable(const AstNode& node) { tables_.AddNewElement(node); }
- void AddExport(const AstNode& node) { exports_.AddNewElement(node); }
-
- private:
- class Section {
- public:
- Section() = default;
- Section(const Section&) = default;
- Section& operator=(const Section&) = default;
-
- void AddNewElement(const AstNode& node);
- bool IsPrecededBy(const Section& other) const;
-
- private:
- int start_ = kNoSourcePosition;
- int end_ = kNoSourcePosition;
- };
-
- Section use_asm_;
- Section globals_;
- Section functions_;
- Section tables_;
- Section exports_;
-
- DISALLOW_COPY_AND_ASSIGN(SourceLayoutTracker);
- };
-
- using ObjectTypeMap = ZoneMap<std::string, VariableInfo*>;
- void InitializeStdlib();
- void SetTypeOf(AstNode* node, AsmType* type);
-
- void AddForwardReference(VariableProxy* proxy, VariableInfo* info);
- bool AddGlobal(Variable* global, VariableInfo* info);
- bool AddLocal(Variable* global, VariableInfo* info);
- // Used for 5.5 GlobalVariableTypeAnnotations
- VariableInfo* ImportLookup(Property* expr);
- // 3.3 Environment Lookup
- // NOTE: In the spec, the lookup function's prototype is
- //
- // Lookup(Delta, Gamma, x)
- //
- // Delta is the global_scope_ member, and Gamma, local_scope_.
- VariableInfo* Lookup(Variable* variable) const;
-
- // All of the ValidateXXX methods below return AsmType::None() in case of
- // validation failure.
-
- // 6.1 ValidateModule
- AsmType* ValidateModuleBeforeFunctionsPhase(FunctionLiteral* fun);
- AsmType* ValidateModuleFunction(FunctionDeclaration* fun_decl);
- AsmType* ValidateModuleFunctions(FunctionLiteral* fun);
- AsmType* ValidateModuleAfterFunctionsPhase(FunctionLiteral* fun);
- AsmType* ValidateGlobalDeclaration(Assignment* assign);
- // 6.2 ValidateExport
- AsmType* ExportType(VariableProxy* fun_export);
- AsmType* ValidateExport(ReturnStatement* exports);
- // 6.3 ValidateFunctionTable
- AsmType* ValidateFunctionTable(Assignment* assign);
- // 6.4 ValidateFunction
- AsmType* ValidateFunction(FunctionDeclaration* fun_decl);
- // 6.5 ValidateStatement
- AsmType* ValidateStatement(Statement* statement);
- // 6.5.1 BlockStatement
- AsmType* ValidateBlockStatement(Block* block);
- // 6.5.2 ExpressionStatement
- AsmType* ValidateExpressionStatement(ExpressionStatement* expr);
- // 6.5.3 EmptyStatement
- AsmType* ValidateEmptyStatement(EmptyStatement* empty);
- // 6.5.4 IfStatement
- AsmType* ValidateIfStatement(IfStatement* if_stmt);
- // 6.5.5 ReturnStatement
- AsmType* ValidateReturnStatement(ReturnStatement* ret_stmt);
- // 6.5.6 IterationStatement
- // 6.5.6.a WhileStatement
- AsmType* ValidateWhileStatement(WhileStatement* while_stmt);
- // 6.5.6.b DoWhileStatement
- AsmType* ValidateDoWhileStatement(DoWhileStatement* do_while);
- // 6.5.6.c ForStatement
- AsmType* ValidateForStatement(ForStatement* for_stmt);
- // 6.5.7 BreakStatement
- AsmType* ValidateBreakStatement(BreakStatement* brk_stmt);
- // 6.5.8 ContinueStatement
- AsmType* ValidateContinueStatement(ContinueStatement* cont_stmt);
- // 6.5.9 LabelledStatement
- // NOTE: we don't need to handle these: Labelled statements are
- // BreakableStatements in our AST, but BreakableStatement is not a concrete
- // class -- and we're handling all of BreakableStatement's subclasses.
- // 6.5.10 SwitchStatement
- AsmType* ValidateSwitchStatement(SwitchStatement* stmt);
- // 6.6 ValidateCase
- AsmType* ValidateCase(CaseClause* label, int32_t* case_lbl);
- // 6.7 ValidateDefault
- AsmType* ValidateDefault(CaseClause* label);
- // 6.8 ValidateExpression
- AsmType* ValidateExpression(Expression* expr);
- AsmType* ValidateCompareOperation(CompareOperation* cmp);
- AsmType* ValidateBinaryOperation(BinaryOperation* binop);
- // 6.8.1 Expression
- AsmType* ValidateCommaExpression(BinaryOperation* comma);
- // 6.8.2 NumericLiteral
- AsmType* ValidateNumericLiteral(Literal* literal);
- // 6.8.3 Identifier
- AsmType* ValidateIdentifier(VariableProxy* proxy);
- // 6.8.4 CallExpression
- AsmType* ValidateCallExpression(Call* call);
- // 6.8.5 MemberExpression
- AsmType* ValidateMemberExpression(Property* prop);
- // 6.8.6 AssignmentExpression
- AsmType* ValidateAssignmentExpression(Assignment* assignment);
- // 6.8.7 UnaryExpression
- AsmType* ValidateUnaryExpression(UnaryOperation* unop);
- // 6.8.8 MultiplicativeExpression
- AsmType* ValidateMultiplicativeExpression(BinaryOperation* binop);
- // 6.8.9 AdditiveExpression
- AsmType* ValidateAdditiveExpression(BinaryOperation* binop,
- uint32_t intish_count);
- // 6.8.10 ShiftExpression
- AsmType* ValidateShiftExpression(BinaryOperation* binop);
- // 6.8.11 RelationalExpression
- AsmType* ValidateRelationalExpression(CompareOperation* cmpop);
- // 6.8.12 EqualityExpression
- AsmType* ValidateEqualityExpression(CompareOperation* cmpop);
- // 6.8.13 BitwiseANDExpression
- AsmType* ValidateBitwiseANDExpression(BinaryOperation* binop);
- // 6.8.14 BitwiseXORExpression
- AsmType* ValidateBitwiseXORExpression(BinaryOperation* binop);
- // 6.8.15 BitwiseORExpression
- AsmType* ValidateBitwiseORExpression(BinaryOperation* binop);
- // 6.8.16 ConditionalExpression
- AsmType* ValidateConditionalExpression(Conditional* cond);
- // 6.9 ValidateCall
- AsmType* ValidateCall(AsmType* return_type, Call* call);
- // 6.10 ValidateHeapAccess
- enum HeapAccessType { LoadFromHeap, StoreToHeap };
- AsmType* ValidateHeapAccess(Property* heap, HeapAccessType access_type);
- // 6.11 ValidateFloatCoercion
- bool IsCallToFround(Call* call);
- AsmType* ValidateFloatCoercion(Call* call);
-
- // 5.1 ParameterTypeAnnotations
- AsmType* ParameterTypeAnnotations(Variable* parameter,
- Expression* annotation);
- // 5.2 ReturnTypeAnnotations
- AsmType* ReturnTypeAnnotations(Expression* ret_expr);
- // 5.4 VariableTypeAnnotations
- // 5.5 GlobalVariableTypeAnnotations
- AsmType* VariableTypeAnnotations(
- Expression* initializer,
- VariableInfo::Mutability global = VariableInfo::kLocal);
- AsmType* ImportExpression(Property* import);
- AsmType* NewHeapView(CallNew* new_heap_view);
-
- Isolate* isolate_;
- Zone* zone_;
- Handle<Script> script_;
- FunctionLiteral* root_;
- bool in_function_ = false;
-
- AsmType* return_type_ = nullptr;
-
- ZoneVector<VariableInfo*> forward_definitions_;
- ZoneVector<FFIUseSignature> ffi_use_signatures_;
- ObjectTypeMap stdlib_types_;
- ObjectTypeMap stdlib_math_types_;
-
-  // The asm.js module name. This member is used to prevent globals from
-  // redefining the module name.
- VariableInfo* module_info_;
- Handle<String> module_name_;
-
- // 3 Environments
- ZoneHashMap global_scope_; // 3.1 Global environment
- ZoneHashMap local_scope_; // 3.2 Variable environment
-
- std::uintptr_t stack_limit_;
- bool stack_overflow_ = false;
- std::unordered_map<AstNode*, AsmType*> module_node_types_;
- std::unordered_map<AstNode*, AsmType*> function_node_types_;
- static const int kErrorMessageLimit = 128;
- AsmType* fround_type_;
- AsmType* ffi_type_;
- Handle<JSMessageObject> error_message_;
- MessageLocation message_location_;
- StdlibSet stdlib_uses_;
-
- SourceLayoutTracker source_layout_;
- ReturnStatement* module_return_;
- ZoneVector<Assignment*> function_pointer_tables_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AsmTyper);
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // SRC_ASMJS_ASM_TYPER_H_
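The two lookup environments noted above under "3 Environments" resolve identifiers local-first with a module-global fallback. A minimal sketch of that rule, using stand-in types rather than the ZoneHashMap-based originals:

    #include <string>
    #include <unordered_map>

    struct VariableInfo { /* type, mutability, ... */ };
    using Environment = std::unordered_map<std::string, VariableInfo>;

    // Per the asm.js validation rules, the variable environment shadows the
    // global one; a miss in both makes the identifier invalid.
    const VariableInfo* Lookup(const Environment& local_scope,
                               const Environment& global_scope,
                               const std::string& name) {
      if (auto it = local_scope.find(name); it != local_scope.end())
        return &it->second;
      if (auto it = global_scope.find(name); it != global_scope.end())
        return &it->second;
      return nullptr;
    }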
diff --git a/deps/v8/src/asmjs/asm-types.h b/deps/v8/src/asmjs/asm-types.h
index 882e32828a..d29b107657 100644
--- a/deps/v8/src/asmjs/asm-types.h
+++ b/deps/v8/src/asmjs/asm-types.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef SRC_ASMJS_ASM_TYPES_H_
-#define SRC_ASMJS_ASM_TYPES_H_
+#ifndef V8_ASMJS_ASM_TYPES_H_
+#define V8_ASMJS_ASM_TYPES_H_
#include <string>
@@ -347,4 +347,4 @@ class V8_EXPORT_PRIVATE AsmType {
} // namespace internal
} // namespace v8
-#endif // SRC_ASMJS_ASM_TYPES_H_
+#endif // V8_ASMJS_ASM_TYPES_H_
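The guard rename follows the V8-wide convention of prefixing include guards with V8_ plus the full path; a new header under src/asmjs/ would presumably take the same shape (hypothetical file name, for illustration only):

    #ifndef V8_ASMJS_EXAMPLE_H_
    #define V8_ASMJS_EXAMPLE_H_

    // declarations ...

    #endif  // V8_ASMJS_EXAMPLE_H_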
diff --git a/deps/v8/src/asmjs/asm-wasm-builder.cc b/deps/v8/src/asmjs/asm-wasm-builder.cc
deleted file mode 100644
index a92e3ca1b4..0000000000
--- a/deps/v8/src/asmjs/asm-wasm-builder.cc
+++ /dev/null
@@ -1,2025 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-// Required to get M_E etc. in MSVC.
-#if defined(_WIN32)
-#define _USE_MATH_DEFINES
-#endif
-#include <math.h>
-
-#include "src/asmjs/asm-types.h"
-#include "src/asmjs/asm-wasm-builder.h"
-#include "src/asmjs/switch-logic.h"
-
-#include "src/wasm/wasm-macro-gen.h"
-#include "src/wasm/wasm-opcodes.h"
-
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/codegen.h"
-#include "src/compilation-info.h"
-#include "src/compiler.h"
-#include "src/counters.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/parsing/parse-info.h"
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-#define RECURSE(call) \
- do { \
- DCHECK(!HasStackOverflow()); \
- call; \
- if (HasStackOverflow()) return; \
- } while (false)
-
-namespace {
-
-enum AsmScope { kModuleScope, kInitScope, kFuncScope, kExportScope };
-enum ValueFate { kDrop, kLeaveOnStack };
-
-struct ForeignVariable {
- Handle<Name> name;
- Variable* var;
- ValueType type;
-};
-
-enum TargetType : uint8_t { NoTarget, BreakTarget, ContinueTarget };
-
-} // namespace
-
-class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
- public:
- AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, CompilationInfo* info,
- AstValueFactory* ast_value_factory, Handle<Script> script,
- FunctionLiteral* literal, AsmTyper* typer)
- : local_variables_(ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)),
- functions_(ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)),
- global_variables_(ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)),
- scope_(kModuleScope),
- builder_(new (zone) WasmModuleBuilder(zone)),
- current_function_builder_(nullptr),
- literal_(literal),
- isolate_(isolate),
- zone_(zone),
- info_(info),
- ast_value_factory_(ast_value_factory),
- script_(script),
- typer_(typer),
- typer_failed_(false),
- typer_finished_(false),
- breakable_blocks_(zone),
- foreign_variables_(zone),
- init_function_(nullptr),
- foreign_init_function_(nullptr),
- function_tables_(ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(zone)),
- imported_function_table_(this),
- parent_binop_(nullptr) {
- InitializeAstVisitor(isolate);
- }
-
- void InitializeInitFunction() {
- FunctionSig::Builder b(zone(), 0, 0);
- init_function_ = builder_->AddFunction(b.Build());
- builder_->MarkStartFunction(init_function_);
- // Record start of the function, used as position for the stack check.
- init_function_->SetAsmFunctionStartPosition(literal_->start_position());
- }
-
- void BuildForeignInitFunction() {
- foreign_init_function_ = builder_->AddFunction();
- FunctionSig::Builder b(zone(), 0, foreign_variables_.size());
- for (auto i = foreign_variables_.begin(); i != foreign_variables_.end();
- ++i) {
- b.AddParam(i->type);
- }
- foreign_init_function_->ExportAs(
- CStrVector(AsmWasmBuilder::foreign_init_name));
- foreign_init_function_->SetSignature(b.Build());
- for (size_t pos = 0; pos < foreign_variables_.size(); ++pos) {
- foreign_init_function_->EmitGetLocal(static_cast<uint32_t>(pos));
- ForeignVariable* fv = &foreign_variables_[pos];
- uint32_t index = LookupOrInsertGlobal(fv->var, fv->type);
- foreign_init_function_->EmitWithVarUint(kExprSetGlobal, index);
- }
- foreign_init_function_->Emit(kExprEnd);
- }
-
- Handle<FixedArray> GetForeignArgs() {
- Handle<FixedArray> ret = isolate_->factory()->NewFixedArray(
- static_cast<int>(foreign_variables_.size()));
- for (size_t i = 0; i < foreign_variables_.size(); ++i) {
- ForeignVariable* fv = &foreign_variables_[i];
- ret->set(static_cast<int>(i), *fv->name);
- }
- return ret;
- }
-
- bool Build() {
- InitializeInitFunction();
- if (!typer_->ValidateBeforeFunctionsPhase()) {
- return false;
- }
- DCHECK(!HasStackOverflow());
- VisitFunctionLiteral(literal_);
- if (HasStackOverflow()) {
- return false;
- }
- if (!typer_finished_ && !typer_failed_) {
- typer_->FailWithMessage("Module missing export section.");
- typer_failed_ = true;
- }
- if (typer_failed_) {
- return false;
- }
- BuildForeignInitFunction();
- init_function_->Emit(kExprEnd); // finish init function.
- return true;
- }
-
- void VisitVariableDeclaration(VariableDeclaration* decl) {}
-
- void VisitFunctionDeclaration(FunctionDeclaration* decl) {
- DCHECK_EQ(kModuleScope, scope_);
- DCHECK_NULL(current_function_builder_);
- FunctionLiteral* old_func = decl->fun();
- DeclarationScope* new_func_scope = nullptr;
- std::unique_ptr<ParseInfo> info;
- if (decl->fun()->body() == nullptr) {
- // TODO(titzer/bradnelson): Reuse SharedFunctionInfos used here when
- // compiling the wasm module.
- Handle<SharedFunctionInfo> shared =
- Compiler::GetSharedFunctionInfo(decl->fun(), script_, info_);
- shared->set_is_toplevel(false);
- info.reset(new ParseInfo(script_));
- info->set_shared_info(shared);
- info->set_toplevel(false);
- info->set_language_mode(decl->fun()->scope()->language_mode());
- info->set_allow_lazy_parsing(false);
- info->set_function_literal_id(shared->function_literal_id());
- info->set_ast_value_factory(ast_value_factory_);
- info->set_ast_value_factory_owned(false);
- // Create fresh function scope to use to parse the function in.
- new_func_scope = new (info->zone()) DeclarationScope(
- info->zone(), decl->fun()->scope()->outer_scope(), FUNCTION_SCOPE);
- info->set_asm_function_scope(new_func_scope);
- if (!Compiler::ParseAndAnalyze(info.get(), info_->isolate())) {
- decl->fun()->scope()->outer_scope()->RemoveInnerScope(new_func_scope);
- if (isolate_->has_pending_exception()) {
- isolate_->clear_pending_exception();
- }
- typer_->TriggerParsingError();
- typer_failed_ = true;
- return;
- }
- FunctionLiteral* func = info->literal();
- DCHECK_NOT_NULL(func);
- decl->set_fun(func);
- }
- if (!typer_->ValidateInnerFunction(decl)) {
- typer_failed_ = true;
- decl->set_fun(old_func);
- if (new_func_scope != nullptr) {
- DCHECK_EQ(new_func_scope, decl->scope()->inner_scope());
- if (!decl->scope()->RemoveInnerScope(new_func_scope)) {
- UNREACHABLE();
- }
- }
- return;
- }
- current_function_builder_ = LookupOrInsertFunction(decl->proxy()->var());
- scope_ = kFuncScope;
-
- // Record start of the function, used as position for the stack check.
- current_function_builder_->SetAsmFunctionStartPosition(
- decl->fun()->start_position());
-
- RECURSE(Visit(decl->fun()));
- decl->set_fun(old_func);
- if (new_func_scope != nullptr) {
- DCHECK_EQ(new_func_scope, decl->scope()->inner_scope());
- if (!decl->scope()->RemoveInnerScope(new_func_scope)) {
- UNREACHABLE();
- }
- }
- scope_ = kModuleScope;
- current_function_builder_ = nullptr;
- local_variables_.Clear();
- typer_->ClearFunctionNodeTypes();
- }
-
- void VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0; i < stmts->length(); ++i) {
- Statement* stmt = stmts->at(i);
- ExpressionStatement* e = stmt->AsExpressionStatement();
- if (e != nullptr && e->expression()->IsUndefinedLiteral()) {
- continue;
- }
- RECURSE(Visit(stmt));
- if (typer_failed_) break;
-      // We deliberately don't stop when a jump statement is found.
- }
- }
-
- void VisitBlock(Block* stmt) {
- if (stmt->statements()->length() == 1) {
- ExpressionStatement* expr =
- stmt->statements()->at(0)->AsExpressionStatement();
- if (expr != nullptr) {
- if (expr->expression()->IsAssignment()) {
- RECURSE(VisitExpressionStatement(expr));
- return;
- }
- }
- }
- if (scope_ == kFuncScope) {
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
- BreakTarget);
- RECURSE(VisitStatements(stmt->statements()));
- } else {
- RECURSE(VisitStatements(stmt->statements()));
- }
- }
-
- class BlockVisitor {
- private:
- AsmWasmBuilderImpl* builder_;
-
- public:
- BlockVisitor(AsmWasmBuilderImpl* builder, BreakableStatement* stmt,
- WasmOpcode opcode, TargetType target_type = NoTarget)
- : builder_(builder) {
- builder_->breakable_blocks_.emplace_back(stmt, target_type);
-      // Blocks and loops have a type immediate.
- builder_->current_function_builder_->EmitWithU8(opcode, kLocalVoid);
- }
- ~BlockVisitor() {
- builder_->current_function_builder_->Emit(kExprEnd);
- builder_->breakable_blocks_.pop_back();
- }
- };
-
- void VisitExpressionStatement(ExpressionStatement* stmt) {
- VisitForEffect(stmt->expression());
- }
-
- void VisitForEffect(Expression* expr) {
- if (expr->IsAssignment()) {
- // Don't emit drops for assignments. Instead use SetLocal/GetLocal.
- VisitAssignment(expr->AsAssignment(), kDrop);
- return;
- }
- if (expr->IsCall()) {
- // Only emit a drop if the call has a non-void return value.
- if (VisitCallExpression(expr->AsCall()) && scope_ == kFuncScope) {
- current_function_builder_->Emit(kExprDrop);
- }
- return;
- }
- if (expr->IsBinaryOperation()) {
- BinaryOperation* binop = expr->AsBinaryOperation();
- if (binop->op() == Token::COMMA) {
- VisitForEffect(binop->left());
- VisitForEffect(binop->right());
- return;
- }
- }
- RECURSE(Visit(expr));
- if (scope_ == kFuncScope) current_function_builder_->Emit(kExprDrop);
- }
-
- void VisitEmptyStatement(EmptyStatement* stmt) {}
-
- void VisitEmptyParentheses(EmptyParentheses* paren) { UNREACHABLE(); }
-
- void VisitGetIterator(GetIterator* expr) { UNREACHABLE(); }
-
- void VisitImportCallExpression(ImportCallExpression* expr) { UNREACHABLE(); }
-
- void VisitIfStatement(IfStatement* stmt) {
- DCHECK_EQ(kFuncScope, scope_);
- RECURSE(Visit(stmt->condition()));
- // Wasm ifs come with implicit blocks for both arms.
- BlockVisitor block(this, nullptr, kExprIf);
- if (stmt->HasThenStatement()) {
- RECURSE(Visit(stmt->then_statement()));
- }
- if (stmt->HasElseStatement()) {
- current_function_builder_->Emit(kExprElse);
- RECURSE(Visit(stmt->else_statement()));
- }
- }
-
- void DoBreakOrContinue(BreakableStatement* target, TargetType type) {
- DCHECK_EQ(kFuncScope, scope_);
- for (int i = static_cast<int>(breakable_blocks_.size()) - 1; i >= 0; --i) {
- auto elem = breakable_blocks_.at(i);
- if (elem.first == target && elem.second == type) {
- int block_distance = static_cast<int>(breakable_blocks_.size() - i - 1);
- current_function_builder_->EmitWithVarUint(kExprBr, block_distance);
- return;
- }
- }
- UNREACHABLE(); // statement not found
- }
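Wasm br immediates count enclosing constructs outward from the innermost (depth 0), so the distance computed above is size - index - 1. The same search as a self-contained sketch, with a void pointer standing in for BreakableStatement:

    #include <utility>
    #include <vector>

    enum TargetType { NoTarget, BreakTarget, ContinueTarget };
    using BlockStack = std::vector<std::pair<const void*, TargetType>>;

    // A match at index i of a stack of size n is n - i - 1 levels out.
    int BlockDistance(const BlockStack& blocks, const void* target,
                      TargetType type) {
      for (int i = static_cast<int>(blocks.size()) - 1; i >= 0; --i) {
        if (blocks[i].first == target && blocks[i].second == type)
          return static_cast<int>(blocks.size()) - i - 1;
      }
      return -1;  // not found; the visitor above treats this as unreachable
    }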
-
- void VisitContinueStatement(ContinueStatement* stmt) {
- DoBreakOrContinue(stmt->target(), ContinueTarget);
- }
-
- void VisitBreakStatement(BreakStatement* stmt) {
- DoBreakOrContinue(stmt->target(), BreakTarget);
- }
-
- void VisitReturnStatement(ReturnStatement* stmt) {
- if (scope_ == kModuleScope) {
- if (typer_finished_) {
- typer_->FailWithMessage("Module has multiple returns.");
- typer_failed_ = true;
- return;
- }
- if (!typer_->ValidateAfterFunctionsPhase()) {
- typer_failed_ = true;
- return;
- }
- typer_finished_ = true;
- scope_ = kExportScope;
- RECURSE(Visit(stmt->expression()));
- scope_ = kModuleScope;
- } else if (scope_ == kFuncScope) {
- RECURSE(Visit(stmt->expression()));
- current_function_builder_->Emit(kExprReturn);
- } else {
- UNREACHABLE();
- }
- }
-
- void VisitWithStatement(WithStatement* stmt) { UNREACHABLE(); }
-
- void HandleCase(CaseNode* node,
- ZoneMap<int, unsigned int>& case_to_block,
- VariableProxy* tag, int default_block, int if_depth) {
- int prev_if_depth = if_depth;
- if (node->left != nullptr) {
- VisitVariableProxy(tag);
- current_function_builder_->EmitI32Const(node->begin);
- current_function_builder_->Emit(kExprI32LtS);
- current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
- if_depth++;
- breakable_blocks_.emplace_back(nullptr, NoTarget);
- HandleCase(node->left, case_to_block, tag, default_block, if_depth);
- current_function_builder_->Emit(kExprElse);
- }
- if (node->right != nullptr) {
- VisitVariableProxy(tag);
- current_function_builder_->EmitI32Const(node->end);
- current_function_builder_->Emit(kExprI32GtS);
- current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
- if_depth++;
- breakable_blocks_.emplace_back(nullptr, NoTarget);
- HandleCase(node->right, case_to_block, tag, default_block, if_depth);
- current_function_builder_->Emit(kExprElse);
- }
- if (node->begin == node->end) {
- VisitVariableProxy(tag);
- current_function_builder_->EmitI32Const(node->begin);
- current_function_builder_->Emit(kExprI32Eq);
- current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
- DCHECK(case_to_block.find(node->begin) != case_to_block.end());
- current_function_builder_->Emit(kExprBr);
- current_function_builder_->EmitVarUint(1 + if_depth +
- case_to_block[node->begin]);
- current_function_builder_->Emit(kExprEnd);
- } else {
- if (node->begin != 0) {
- VisitVariableProxy(tag);
- current_function_builder_->EmitI32Const(node->begin);
- current_function_builder_->Emit(kExprI32Sub);
- } else {
- VisitVariableProxy(tag);
- }
- current_function_builder_->Emit(kExprBrTable);
- current_function_builder_->EmitVarUint(node->end - node->begin + 1);
- for (int v = node->begin; v <= node->end; ++v) {
- if (case_to_block.find(v) != case_to_block.end()) {
- uint32_t target = if_depth + case_to_block[v];
- current_function_builder_->EmitVarUint(target);
- } else {
- uint32_t target = if_depth + default_block;
- current_function_builder_->EmitVarUint(target);
- }
- if (v == kMaxInt) {
- break;
- }
- }
- uint32_t target = if_depth + default_block;
- current_function_builder_->EmitVarUint(target);
- }
-
- while (if_depth-- != prev_if_depth) {
- breakable_blocks_.pop_back();
- current_function_builder_->Emit(kExprEnd);
- }
- }
-
- void VisitSwitchStatement(SwitchStatement* stmt) {
- VariableProxy* tag = stmt->tag()->AsVariableProxy();
- DCHECK_NOT_NULL(tag);
- ZoneList<CaseClause*>* clauses = stmt->cases();
- int case_count = clauses->length();
- if (case_count == 0) {
- return;
- }
- BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
- BreakTarget);
- ZoneVector<BlockVisitor*> blocks(zone_);
- ZoneVector<int32_t> cases(zone_);
- ZoneMap<int, unsigned int> case_to_block(zone_);
- bool has_default = false;
- for (int i = case_count - 1; i >= 0; --i) {
- CaseClause* clause = clauses->at(i);
- blocks.push_back(new BlockVisitor(this, nullptr, kExprBlock));
- if (!clause->is_default()) {
- Literal* label = clause->label()->AsLiteral();
- Handle<Object> value = label->value();
- int32_t label_value;
- bool label_is_i32 = value->ToInt32(&label_value);
- DCHECK(value->IsNumber() && label_is_i32);
- (void)label_is_i32;
- case_to_block[label_value] = i;
- cases.push_back(label_value);
- } else {
- DCHECK_EQ(i, case_count - 1);
- has_default = true;
- }
- }
- if (!has_default || case_count > 1) {
- int default_block = has_default ? case_count - 1 : case_count;
- BlockVisitor switch_logic_block(this, nullptr, kExprBlock);
- CaseNode* root = OrderCases(&cases, zone_);
- HandleCase(root, case_to_block, tag, default_block, 0);
- if (root->left != nullptr || root->right != nullptr ||
- root->begin == root->end) {
- current_function_builder_->Emit(kExprBr);
- current_function_builder_->EmitVarUint(default_block);
- }
- }
- for (int i = 0; i < case_count; ++i) {
- CaseClause* clause = clauses->at(i);
- RECURSE(VisitStatements(clause->statements()));
- BlockVisitor* v = blocks.at(case_count - i - 1);
- blocks.pop_back();
- delete v;
- }
- }
-
- void VisitCaseClause(CaseClause* clause) { UNREACHABLE(); }
-
- void VisitDoWhileStatement(DoWhileStatement* stmt) {
- DCHECK_EQ(kFuncScope, scope_);
- BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock,
- BreakTarget);
- BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
- {
- BlockVisitor inner_block(this, stmt->AsBreakableStatement(), kExprBlock,
- ContinueTarget);
- RECURSE(Visit(stmt->body()));
- }
- RECURSE(Visit(stmt->cond()));
- current_function_builder_->EmitWithU8(kExprBrIf, 0);
- }
-
- void VisitWhileStatement(WhileStatement* stmt) {
- DCHECK_EQ(kFuncScope, scope_);
- BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock,
- BreakTarget);
- BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop,
- ContinueTarget);
- RECURSE(Visit(stmt->cond()));
- BlockVisitor if_block(this, nullptr, kExprIf);
- RECURSE(Visit(stmt->body()));
- current_function_builder_->EmitWithU8(kExprBr, 1);
- }
-
- void VisitForStatement(ForStatement* stmt) {
- DCHECK_EQ(kFuncScope, scope_);
- if (stmt->init() != nullptr) {
- RECURSE(Visit(stmt->init()));
- }
- BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock,
- BreakTarget);
- BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop,
- ContinueTarget);
- if (stmt->cond() != nullptr) {
- RECURSE(Visit(stmt->cond()));
- current_function_builder_->Emit(kExprI32Eqz);
- current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
- current_function_builder_->EmitWithU8(kExprBr, 2);
- current_function_builder_->Emit(kExprEnd);
- }
- if (stmt->body() != nullptr) {
- RECURSE(Visit(stmt->body()));
- }
- if (stmt->next() != nullptr) {
- RECURSE(Visit(stmt->next()));
- }
- current_function_builder_->EmitWithU8(kExprBr, 0);
- }
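VisitWhileStatement and VisitForStatement build the same skeleton: an outer block as the break target, an inner loop as the continue target, and a guarded body whose final br re-enters the loop. A pseudo-wasm sketch of the shape emitted for `for (init; cond; next) body` (illustration only, kept in comments):

    // <init>
    // block            ;; break target
    //   loop           ;; continue target
    //     <cond>
    //     i32.eqz
    //     if
    //       br 2       ;; condition failed: exit the break block
    //     end
    //     <body>
    //     <next>
    //     br 0         ;; back to the loop header
    //   end
    // end
    //
    // `while (cond) body` is the same shape without <init>/<next>: the body
    // sits directly in an `if` on the un-negated condition and ends in
    // `br 1` back to the loop.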
-
- void VisitForInStatement(ForInStatement* stmt) { UNREACHABLE(); }
-
- void VisitForOfStatement(ForOfStatement* stmt) { UNREACHABLE(); }
-
- void VisitTryCatchStatement(TryCatchStatement* stmt) { UNREACHABLE(); }
-
- void VisitTryFinallyStatement(TryFinallyStatement* stmt) { UNREACHABLE(); }
-
- void VisitDebuggerStatement(DebuggerStatement* stmt) { UNREACHABLE(); }
-
- void VisitFunctionLiteral(FunctionLiteral* expr) {
- DeclarationScope* scope = expr->scope();
- if (scope_ == kFuncScope) {
- if (auto* func_type = typer_->TypeOf(expr)->AsFunctionType()) {
- // Add the parameters for the function.
- const auto& arguments = func_type->Arguments();
- for (int i = 0; i < expr->parameter_count(); ++i) {
- ValueType type = TypeFrom(arguments[i]);
- DCHECK_NE(kWasmStmt, type);
- InsertParameter(scope->parameter(i), type, i);
- }
- } else {
- UNREACHABLE();
- }
- }
- RECURSE(VisitDeclarations(scope->declarations()));
- if (typer_failed_) return;
- RECURSE(VisitStatements(expr->body()));
- if (scope_ == kFuncScope) {
- // Finish the function-body scope block.
- current_function_builder_->Emit(kExprEnd);
- }
- }
-
- void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
- UNREACHABLE();
- }
-
- void VisitConditional(Conditional* expr) {
- DCHECK_EQ(kFuncScope, scope_);
- RECURSE(Visit(expr->condition()));
- // Wasm ifs come with implicit blocks for both arms.
- breakable_blocks_.emplace_back(nullptr, NoTarget);
- ValueTypeCode type;
- switch (TypeOf(expr)) {
- case kWasmI32:
- type = kLocalI32;
- break;
- case kWasmI64:
- type = kLocalI64;
- break;
- case kWasmF32:
- type = kLocalF32;
- break;
- case kWasmF64:
- type = kLocalF64;
- break;
- default:
- UNREACHABLE();
- }
- current_function_builder_->EmitWithU8(kExprIf, type);
- RECURSE(Visit(expr->then_expression()));
- current_function_builder_->Emit(kExprElse);
- RECURSE(Visit(expr->else_expression()));
- current_function_builder_->Emit(kExprEnd);
- breakable_blocks_.pop_back();
- }
-
- bool VisitStdlibConstant(Variable* var) {
- AsmTyper::StandardMember standard_object =
- typer_->VariableAsStandardMember(var);
- double value;
- switch (standard_object) {
- case AsmTyper::kInfinity: {
- value = std::numeric_limits<double>::infinity();
- break;
- }
- case AsmTyper::kNaN: {
- value = std::numeric_limits<double>::quiet_NaN();
- break;
- }
- case AsmTyper::kMathE: {
- value = M_E;
- break;
- }
- case AsmTyper::kMathLN10: {
- value = M_LN10;
- break;
- }
- case AsmTyper::kMathLN2: {
- value = M_LN2;
- break;
- }
- case AsmTyper::kMathLOG10E: {
- value = M_LOG10E;
- break;
- }
- case AsmTyper::kMathLOG2E: {
- value = M_LOG2E;
- break;
- }
- case AsmTyper::kMathPI: {
- value = M_PI;
- break;
- }
- case AsmTyper::kMathSQRT1_2: {
- value = M_SQRT1_2;
- break;
- }
- case AsmTyper::kMathSQRT2: {
- value = M_SQRT2;
- break;
- }
- default: { return false; }
- }
- byte code[] = {WASM_F64(value)};
- current_function_builder_->EmitCode(code, sizeof(code));
- return true;
- }
-
- void VisitVariableProxy(VariableProxy* expr) {
- if (scope_ == kFuncScope || scope_ == kInitScope) {
- Variable* var = expr->var();
- if (VisitStdlibConstant(var)) {
- return;
- }
- ValueType var_type = TypeOf(expr);
- DCHECK_NE(kWasmStmt, var_type);
- if (var->IsContextSlot()) {
- current_function_builder_->EmitWithVarUint(
- kExprGetGlobal, LookupOrInsertGlobal(var, var_type));
- } else {
- current_function_builder_->EmitGetLocal(
- LookupOrInsertLocal(var, var_type));
- }
- } else if (scope_ == kExportScope) {
- Variable* var = expr->var();
- DCHECK(var->is_function());
- WasmFunctionBuilder* function = LookupOrInsertFunction(var);
- function->ExportAs(CStrVector(AsmWasmBuilder::single_function_name));
- }
- }
-
- void VisitLiteral(Literal* expr) {
- Handle<Object> value = expr->value();
- if (!(value->IsNumber() || expr->raw_value()->IsTrue() ||
- expr->raw_value()->IsFalse()) ||
- (scope_ != kFuncScope && scope_ != kInitScope)) {
- return;
- }
- AsmType* type = typer_->TypeOf(expr);
- DCHECK_NE(type, AsmType::None());
-
- if (type->IsA(AsmType::Signed())) {
- int32_t i = 0;
- CHECK(value->ToInt32(&i));
- current_function_builder_->EmitI32Const(i);
- } else if (type->IsA(AsmType::Unsigned()) || type->IsA(AsmType::FixNum())) {
- uint32_t u = 0;
- CHECK(value->ToUint32(&u));
- current_function_builder_->EmitI32Const(bit_cast<int32_t>(u));
- } else if (type->IsA(AsmType::Int())) {
-      // The parser can collapse !0, !1, etc. to true / false.
- // Allow these as int literals.
- if (expr->raw_value()->IsTrue()) {
- byte code[] = {WASM_ONE};
- current_function_builder_->EmitCode(code, sizeof(code));
- } else if (expr->raw_value()->IsFalse()) {
- byte code[] = {WASM_ZERO};
- current_function_builder_->EmitCode(code, sizeof(code));
- } else if (expr->raw_value()->IsNumber()) {
- // This can happen when -x becomes x * -1 (due to the parser).
- int32_t i = 0;
- CHECK(value->ToInt32(&i) && i == -1);
- byte code[] = {WASM_I32V_1(-1)};
- current_function_builder_->EmitCode(code, sizeof(code));
- } else {
- UNREACHABLE();
- }
- } else if (type->IsA(AsmType::Double())) {
- // TODO(bradnelson): Pattern match the case where negation occurs and
- // emit f64.neg instead.
- double val = expr->raw_value()->AsNumber();
- byte code[] = {WASM_F64(val)};
- current_function_builder_->EmitCode(code, sizeof(code));
- } else if (type->IsA(AsmType::Float())) {
- // This can happen when -fround(x) becomes fround(x) * 1.0[float]
- // (due to the parser).
- // TODO(bradnelson): Pattern match this and emit f32.neg instead.
- double val = expr->raw_value()->AsNumber();
- DCHECK_EQ(-1.0, val);
- byte code[] = {WASM_F32(val)};
- current_function_builder_->EmitCode(code, sizeof(code));
- } else {
- UNREACHABLE();
- }
- }
-
- void VisitRegExpLiteral(RegExpLiteral* expr) { UNREACHABLE(); }
-
- void VisitObjectLiteral(ObjectLiteral* expr) {
- ZoneList<ObjectLiteralProperty*>* props = expr->properties();
- for (int i = 0; i < props->length(); ++i) {
- ObjectLiteralProperty* prop = props->at(i);
- DCHECK_EQ(kExportScope, scope_);
- VariableProxy* expr = prop->value()->AsVariableProxy();
- DCHECK_NOT_NULL(expr);
- Variable* var = expr->var();
- Literal* name = prop->key()->AsLiteral();
- DCHECK_NOT_NULL(name);
- DCHECK(name->IsPropertyName());
- Handle<String> function_name = name->AsPropertyName();
- int length;
- std::unique_ptr<char[]> utf8 = function_name->ToCString(
- DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &length);
- if (var->is_function()) {
- WasmFunctionBuilder* function = LookupOrInsertFunction(var);
- function->ExportAs({utf8.get(), length});
- }
- }
- }
-
- void VisitArrayLiteral(ArrayLiteral* expr) { UNREACHABLE(); }
-
- void LoadInitFunction() {
- current_function_builder_ = init_function_;
- scope_ = kInitScope;
- }
-
- void UnLoadInitFunction() {
- scope_ = kModuleScope;
- current_function_builder_ = nullptr;
- }
-
- struct FunctionTableIndices : public ZoneObject {
- uint32_t start_index;
- uint32_t signature_index;
- };
-
- FunctionTableIndices* LookupOrAddFunctionTable(VariableProxy* table,
- Property* p) {
- FunctionTableIndices* indices = LookupFunctionTable(table->var());
- if (indices != nullptr) {
-      // Already set up.
- return indices;
- }
- indices = new (zone()) FunctionTableIndices();
- auto* func_type = typer_->TypeOf(p)->AsFunctionType();
- auto* func_table_type = typer_->TypeOf(p->obj()->AsVariableProxy()->var())
- ->AsFunctionTableType();
- const auto& arguments = func_type->Arguments();
- ValueType return_type = TypeFrom(func_type->ReturnType());
- FunctionSig::Builder sig(zone(), return_type == kWasmStmt ? 0 : 1,
- arguments.size());
- if (return_type != kWasmStmt) {
- sig.AddReturn(return_type);
- }
- for (auto* arg : arguments) {
- sig.AddParam(TypeFrom(arg));
- }
- uint32_t signature_index = builder_->AddSignature(sig.Build());
- indices->start_index = builder_->AllocateIndirectFunctions(
- static_cast<uint32_t>(func_table_type->length()));
- indices->signature_index = signature_index;
- ZoneHashMap::Entry* entry = function_tables_.LookupOrInsert(
- table->var(), ComputePointerHash(table->var()),
- ZoneAllocationPolicy(zone()));
- entry->value = indices;
- return indices;
- }
-
- FunctionTableIndices* LookupFunctionTable(Variable* v) {
- ZoneHashMap::Entry* entry =
- function_tables_.Lookup(v, ComputePointerHash(v));
- if (entry == nullptr) {
- return nullptr;
- }
- return reinterpret_cast<FunctionTableIndices*>(entry->value);
- }
-
- void PopulateFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
- FunctionTableIndices* indices = LookupFunctionTable(table->var());
- // Ignore unused function tables.
- if (indices == nullptr) {
- return;
- }
- for (int i = 0; i < funcs->values()->length(); ++i) {
- VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
- DCHECK_NOT_NULL(func);
- builder_->SetIndirectFunction(
- indices->start_index + i,
- LookupOrInsertFunction(func->var())->func_index());
- }
- }
-
- class ImportedFunctionTable {
- private:
- class ImportedFunctionIndices : public ZoneObject {
- public:
- const char* name_;
- int name_length_;
- WasmModuleBuilder::SignatureMap signature_to_index_;
-
- ImportedFunctionIndices(const char* name, int name_length, Zone* zone)
- : name_(name), name_length_(name_length), signature_to_index_(zone) {}
- };
- ZoneHashMap table_;
- AsmWasmBuilderImpl* builder_;
-
- public:
- explicit ImportedFunctionTable(AsmWasmBuilderImpl* builder)
- : table_(ZoneHashMap::kDefaultHashMapCapacity,
- ZoneAllocationPolicy(builder->zone())),
- builder_(builder) {}
-
- ImportedFunctionIndices* LookupOrInsertImport(Variable* v) {
- auto* entry = table_.LookupOrInsert(
- v, ComputePointerHash(v), ZoneAllocationPolicy(builder_->zone()));
- ImportedFunctionIndices* indices;
- if (entry->value == nullptr) {
- indices = new (builder_->zone())
- ImportedFunctionIndices(nullptr, 0, builder_->zone());
- entry->value = indices;
- } else {
- indices = reinterpret_cast<ImportedFunctionIndices*>(entry->value);
- }
- return indices;
- }
-
- void SetImportName(Variable* v, const char* name, int name_length) {
- auto* indices = LookupOrInsertImport(v);
- indices->name_ = name;
- indices->name_length_ = name_length;
- for (auto i : indices->signature_to_index_) {
- builder_->builder_->SetImportName(i.second, indices->name_,
- indices->name_length_);
- }
- }
-
- // Get a function's index (or allocate if new).
- uint32_t LookupOrInsertImportUse(Variable* v, FunctionSig* sig) {
- auto* indices = LookupOrInsertImport(v);
- WasmModuleBuilder::SignatureMap::iterator pos =
- indices->signature_to_index_.find(sig);
- if (pos != indices->signature_to_index_.end()) {
- return pos->second;
- } else {
- uint32_t index = builder_->builder_->AddImport(
- indices->name_, indices->name_length_, sig);
- indices->signature_to_index_[sig] = index;
- return index;
- }
- }
- };
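Because an FFI callee is untyped JavaScript, the table above allocates one wasm import per distinct call signature, all sharing the imported name. A minimal sketch of that keying, with a string standing in for FunctionSig:

    #include <cstdint>
    #include <map>
    #include <string>

    struct PerNameImports {
      std::map<std::string, uint32_t> signature_to_index;
      uint32_t LookupOrInsertUse(const std::string& sig,
                                 uint32_t* next_index) {
        auto it = signature_to_index.find(sig);
        if (it != signature_to_index.end()) return it->second;
        return signature_to_index[sig] = (*next_index)++;
      }
    };

Calling the same foreign function as, say, `foreign.f(x | 0) | 0` and `+foreign.f(+y)` therefore produces two import entries for f, one per signature.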
-
- void EmitAssignmentLhs(Expression* target, AsmType** atype) {
- // Match the left hand side of the assignment.
- VariableProxy* target_var = target->AsVariableProxy();
- if (target_var != nullptr) {
- // Left hand side is a local or a global variable, no code on LHS.
- return;
- }
-
- Property* target_prop = target->AsProperty();
- if (target_prop != nullptr) {
- // Left hand side is a property access, i.e. the asm.js heap.
- VisitPropertyAndEmitIndex(target_prop, atype);
- return;
- }
-
- if (target_var == nullptr && target_prop == nullptr) {
- UNREACHABLE(); // invalid assignment.
- }
- }
-
- void EmitAssignmentRhs(Expression* target, Expression* value, bool* is_nop) {
- BinaryOperation* binop = value->AsBinaryOperation();
- if (binop != nullptr) {
- if (scope_ == kInitScope) {
- // Handle foreign variables in the initialization scope.
- Property* prop = binop->left()->AsProperty();
- if (binop->op() == Token::MUL) {
- DCHECK(binop->right()->IsLiteral());
- DCHECK_EQ(1.0, binop->right()->AsLiteral()->raw_value()->AsNumber());
- DCHECK(binop->right()->AsLiteral()->raw_value()->ContainsDot());
- DCHECK(target->IsVariableProxy());
- VisitForeignVariable(true, target->AsVariableProxy()->var(), prop);
- *is_nop = true;
- return;
- } else if (binop->op() == Token::BIT_OR) {
- DCHECK(binop->right()->IsLiteral());
- DCHECK_EQ(0.0, binop->right()->AsLiteral()->raw_value()->AsNumber());
- DCHECK(!binop->right()->AsLiteral()->raw_value()->ContainsDot());
- DCHECK(target->IsVariableProxy());
- VisitForeignVariable(false, target->AsVariableProxy()->var(), prop);
- *is_nop = true;
- return;
- } else {
- UNREACHABLE();
- }
- }
- if (MatchBinaryOperation(binop) == kAsIs) {
- VariableProxy* target_var = target->AsVariableProxy();
- VariableProxy* effective_value_var = GetLeft(binop)->AsVariableProxy();
- if (target_var != nullptr && effective_value_var != nullptr &&
- target_var->var() == effective_value_var->var()) {
- *is_nop = true;
- return;
- }
- }
- }
- RECURSE(Visit(value));
- }
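In the init scope, the two DCHECKed shapes correspond to the asm.js import idioms `var x = foreign.f | 0` (int) and `var y = +foreign.g` (double), the latter presumably reaching the builder as a MUL by 1.0 via the parser's rewrite of unary plus. A compact restatement of the classification, with plain values standing in for the AST nodes:

    #include <cassert>

    enum class ForeignKind { kInt32, kFloat64 };

    // op: the rewritten binop; rhs: its literal right operand; has_dot:
    // whether the literal carried a decimal point (1.0 vs 0).
    ForeignKind ClassifyForeignImport(char op, double rhs, bool has_dot) {
      if (op == '*') {
        assert(rhs == 1.0 && has_dot);
        return ForeignKind::kFloat64;  // var y = +foreign.g;
      }
      assert(op == '|' && rhs == 0 && !has_dot);
      return ForeignKind::kInt32;      // var x = foreign.f | 0;
    }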
-
- void EmitAssignment(Assignment* expr, AsmType* type, ValueFate fate) {
- // Match the left hand side of the assignment.
- VariableProxy* target_var = expr->target()->AsVariableProxy();
- if (target_var != nullptr) {
- // Left hand side is a local or a global variable.
- Variable* var = target_var->var();
- ValueType var_type = TypeOf(expr);
- DCHECK_NE(kWasmStmt, var_type);
- if (var->IsContextSlot()) {
- uint32_t index = LookupOrInsertGlobal(var, var_type);
- current_function_builder_->EmitWithVarUint(kExprSetGlobal, index);
- if (fate == kLeaveOnStack) {
- current_function_builder_->EmitWithVarUint(kExprGetGlobal, index);
- }
- } else {
- if (fate == kDrop) {
- current_function_builder_->EmitSetLocal(
- LookupOrInsertLocal(var, var_type));
- } else {
- current_function_builder_->EmitTeeLocal(
- LookupOrInsertLocal(var, var_type));
- }
- }
- }
-
- Property* target_prop = expr->target()->AsProperty();
- if (target_prop != nullptr) {
- // Left hand side is a property access, i.e. the asm.js heap.
- if (TypeOf(expr->value()) == kWasmF64 && expr->target()->IsProperty() &&
- typer_->TypeOf(expr->target()->AsProperty()->obj())
- ->IsA(AsmType::Float32Array())) {
- current_function_builder_->Emit(kExprF32ConvertF64);
- }
- // Note that unlike StoreMem, AsmjsStoreMem ignores out-of-bounds writes.
- WasmOpcode opcode;
- if (type == AsmType::Int8Array()) {
- opcode = kExprI32AsmjsStoreMem8;
- } else if (type == AsmType::Uint8Array()) {
- opcode = kExprI32AsmjsStoreMem8;
- } else if (type == AsmType::Int16Array()) {
- opcode = kExprI32AsmjsStoreMem16;
- } else if (type == AsmType::Uint16Array()) {
- opcode = kExprI32AsmjsStoreMem16;
- } else if (type == AsmType::Int32Array()) {
- opcode = kExprI32AsmjsStoreMem;
- } else if (type == AsmType::Uint32Array()) {
- opcode = kExprI32AsmjsStoreMem;
- } else if (type == AsmType::Float32Array()) {
- opcode = kExprF32AsmjsStoreMem;
- } else if (type == AsmType::Float64Array()) {
- opcode = kExprF64AsmjsStoreMem;
- } else {
- UNREACHABLE();
- }
- current_function_builder_->Emit(opcode);
- if (fate == kDrop) {
- // Asm.js stores to memory leave their result on the stack.
- current_function_builder_->Emit(kExprDrop);
- }
- }
-
- if (target_var == nullptr && target_prop == nullptr) {
- UNREACHABLE(); // invalid assignment.
- }
- }
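Per the note above, the kExpr*AsmjsStoreMem opcodes differ from plain wasm stores in their bounds behavior. A sketch of the intended semantics, an assumption drawn from that comment rather than from the opcode implementations:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Out-of-bounds asm.js stores are dropped silently, matching JavaScript
    // typed-array writes, where an ordinary wasm store would trap.
    void AsmjsStoreMem8(std::vector<uint8_t>& heap, size_t index,
                        uint8_t value) {
      if (index < heap.size()) heap[index] = value;  // OOB: no-op
    }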
-
- void VisitAssignment(Assignment* expr) {
- VisitAssignment(expr, kLeaveOnStack);
- }
-
- void VisitAssignment(Assignment* expr, ValueFate fate) {
- bool as_init = false;
- if (scope_ == kModuleScope) {
- // Skip extra assignment inserted by the parser when in this form:
- // (function Module(a, b, c) {... })
- if (expr->target()->IsVariableProxy() &&
- expr->target()->AsVariableProxy()->var()->is_sloppy_function_name()) {
- return;
- }
- Property* prop = expr->value()->AsProperty();
- if (prop != nullptr) {
- VariableProxy* vp = prop->obj()->AsVariableProxy();
- if (vp != nullptr && vp->var()->IsParameter() &&
- vp->var()->index() == 1) {
- VariableProxy* target = expr->target()->AsVariableProxy();
- if (typer_->TypeOf(target)->AsFFIType() != nullptr) {
- const AstRawString* name =
- prop->key()->AsLiteral()->AsRawPropertyName();
- imported_function_table_.SetImportName(
- target->var(), reinterpret_cast<const char*>(name->raw_data()),
- name->length());
- }
- }
- // Property values in module scope don't emit code, so return.
- return;
- }
- ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
- if (funcs != nullptr) {
- VariableProxy* target = expr->target()->AsVariableProxy();
- DCHECK_NOT_NULL(target);
- PopulateFunctionTable(target, funcs);
- // Only add to the function table. No init needed.
- return;
- }
- if (expr->value()->IsCallNew()) {
- // No init code to emit for CallNew nodes.
- return;
- }
- as_init = true;
- }
-
- if (as_init) LoadInitFunction();
- AsmType* atype = AsmType::None();
- bool is_nop = false;
- EmitAssignmentLhs(expr->target(), &atype);
- EmitAssignmentRhs(expr->target(), expr->value(), &is_nop);
- if (!is_nop) {
- EmitAssignment(expr, atype, fate);
- }
- if (as_init) UnLoadInitFunction();
- }
-
- void VisitSuspend(Suspend* expr) { UNREACHABLE(); }
-
- void VisitThrow(Throw* expr) { UNREACHABLE(); }
-
- void VisitForeignVariable(bool is_float, Variable* var, Property* expr) {
- DCHECK(expr->obj()->AsVariableProxy());
- DCHECK(VariableLocation::PARAMETER ==
- expr->obj()->AsVariableProxy()->var()->location());
- DCHECK_EQ(1, expr->obj()->AsVariableProxy()->var()->index());
- Literal* key_literal = expr->key()->AsLiteral();
- DCHECK_NOT_NULL(key_literal);
- if (!key_literal->value().is_null()) {
- Handle<Name> name =
- Object::ToName(isolate_, key_literal->value()).ToHandleChecked();
- ValueType type = is_float ? kWasmF64 : kWasmI32;
- foreign_variables_.push_back({name, var, type});
- }
- }
-
- void VisitPropertyAndEmitIndex(Property* expr, AsmType** atype) {
- Expression* obj = expr->obj();
- *atype = typer_->TypeOf(obj);
- int32_t size = (*atype)->ElementSizeInBytes();
- if (size == 1) {
-      // Allow more general expressions in byte arrays than the spec
- // strictly permits.
- // Early versions of Emscripten emit HEAP8[HEAP32[..]|0] in
- // places that strictly should be HEAP8[HEAP32[..]>>0].
- RECURSE(Visit(expr->key()));
- return;
- }
-
- Literal* value = expr->key()->AsLiteral();
- if (value) {
- DCHECK(value->raw_value()->IsNumber());
- DCHECK_EQ(kWasmI32, TypeOf(value));
- int32_t val = static_cast<int32_t>(value->raw_value()->AsNumber());
- // TODO(titzer): handle overflow here.
- current_function_builder_->EmitI32Const(val * size);
- return;
- }
- BinaryOperation* binop = expr->key()->AsBinaryOperation();
- if (binop) {
- DCHECK_EQ(Token::SAR, binop->op());
- DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
- DCHECK(kWasmI32 == TypeOf(binop->right()->AsLiteral()));
- DCHECK_EQ(size,
- 1 << static_cast<int>(
- binop->right()->AsLiteral()->raw_value()->AsNumber()));
- // Mask bottom bits to match asm.js behavior.
- RECURSE(Visit(binop->left()));
- current_function_builder_->EmitI32Const(~(size - 1));
- current_function_builder_->Emit(kExprI32And);
- return;
- }
- UNREACHABLE();
- }
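For an access like HEAP32[p >> 2], the shift by log2(size) followed by the implicit scale back up by size collapses to clearing the low bits of the byte pointer, which is exactly what the i32.and emitted above encodes. The arithmetic as a self-contained sketch:

    #include <cstdint>

    // HEAPn[p >> k] (element size 1 << k bytes) addresses byte
    // (p >> k) << k, i.e. p with its low k bits cleared.
    int32_t MaskedByteOffset(int32_t pointer, int32_t element_size) {
      return pointer & ~(element_size - 1);  // e.g. HEAP32: p & ~3
    }

    // MaskedByteOffset(13, 4) == 12, matching HEAP32[13 >> 2] at byte 12.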
-
- void VisitProperty(Property* expr) {
- AsmType* type = AsmType::None();
- VisitPropertyAndEmitIndex(expr, &type);
- WasmOpcode opcode;
- if (type == AsmType::Int8Array()) {
- opcode = kExprI32AsmjsLoadMem8S;
- } else if (type == AsmType::Uint8Array()) {
- opcode = kExprI32AsmjsLoadMem8U;
- } else if (type == AsmType::Int16Array()) {
- opcode = kExprI32AsmjsLoadMem16S;
- } else if (type == AsmType::Uint16Array()) {
- opcode = kExprI32AsmjsLoadMem16U;
- } else if (type == AsmType::Int32Array()) {
- opcode = kExprI32AsmjsLoadMem;
- } else if (type == AsmType::Uint32Array()) {
- opcode = kExprI32AsmjsLoadMem;
- } else if (type == AsmType::Float32Array()) {
- opcode = kExprF32AsmjsLoadMem;
- } else if (type == AsmType::Float64Array()) {
- opcode = kExprF64AsmjsLoadMem;
- } else {
- UNREACHABLE();
- }
-
- current_function_builder_->Emit(opcode);
- }
-
- bool VisitStdlibFunction(Call* call, VariableProxy* expr) {
- Variable* var = expr->var();
- AsmTyper::StandardMember standard_object =
- typer_->VariableAsStandardMember(var);
- ZoneList<Expression*>* args = call->arguments();
- ValueType call_type = TypeOf(call);
-
- switch (standard_object) {
- case AsmTyper::kNone: {
- return false;
- }
- case AsmTyper::kMathAcos: {
- VisitCallArgs(call);
- DCHECK_EQ(kWasmF64, call_type);
- current_function_builder_->Emit(kExprF64Acos);
- break;
- }
- case AsmTyper::kMathAsin: {
- VisitCallArgs(call);
- DCHECK_EQ(kWasmF64, call_type);
- current_function_builder_->Emit(kExprF64Asin);
- break;
- }
- case AsmTyper::kMathAtan: {
- VisitCallArgs(call);
- DCHECK_EQ(kWasmF64, call_type);
- current_function_builder_->Emit(kExprF64Atan);
- break;
- }
- case AsmTyper::kMathCos: {
- VisitCallArgs(call);
- DCHECK_EQ(kWasmF64, call_type);
- current_function_builder_->Emit(kExprF64Cos);
- break;
- }
- case AsmTyper::kMathSin: {
- VisitCallArgs(call);
- DCHECK_EQ(kWasmF64, call_type);
- current_function_builder_->Emit(kExprF64Sin);
- break;
- }
- case AsmTyper::kMathTan: {
- VisitCallArgs(call);
- DCHECK_EQ(kWasmF64, call_type);
- current_function_builder_->Emit(kExprF64Tan);
- break;
- }
- case AsmTyper::kMathExp: {
- VisitCallArgs(call);
- DCHECK_EQ(kWasmF64, call_type);
- current_function_builder_->Emit(kExprF64Exp);
- break;
- }
- case AsmTyper::kMathLog: {
- VisitCallArgs(call);
- DCHECK_EQ(kWasmF64, call_type);
- current_function_builder_->Emit(kExprF64Log);
- break;
- }
- case AsmTyper::kMathCeil: {
- VisitCallArgs(call);
- if (call_type == kWasmF32) {
- current_function_builder_->Emit(kExprF32Ceil);
- } else if (call_type == kWasmF64) {
- current_function_builder_->Emit(kExprF64Ceil);
- } else {
- UNREACHABLE();
- }
- break;
- }
- case AsmTyper::kMathFloor: {
- VisitCallArgs(call);
- if (call_type == kWasmF32) {
- current_function_builder_->Emit(kExprF32Floor);
- } else if (call_type == kWasmF64) {
- current_function_builder_->Emit(kExprF64Floor);
- } else {
- UNREACHABLE();
- }
- break;
- }
- case AsmTyper::kMathSqrt: {
- VisitCallArgs(call);
- if (call_type == kWasmF32) {
- current_function_builder_->Emit(kExprF32Sqrt);
- } else if (call_type == kWasmF64) {
- current_function_builder_->Emit(kExprF64Sqrt);
- } else {
- UNREACHABLE();
- }
- break;
- }
- case AsmTyper::kMathClz32: {
- VisitCallArgs(call);
- DCHECK(call_type == kWasmI32);
- current_function_builder_->Emit(kExprI32Clz);
- break;
- }
- case AsmTyper::kMathAbs: {
- if (call_type == kWasmI32) {
- WasmTemporary tmp(current_function_builder_, kWasmI32);
-
- // if set_local(tmp, x) < 0
- Visit(call->arguments()->at(0));
- current_function_builder_->EmitTeeLocal(tmp.index());
- byte code[] = {WASM_ZERO};
- current_function_builder_->EmitCode(code, sizeof(code));
- current_function_builder_->Emit(kExprI32LtS);
- current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
-
- // then (0 - tmp)
- current_function_builder_->EmitCode(code, sizeof(code));
- current_function_builder_->EmitGetLocal(tmp.index());
- current_function_builder_->Emit(kExprI32Sub);
-
- // else tmp
- current_function_builder_->Emit(kExprElse);
- current_function_builder_->EmitGetLocal(tmp.index());
- // end
- current_function_builder_->Emit(kExprEnd);
-
- } else if (call_type == kWasmF32) {
- VisitCallArgs(call);
- current_function_builder_->Emit(kExprF32Abs);
- } else if (call_type == kWasmF64) {
- VisitCallArgs(call);
- current_function_builder_->Emit(kExprF64Abs);
- } else {
- UNREACHABLE();
- }
- break;
- }
- case AsmTyper::kMathMin: {
- // TODO(bradnelson): Change wasm to match Math.min in asm.js mode.
- if (call_type == kWasmI32) {
- WasmTemporary tmp_x(current_function_builder_, kWasmI32);
- WasmTemporary tmp_y(current_function_builder_, kWasmI32);
-
-          // if set_local(tmp_x, x) <= set_local(tmp_y, y)
- Visit(call->arguments()->at(0));
- current_function_builder_->EmitTeeLocal(tmp_x.index());
-
- Visit(call->arguments()->at(1));
- current_function_builder_->EmitTeeLocal(tmp_y.index());
-
- current_function_builder_->Emit(kExprI32LeS);
- current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
-
- // then tmp_x
- current_function_builder_->EmitGetLocal(tmp_x.index());
-
- // else tmp_y
- current_function_builder_->Emit(kExprElse);
- current_function_builder_->EmitGetLocal(tmp_y.index());
- current_function_builder_->Emit(kExprEnd);
-
- } else if (call_type == kWasmF32) {
- VisitCallArgs(call);
- current_function_builder_->Emit(kExprF32Min);
- } else if (call_type == kWasmF64) {
- VisitCallArgs(call);
- current_function_builder_->Emit(kExprF64Min);
- } else {
- UNREACHABLE();
- }
- break;
- }
- case AsmTyper::kMathMax: {
- // TODO(bradnelson): Change wasm to match Math.max in asm.js mode.
- if (call_type == kWasmI32) {
- WasmTemporary tmp_x(current_function_builder_, kWasmI32);
- WasmTemporary tmp_y(current_function_builder_, kWasmI32);
-
-          // if set_local(tmp_x, x) <= set_local(tmp_y, y)
- Visit(call->arguments()->at(0));
-
- current_function_builder_->EmitTeeLocal(tmp_x.index());
-
- Visit(call->arguments()->at(1));
- current_function_builder_->EmitTeeLocal(tmp_y.index());
-
- current_function_builder_->Emit(kExprI32LeS);
- current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
-
- // then tmp_y
- current_function_builder_->EmitGetLocal(tmp_y.index());
-
- // else tmp_x
- current_function_builder_->Emit(kExprElse);
- current_function_builder_->EmitGetLocal(tmp_x.index());
- current_function_builder_->Emit(kExprEnd);
-
- } else if (call_type == kWasmF32) {
- VisitCallArgs(call);
- current_function_builder_->Emit(kExprF32Max);
- } else if (call_type == kWasmF64) {
- VisitCallArgs(call);
- current_function_builder_->Emit(kExprF64Max);
- } else {
- UNREACHABLE();
- }
- break;
- }
- case AsmTyper::kMathAtan2: {
- VisitCallArgs(call);
- DCHECK_EQ(kWasmF64, call_type);
- current_function_builder_->Emit(kExprF64Atan2);
- break;
- }
- case AsmTyper::kMathPow: {
- VisitCallArgs(call);
- DCHECK_EQ(kWasmF64, call_type);
- current_function_builder_->Emit(kExprF64Pow);
- break;
- }
- case AsmTyper::kMathImul: {
- VisitCallArgs(call);
- current_function_builder_->Emit(kExprI32Mul);
- break;
- }
- case AsmTyper::kMathFround: {
- DCHECK(args->length() == 1);
- Literal* literal = args->at(0)->AsLiteral();
- if (literal != nullptr) {
- // constant fold Math.fround(#const);
- if (literal->raw_value()->IsNumber()) {
- float val = static_cast<float>(literal->raw_value()->AsNumber());
- byte code[] = {WASM_F32(val)};
- current_function_builder_->EmitCode(code, sizeof(code));
- return true;
- }
- }
- VisitCallArgs(call);
- static const bool kDontIgnoreSign = false;
- switch (TypeIndexOf(args->at(0), kDontIgnoreSign)) {
- case kInt32:
- case kFixnum:
- current_function_builder_->Emit(kExprF32SConvertI32);
- break;
- case kUint32:
- current_function_builder_->Emit(kExprF32UConvertI32);
- break;
- case kFloat32:
- break;
- case kFloat64:
- current_function_builder_->Emit(kExprF32ConvertF64);
- break;
- default:
- UNREACHABLE();
- }
- break;
- }
- default: {
- UNREACHABLE();
- break;
- }
- }
- return true;
- }
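The integer Math.abs/min/max lowerings above spell out, with temporaries, what a conditional expression computes. Restated compactly as a sketch; note that 0 - INT32_MIN wraps in wasm but is formally undefined behavior in C++:

    #include <cstdint>

    int32_t AsmjsAbsI32(int32_t x) { return x < 0 ? 0 - x : x; }
    int32_t AsmjsMinI32(int32_t x, int32_t y) { return x <= y ? x : y; }
    int32_t AsmjsMaxI32(int32_t x, int32_t y) { return x <= y ? y : x; }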
-
- void VisitCallArgs(Call* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- for (int i = 0; i < args->length(); ++i) {
- Expression* arg = args->at(i);
- RECURSE(Visit(arg));
- }
- }
-
- void VisitCall(Call* expr) { VisitCallExpression(expr); }
-
- bool VisitCallExpression(Call* expr) {
- Call::CallType call_type = expr->GetCallType();
- bool returns_value = true;
-
-    // Save the parent now; it might be overwritten in VisitCallArgs.
- BinaryOperation* parent_binop = parent_binop_;
-
- switch (call_type) {
- case Call::OTHER_CALL: {
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- if (proxy != nullptr) {
- DCHECK(kFuncScope == scope_ ||
- typer_->VariableAsStandardMember(proxy->var()) ==
- AsmTyper::kMathFround);
- if (VisitStdlibFunction(expr, proxy)) {
- return true;
- }
- }
- DCHECK(kFuncScope == scope_);
- VariableProxy* vp = expr->expression()->AsVariableProxy();
- DCHECK_NOT_NULL(vp);
- if (typer_->TypeOf(vp)->AsFFIType() != nullptr) {
- ValueType return_type = TypeOf(expr);
- ZoneList<Expression*>* args = expr->arguments();
- FunctionSig::Builder sig(zone(), return_type == kWasmStmt ? 0 : 1,
- args->length());
- if (return_type != kWasmStmt) {
- sig.AddReturn(return_type);
- } else {
- returns_value = false;
- }
- for (int i = 0; i < args->length(); ++i) {
- sig.AddParam(TypeOf(args->at(i)));
- }
- uint32_t index = imported_function_table_.LookupOrInsertImportUse(
- vp->var(), sig.Build());
- VisitCallArgs(expr);
- // For non-void functions, we must know the parent node.
- DCHECK_IMPLIES(returns_value, parent_binop != nullptr);
- DCHECK_IMPLIES(returns_value, parent_binop->left() == expr ||
- parent_binop->right() == expr);
- int pos = expr->position();
- int parent_pos = returns_value ? parent_binop->position() : pos;
- current_function_builder_->AddAsmWasmOffset(pos, parent_pos);
- current_function_builder_->Emit(kExprCallFunction);
- current_function_builder_->EmitVarUint(index);
- } else {
- WasmFunctionBuilder* function = LookupOrInsertFunction(vp->var());
- VisitCallArgs(expr);
- current_function_builder_->AddAsmWasmOffset(expr->position(),
- expr->position());
- current_function_builder_->Emit(kExprCallFunction);
- current_function_builder_->EmitDirectCallIndex(
- function->func_index());
- returns_value = function->signature()->return_count() > 0;
- }
- break;
- }
- case Call::KEYED_PROPERTY_CALL: {
- DCHECK_EQ(kFuncScope, scope_);
- Property* p = expr->expression()->AsProperty();
- DCHECK_NOT_NULL(p);
- VariableProxy* var = p->obj()->AsVariableProxy();
- DCHECK_NOT_NULL(var);
- FunctionTableIndices* indices = LookupOrAddFunctionTable(var, p);
- Visit(p->key()); // TODO(titzer): should use RECURSE()
-
- // We have to use a temporary for the correct order of evaluation.
- current_function_builder_->EmitI32Const(indices->start_index);
- current_function_builder_->Emit(kExprI32Add);
- WasmTemporary tmp(current_function_builder_, kWasmI32);
- current_function_builder_->EmitSetLocal(tmp.index());
-
- VisitCallArgs(expr);
-
- current_function_builder_->EmitGetLocal(tmp.index());
- current_function_builder_->AddAsmWasmOffset(expr->position(),
- expr->position());
- current_function_builder_->Emit(kExprCallIndirect);
- current_function_builder_->EmitVarUint(indices->signature_index);
- current_function_builder_->EmitVarUint(0); // table index
- returns_value =
- builder_->GetSignature(indices->signature_index)->return_count() >
- 0;
- break;
- }
- default:
- UNREACHABLE();
- }
- return returns_value;
- }
-
- void VisitCallNew(CallNew* expr) { UNREACHABLE(); }
-
- void VisitCallRuntime(CallRuntime* expr) { UNREACHABLE(); }
-
- void VisitUnaryOperation(UnaryOperation* expr) {
- RECURSE(Visit(expr->expression()));
- switch (expr->op()) {
- case Token::NOT: {
- DCHECK_EQ(kWasmI32, TypeOf(expr->expression()));
- current_function_builder_->Emit(kExprI32Eqz);
- break;
- }
- default:
- UNREACHABLE();
- }
- }
-
- void VisitCountOperation(CountOperation* expr) { UNREACHABLE(); }
-
- bool MatchIntBinaryOperation(BinaryOperation* expr, Token::Value op,
- int32_t val) {
- DCHECK_NOT_NULL(expr->right());
- if (expr->op() == op && expr->right()->IsLiteral() &&
- TypeOf(expr) == kWasmI32) {
- Literal* right = expr->right()->AsLiteral();
- if (right->raw_value()->IsNumber() &&
- static_cast<int32_t>(right->raw_value()->AsNumber()) == val) {
- return true;
- }
- }
- return false;
- }
-
- bool MatchDoubleBinaryOperation(BinaryOperation* expr, Token::Value op,
- double val) {
- DCHECK_NOT_NULL(expr->right());
- if (expr->op() == op && expr->right()->IsLiteral() &&
- TypeOf(expr) == kWasmF64) {
- Literal* right = expr->right()->AsLiteral();
- DCHECK(right->raw_value()->IsNumber());
- if (right->raw_value()->AsNumber() == val) {
- return true;
- }
- }
- return false;
- }
-
- enum ConvertOperation { kNone, kAsIs, kToInt, kToDouble };
-
- ConvertOperation MatchOr(BinaryOperation* expr) {
- if (MatchIntBinaryOperation(expr, Token::BIT_OR, 0) &&
- (TypeOf(expr->left()) == kWasmI32)) {
- return kAsIs;
- } else {
- return kNone;
- }
- }
-
- ConvertOperation MatchShr(BinaryOperation* expr) {
- if (MatchIntBinaryOperation(expr, Token::SHR, 0)) {
- // TODO(titzer): this probably needs to be kToUint
- return (TypeOf(expr->left()) == kWasmI32) ? kAsIs : kToInt;
- } else {
- return kNone;
- }
- }
-
- ConvertOperation MatchXor(BinaryOperation* expr) {
- if (MatchIntBinaryOperation(expr, Token::BIT_XOR, 0xffffffff)) {
- DCHECK_EQ(kWasmI32, TypeOf(expr->left()));
- DCHECK_EQ(kWasmI32, TypeOf(expr->right()));
- BinaryOperation* op = expr->left()->AsBinaryOperation();
- if (op != nullptr) {
- if (MatchIntBinaryOperation(op, Token::BIT_XOR, 0xffffffff)) {
- DCHECK_EQ(kWasmI32, TypeOf(op->right()));
- if (TypeOf(op->left()) != kWasmI32) {
- return kToInt;
- } else {
- return kAsIs;
- }
- }
- }
- }
- return kNone;
- }
-
- ConvertOperation MatchMul(BinaryOperation* expr) {
- if (MatchDoubleBinaryOperation(expr, Token::MUL, 1.0)) {
- DCHECK_EQ(kWasmF64, TypeOf(expr->right()));
- if (TypeOf(expr->left()) != kWasmF64) {
- return kToDouble;
- } else {
- return kAsIs;
- }
- } else {
- return kNone;
- }
- }
-
- ConvertOperation MatchBinaryOperation(BinaryOperation* expr) {
- switch (expr->op()) {
- case Token::BIT_OR:
- return MatchOr(expr);
- case Token::SHR:
- return MatchShr(expr);
- case Token::BIT_XOR:
- return MatchXor(expr);
- case Token::MUL:
- return MatchMul(expr);
- default:
- return kNone;
- }
- }
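The four matchers recognize the canonical asm.js reinterpretation idioms: x | 0 (int, as-is), x >>> 0, ~~x (which reaches the builder as a double XOR with 0xffffffff, i.e. -1 as an int32), and x * 1.0 (coercion to double). A condensed, self-contained restatement that flattens MatchXor's nested check into one flag:

    enum ConvertOperation { kNone, kAsIs, kToInt, kToDouble };
    enum class Tok { kBitOr, kShr, kBitXor, kMul };

    // rhs: the literal right operand; lhs_is_i32 / lhs_is_f64: the type
    // already assigned to the effective left operand.
    ConvertOperation MatchCoercion(Tok op, double rhs, bool lhs_is_i32,
                                   bool lhs_is_f64) {
      switch (op) {
        case Tok::kBitOr:   // x | 0: already an int, keep as-is
          return (rhs == 0 && lhs_is_i32) ? kAsIs : kNone;
        case Tok::kShr:     // x >>> 0: int stays, floats convert to int
          return rhs == 0 ? (lhs_is_i32 ? kAsIs : kToInt) : kNone;
        case Tok::kBitXor:  // (x ^ -1) ^ -1, i.e. ~~x
          return rhs == -1 ? (lhs_is_i32 ? kAsIs : kToInt) : kNone;
        case Tok::kMul:     // x * 1.0, i.e. +x
          return rhs == 1.0 ? (lhs_is_f64 ? kAsIs : kToDouble) : kNone;
      }
      return kNone;
    }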
-
-// Work around Mul + Div being defined in PPC assembler.
-#ifdef Mul
-#undef Mul
-#endif
-
-#define NON_SIGNED_BINOP(op) \
- static WasmOpcode opcodes[] = { \
- kExprI32##op, \
- kExprI32##op, \
- kExprF32##op, \
- kExprF64##op \
- }
-
-#define SIGNED_BINOP(op) \
- static WasmOpcode opcodes[] = { \
- kExprI32##op##S, \
- kExprI32##op##U, \
- kExprF32##op, \
- kExprF64##op \
- }
-
-#define NON_SIGNED_INT_BINOP(op) \
- static WasmOpcode opcodes[] = { kExprI32##op, kExprI32##op }
-
-#define BINOP_CASE(token, op, V, ignore_sign) \
- case token: { \
- V(op); \
- int type = TypeIndexOf(expr->left(), expr->right(), ignore_sign); \
- current_function_builder_->Emit(opcodes[type]); \
- break; \
- }
-
- Expression* GetLeft(BinaryOperation* expr) {
- if (expr->op() == Token::BIT_XOR) {
- return expr->left()->AsBinaryOperation()->left();
- } else {
- return expr->left();
- }
- }
-
- void VisitBinaryOperation(BinaryOperation* expr) {
- ConvertOperation convertOperation = MatchBinaryOperation(expr);
- static const bool kDontIgnoreSign = false;
- parent_binop_ = expr;
- if (convertOperation == kToDouble) {
- RECURSE(Visit(expr->left()));
- TypeIndex type = TypeIndexOf(expr->left(), kDontIgnoreSign);
- if (type == kInt32 || type == kFixnum) {
- current_function_builder_->Emit(kExprF64SConvertI32);
- } else if (type == kUint32) {
- current_function_builder_->Emit(kExprF64UConvertI32);
- } else if (type == kFloat32) {
- current_function_builder_->Emit(kExprF64ConvertF32);
- } else {
- UNREACHABLE();
- }
- } else if (convertOperation == kToInt) {
- RECURSE(Visit(GetLeft(expr)));
- TypeIndex type = TypeIndexOf(GetLeft(expr), kDontIgnoreSign);
- if (type == kFloat32) {
- current_function_builder_->Emit(kExprI32AsmjsSConvertF32);
- } else if (type == kFloat64) {
- current_function_builder_->Emit(kExprI32AsmjsSConvertF64);
- } else {
- UNREACHABLE();
- }
- } else if (convertOperation == kAsIs) {
- RECURSE(Visit(GetLeft(expr)));
- } else {
- if (expr->op() == Token::COMMA) {
- RECURSE(VisitForEffect(expr->left()));
- RECURSE(Visit(expr->right()));
- return;
- }
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
-
- switch (expr->op()) {
- BINOP_CASE(Token::ADD, Add, NON_SIGNED_BINOP, true);
- BINOP_CASE(Token::SUB, Sub, NON_SIGNED_BINOP, true);
- BINOP_CASE(Token::MUL, Mul, NON_SIGNED_BINOP, true);
- BINOP_CASE(Token::BIT_OR, Ior, NON_SIGNED_INT_BINOP, true);
- BINOP_CASE(Token::BIT_AND, And, NON_SIGNED_INT_BINOP, true);
- BINOP_CASE(Token::BIT_XOR, Xor, NON_SIGNED_INT_BINOP, true);
- BINOP_CASE(Token::SHL, Shl, NON_SIGNED_INT_BINOP, true);
- BINOP_CASE(Token::SAR, ShrS, NON_SIGNED_INT_BINOP, true);
- BINOP_CASE(Token::SHR, ShrU, NON_SIGNED_INT_BINOP, true);
- case Token::DIV: {
- static WasmOpcode opcodes[] = {kExprI32AsmjsDivS, kExprI32AsmjsDivU,
- kExprF32Div, kExprF64Div};
- int type = TypeIndexOf(expr->left(), expr->right(), false);
- current_function_builder_->Emit(opcodes[type]);
- break;
- }
- case Token::MOD: {
- TypeIndex type = TypeIndexOf(expr->left(), expr->right(), false);
- if (type == kInt32) {
- current_function_builder_->Emit(kExprI32AsmjsRemS);
- } else if (type == kUint32) {
- current_function_builder_->Emit(kExprI32AsmjsRemU);
- } else if (type == kFloat64) {
- current_function_builder_->Emit(kExprF64Mod);
- return;
- } else {
- UNREACHABLE();
- }
- break;
- }
- case Token::COMMA: {
- break;
- }
- default:
- UNREACHABLE();
- }
- }
- }
-
- void VisitCompareOperation(CompareOperation* expr) {
- RECURSE(Visit(expr->left()));
- RECURSE(Visit(expr->right()));
- switch (expr->op()) {
- BINOP_CASE(Token::EQ, Eq, NON_SIGNED_BINOP, false);
- BINOP_CASE(Token::LT, Lt, SIGNED_BINOP, false);
- BINOP_CASE(Token::LTE, Le, SIGNED_BINOP, false);
- BINOP_CASE(Token::GT, Gt, SIGNED_BINOP, false);
- BINOP_CASE(Token::GTE, Ge, SIGNED_BINOP, false);
- default:
- UNREACHABLE();
- }
- }
-
-#undef BINOP_CASE
-#undef NON_SIGNED_INT_BINOP
-#undef SIGNED_BINOP
-#undef NON_SIGNED_BINOP
-
- enum TypeIndex {
- kInt32 = 0,
- kUint32 = 1,
- kFloat32 = 2,
- kFloat64 = 3,
- kFixnum = 4
- };
-
- TypeIndex TypeIndexOf(Expression* left, Expression* right, bool ignore_sign) {
- TypeIndex left_index = TypeIndexOf(left, ignore_sign);
- TypeIndex right_index = TypeIndexOf(right, ignore_sign);
- if (left_index == kFixnum) {
- left_index = right_index;
- }
- if (right_index == kFixnum) {
- right_index = left_index;
- }
- if (left_index == kFixnum && right_index == kFixnum) {
- left_index = kInt32;
- right_index = kInt32;
- }
- if (left_index != right_index) {
- DCHECK(ignore_sign && (left_index <= 1) && (right_index <= 1));
- }
- return left_index;
- }
-
- TypeIndex TypeIndexOf(Expression* expr, bool ignore_sign) {
- AsmType* type = typer_->TypeOf(expr);
- if (type->IsA(AsmType::FixNum())) {
- return kFixnum;
- }
-
- if (type->IsA(AsmType::Signed())) {
- return kInt32;
- }
-
- if (type->IsA(AsmType::Unsigned())) {
- return kUint32;
- }
-
- if (type->IsA(AsmType::Intish())) {
- if (!ignore_sign) {
- // TODO(jpp): log a warning and move on.
- }
- return kInt32;
- }
-
- if (type->IsA(AsmType::Floatish())) {
- return kFloat32;
- }
-
- if (type->IsA(AsmType::DoubleQ())) {
- return kFloat64;
- }
-
- UNREACHABLE();
- return kInt32;
- }
-
-#undef CASE
-#undef NON_SIGNED_INT
-#undef SIGNED
-#undef NON_SIGNED
-
- void VisitThisFunction(ThisFunction* expr) { UNREACHABLE(); }
-
- void VisitDeclarations(Declaration::List* decls) {
- for (Declaration* decl : *decls) {
- RECURSE(Visit(decl));
- if (typer_failed_) {
- return;
- }
- }
- }
-
- void VisitClassLiteral(ClassLiteral* expr) { UNREACHABLE(); }
-
- void VisitSpread(Spread* expr) { UNREACHABLE(); }
-
- void VisitSuperPropertyReference(SuperPropertyReference* expr) {
- UNREACHABLE();
- }
-
- void VisitSuperCallReference(SuperCallReference* expr) { UNREACHABLE(); }
-
- void VisitSloppyBlockFunctionStatement(SloppyBlockFunctionStatement* expr) {
- UNREACHABLE();
- }
-
- void VisitDoExpression(DoExpression* expr) { UNREACHABLE(); }
-
- void VisitRewritableExpression(RewritableExpression* expr) { UNREACHABLE(); }
-
- struct IndexContainer : public ZoneObject {
- uint32_t index;
- };
-
- uint32_t LookupOrInsertLocal(Variable* v, ValueType type) {
- DCHECK_NOT_NULL(current_function_builder_);
- ZoneHashMap::Entry* entry =
- local_variables_.Lookup(v, ComputePointerHash(v));
- if (entry == nullptr) {
- uint32_t index;
- DCHECK(!v->IsParameter());
- index = current_function_builder_->AddLocal(type);
- IndexContainer* container = new (zone()) IndexContainer();
- container->index = index;
- entry = local_variables_.LookupOrInsert(v, ComputePointerHash(v),
- ZoneAllocationPolicy(zone()));
- entry->value = container;
- }
- return (reinterpret_cast<IndexContainer*>(entry->value))->index;
- }
-
- void InsertParameter(Variable* v, ValueType type, uint32_t index) {
- DCHECK(v->IsParameter());
- DCHECK_NOT_NULL(current_function_builder_);
- ZoneHashMap::Entry* entry =
- local_variables_.Lookup(v, ComputePointerHash(v));
- DCHECK_NULL(entry);
- IndexContainer* container = new (zone()) IndexContainer();
- container->index = index;
- entry = local_variables_.LookupOrInsert(v, ComputePointerHash(v),
- ZoneAllocationPolicy(zone()));
- entry->value = container;
- }
-
- uint32_t LookupOrInsertGlobal(Variable* v, ValueType type) {
- ZoneHashMap::Entry* entry =
- global_variables_.Lookup(v, ComputePointerHash(v));
- if (entry == nullptr) {
- uint32_t index = builder_->AddGlobal(type, 0);
- IndexContainer* container = new (zone()) IndexContainer();
- container->index = index;
- entry = global_variables_.LookupOrInsert(v, ComputePointerHash(v),
- ZoneAllocationPolicy(zone()));
- entry->value = container;
- }
- return (reinterpret_cast<IndexContainer*>(entry->value))->index;
- }
-
- WasmFunctionBuilder* LookupOrInsertFunction(Variable* v) {
- DCHECK_NOT_NULL(builder_);
- ZoneHashMap::Entry* entry = functions_.Lookup(v, ComputePointerHash(v));
- if (entry == nullptr) {
- auto* func_type = typer_->TypeOf(v)->AsFunctionType();
- DCHECK_NOT_NULL(func_type);
- // Build the signature for the function.
- ValueType return_type = TypeFrom(func_type->ReturnType());
- const auto& arguments = func_type->Arguments();
- FunctionSig::Builder b(zone(), return_type == kWasmStmt ? 0 : 1,
- arguments.size());
- if (return_type != kWasmStmt) b.AddReturn(return_type);
- for (int i = 0; i < static_cast<int>(arguments.size()); ++i) {
- ValueType type = TypeFrom(arguments[i]);
- DCHECK_NE(kWasmStmt, type);
- b.AddParam(type);
- }
-
- WasmFunctionBuilder* function = builder_->AddFunction(b.Build());
- entry = functions_.LookupOrInsert(v, ComputePointerHash(v),
- ZoneAllocationPolicy(zone()));
- function->SetName(
- {reinterpret_cast<const char*>(v->raw_name()->raw_data()),
- v->raw_name()->length()});
- entry->value = function;
- }
- return (reinterpret_cast<WasmFunctionBuilder*>(entry->value));
- }
-
- ValueType TypeOf(Expression* expr) { return TypeFrom(typer_->TypeOf(expr)); }
-
- ValueType TypeFrom(AsmType* type) {
- if (type->IsA(AsmType::Intish())) {
- return kWasmI32;
- }
-
- if (type->IsA(AsmType::Floatish())) {
- return kWasmF32;
- }
-
- if (type->IsA(AsmType::DoubleQ())) {
- return kWasmF64;
- }
-
- return kWasmStmt;
- }
-
- Zone* zone() { return zone_; }
-
- ZoneHashMap local_variables_;
- ZoneHashMap functions_;
- ZoneHashMap global_variables_;
- AsmScope scope_;
- WasmModuleBuilder* builder_;
- WasmFunctionBuilder* current_function_builder_;
- FunctionLiteral* literal_;
- Isolate* isolate_;
- Zone* zone_;
- CompilationInfo* info_;
- AstValueFactory* ast_value_factory_;
- Handle<Script> script_;
- AsmTyper* typer_;
- bool typer_failed_;
- bool typer_finished_;
- ZoneVector<std::pair<BreakableStatement*, TargetType>> breakable_blocks_;
- ZoneVector<ForeignVariable> foreign_variables_;
- WasmFunctionBuilder* init_function_;
- WasmFunctionBuilder* foreign_init_function_;
- uint32_t next_table_index_;
- ZoneHashMap function_tables_;
- ImportedFunctionTable imported_function_table_;
- // Remember the parent node for reporting the correct location for ToNumber
- // conversions after calls.
- BinaryOperation* parent_binop_;
-
- DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(AsmWasmBuilderImpl);
-};
-
-AsmWasmBuilder::AsmWasmBuilder(CompilationInfo* info)
- : info_(info),
- typer_(info->isolate(), info->zone(), info->script(), info->literal()) {}
-
-// TODO(aseemgarg): probably should take zone (to write wasm to) as input so
-// that zone in constructor may be thrown away once wasm module is written.
-AsmWasmBuilder::Result AsmWasmBuilder::Run(Handle<FixedArray>* foreign_args) {
- HistogramTimerScope asm_wasm_time_scope(
- info_->isolate()->counters()->asm_wasm_translation_time());
-
- Zone* zone = info_->zone();
- AsmWasmBuilderImpl impl(info_->isolate(), zone, info_,
- info_->parse_info()->ast_value_factory(),
- info_->script(), info_->literal(), &typer_);
- bool success = impl.Build();
- if (!success) {
- return {nullptr, nullptr, success};
- }
- *foreign_args = impl.GetForeignArgs();
- ZoneBuffer* module_buffer = new (zone) ZoneBuffer(zone);
- impl.builder_->WriteTo(*module_buffer);
- ZoneBuffer* asm_offsets_buffer = new (zone) ZoneBuffer(zone);
- impl.builder_->WriteAsmJsOffsetTable(*asm_offsets_buffer);
- return {module_buffer, asm_offsets_buffer, success};
-}
-
-const char* AsmWasmBuilder::foreign_init_name = "__foreign_init__";
-const char* AsmWasmBuilder::single_function_name = "__single_function__";
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
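The file above is deleted wholesale: the AST-walking asm.js-to-wasm builder gives way to the dedicated scanner/parser pipeline (asm-parser.cc / asm-scanner.cc). One detail worth keeping in mind when reading the deleted code is the two-argument TypeIndexOf(), which reconciles operand types before indexing into the per-type opcode tables used by the BINOP_CASE macros. A minimal standalone sketch of that reconciliation, not V8 code; the enum values mirror the deleted TypeIndex:

#include <cassert>

enum TypeIndex { kInt32 = 0, kUint32 = 1, kFloat32 = 2, kFloat64 = 3, kFixnum = 4 };

// A fixnum operand (small integer literal) adopts the other operand's type;
// two fixnums default to int32. Differing types are only tolerated when the
// caller ignores sign and both operands are 32-bit integral (index <= 1).
TypeIndex Reconcile(TypeIndex left, TypeIndex right, bool ignore_sign) {
  if (left == kFixnum) left = right;
  if (right == kFixnum) right = left;
  if (left == kFixnum && right == kFixnum) left = right = kInt32;
  if (left != right) assert(ignore_sign && left <= 1 && right <= 1);
  return left;
}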
diff --git a/deps/v8/src/asmjs/asm-wasm-builder.h b/deps/v8/src/asmjs/asm-wasm-builder.h
deleted file mode 100644
index a5db096683..0000000000
--- a/deps/v8/src/asmjs/asm-wasm-builder.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_ASMJS_ASM_WASM_BUILDER_H_
-#define V8_ASMJS_ASM_WASM_BUILDER_H_
-
-#include "src/allocation.h"
-#include "src/asmjs/asm-typer.h"
-#include "src/objects.h"
-#include "src/wasm/wasm-module-builder.h"
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-class CompilationInfo;
-
-namespace wasm {
-
-class AsmWasmBuilder {
- public:
- struct Result {
- ZoneBuffer* module_bytes;
- ZoneBuffer* asm_offset_table;
- bool success;
- };
-
- explicit AsmWasmBuilder(CompilationInfo* info);
- Result Run(Handle<FixedArray>* foreign_args);
-
- static const char* foreign_init_name;
- static const char* single_function_name;
-
- const AsmTyper* typer() { return &typer_; }
-
- private:
- CompilationInfo* info_;
- AsmTyper typer_;
-};
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-#endif // V8_WASM_ASM_WASM_BUILDER_H_
diff --git a/deps/v8/src/asmjs/switch-logic.h b/deps/v8/src/asmjs/switch-logic.h
index 4e967ae35f..3ef34d9461 100644
--- a/deps/v8/src/asmjs/switch-logic.h
+++ b/deps/v8/src/asmjs/switch-logic.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_WASM_SWITCH_LOGIC_H
-#define V8_WASM_SWITCH_LOGIC_H
+#ifndef V8_ASMJS_SWITCH_LOGIC_H
+#define V8_ASMJS_SWITCH_LOGIC_H
#include "src/globals.h"
#include "src/zone/zone-containers.h"
@@ -25,8 +25,9 @@ struct CaseNode : public ZoneObject {
};
V8_EXPORT_PRIVATE CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone);
+
} // namespace wasm
} // namespace internal
} // namespace v8
-#endif
+#endif // V8_ASMJS_SWITCH_LOGIC_H
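The guard rename above follows the V8 convention that the include-guard macro mirrors the header's path and the closing #endif repeats the macro in a comment. The corrected shape, assembled from the hunks above:

#ifndef V8_ASMJS_SWITCH_LOGIC_H
#define V8_ASMJS_SWITCH_LOGIC_H
// ... declarations such as OrderCases() ...
#endif  // V8_ASMJS_SWITCH_LOGIC_H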
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index c549798cd8..20a7b6c51e 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -63,6 +63,7 @@
#include "src/runtime/runtime.h"
#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/serializer-common.h"
+#include "src/string-search.h"
#include "src/wasm/wasm-external-refs.h"
// Include native regexp-macro-assembler.
@@ -90,6 +91,10 @@
#endif // Target architecture.
#endif // V8_INTERPRETED_REGEXP
+#ifdef V8_INTL_SUPPORT
+#include "src/intl.h"
+#endif // V8_INTL_SUPPORT
+
namespace v8 {
namespace internal {
@@ -139,11 +144,8 @@ const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// Implementation of AssemblerBase
AssemblerBase::IsolateData::IsolateData(Isolate* isolate)
- : serializer_enabled_(isolate->serializer_enabled())
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
- ,
+ : serializer_enabled_(isolate->serializer_enabled()),
max_old_generation_size_(isolate->heap()->MaxOldGenerationSize())
-#endif
#if V8_TARGET_ARCH_X64
,
code_range_start_(
@@ -1553,6 +1555,14 @@ ExternalReference ExternalReference::libc_memcpy_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memcpy)));
}
+void* libc_memmove(void* dest, const void* src, size_t n) {
+ return memmove(dest, src, n);
+}
+
+ExternalReference ExternalReference::libc_memmove_function(Isolate* isolate) {
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memmove)));
+}
+
void* libc_memset(void* dest, int byte, size_t n) {
DCHECK_EQ(static_cast<char>(byte), byte);
return memset(dest, byte, n);
@@ -1562,6 +1572,42 @@ ExternalReference ExternalReference::libc_memset_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memset)));
}
+template <typename SubjectChar, typename PatternChar>
+ExternalReference ExternalReference::search_string_raw(Isolate* isolate) {
+ auto f = SearchStringRaw<SubjectChar, PatternChar>;
+ return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
+}
+
+ExternalReference ExternalReference::try_internalize_string_function(
+ Isolate* isolate) {
+ return ExternalReference(Redirect(
+ isolate, FUNCTION_ADDR(StringTable::LookupStringIfExists_NoAllocate)));
+}
+
+#ifdef V8_INTL_SUPPORT
+ExternalReference ExternalReference::intl_convert_one_byte_to_lower(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate, FUNCTION_ADDR(ConvertOneByteToLower)));
+}
+
+ExternalReference ExternalReference::intl_to_latin1_lower_table(
+ Isolate* isolate) {
+ uint8_t* ptr = const_cast<uint8_t*>(ToLatin1LowerTable());
+ return ExternalReference(reinterpret_cast<Address>(ptr));
+}
+#endif // V8_INTL_SUPPORT
+
+// Explicit instantiations for all combinations of 1- and 2-byte strings.
+template ExternalReference
+ExternalReference::search_string_raw<const uint8_t, const uint8_t>(Isolate*);
+template ExternalReference
+ExternalReference::search_string_raw<const uint8_t, const uc16>(Isolate*);
+template ExternalReference
+ExternalReference::search_string_raw<const uc16, const uint8_t>(Isolate*);
+template ExternalReference
+ExternalReference::search_string_raw<const uc16, const uc16>(Isolate*);
+
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
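libc_memmove is added with the same wrap-and-redirect shape already used by libc_memcpy and libc_memset above: a plain thunk with a stable C++ address, wrapped through Redirect() so that simulator builds can intercept the call. A sketch of the pattern with a hypothetical function name (libc_example is not part of this patch):

// Hypothetical example following the pattern above.
void* libc_example(void* dest, const void* src, size_t n) {
  return memmove(dest, src, n);  // delegate to the C library
}

ExternalReference ExternalReference::libc_example_function(Isolate* isolate) {
  // Redirect() indirects through the simulator on non-native targets.
  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_example)));
}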
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index dd476d95c4..65976676b4 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -69,9 +69,7 @@ class AssemblerBase: public Malloced {
IsolateData(const IsolateData&) = default;
bool serializer_enabled_;
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
size_t max_old_generation_size_;
-#endif
#if V8_TARGET_ARCH_X64
Address code_range_start_;
#endif
@@ -325,7 +323,7 @@ class RelocInfo {
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
- CODE_TARGET, // Code target which is not any of the above.
+ CODE_TARGET,
CODE_TARGET_WITH_ID,
EMBEDDED_OBJECT,
// To relocate pointers into the wasm memory embedded in wasm code
@@ -820,6 +818,7 @@ class ExternalReference BASE_EMBEDDED {
static void SetUp();
+ // These functions must use the isolate in a thread-safe way.
typedef void* ExternalReferenceRedirector(Isolate* isolate, void* original,
Type type);
@@ -990,8 +989,19 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference libc_memchr_function(Isolate* isolate);
static ExternalReference libc_memcpy_function(Isolate* isolate);
+ static ExternalReference libc_memmove_function(Isolate* isolate);
static ExternalReference libc_memset_function(Isolate* isolate);
+ static ExternalReference try_internalize_string_function(Isolate* isolate);
+
+#ifdef V8_INTL_SUPPORT
+ static ExternalReference intl_convert_one_byte_to_lower(Isolate* isolate);
+ static ExternalReference intl_to_latin1_lower_table(Isolate* isolate);
+#endif // V8_INTL_SUPPORT
+
+ template <typename SubjectChar, typename PatternChar>
+ static ExternalReference search_string_raw(Isolate* isolate);
+
static ExternalReference page_flags(Page* page);
static ExternalReference ForDeoptEntry(Address entry);
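Because search_string_raw is templated on both the subject and pattern character widths, assembler.cc explicitly instantiates all four combinations and a caller selects one at compile time. A hedged usage sketch (the wrapper function is hypothetical):

// Sketch: fetch the redirected entry point for a one-byte subject and a
// two-byte pattern, as declared above.
void Example(Isolate* isolate) {
  ExternalReference ref =
      ExternalReference::search_string_raw<const uint8_t, const uc16>(isolate);
  USE(ref);  // code generators would embed ref.address() in generated code
}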
diff --git a/deps/v8/src/ast/ast-expression-rewriter.cc b/deps/v8/src/ast/ast-expression-rewriter.cc
index d23612f2b4..a3ee43204a 100644
--- a/deps/v8/src/ast/ast-expression-rewriter.cc
+++ b/deps/v8/src/ast/ast-expression-rewriter.cc
@@ -31,7 +31,7 @@ void AstExpressionRewriter::VisitDeclarations(Declaration::List* declarations) {
void AstExpressionRewriter::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
AST_REWRITE_LIST_ELEMENT(Statement, statements, i);
- // Not stopping when a jump statement is found.
+ if (statements->at(i)->IsJump()) break;
}
}
diff --git a/deps/v8/src/ast/ast-numbering.cc b/deps/v8/src/ast/ast-numbering.cc
index 24ccf79244..202b61b17f 100644
--- a/deps/v8/src/ast/ast-numbering.cc
+++ b/deps/v8/src/ast/ast-numbering.cc
@@ -563,6 +563,7 @@ void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kClassLiteral);
node->set_base_id(ReserveIdRange(ClassLiteral::num_ids()));
+ LanguageModeScope language_mode_scope(this, STRICT);
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
if (node->class_variable_proxy()) {
@@ -715,7 +716,7 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
node->set_dont_optimize_reason(dont_optimize_reason());
node->set_suspend_count(suspend_count_);
- if (FLAG_trace_opt) {
+ if (FLAG_trace_opt && !FLAG_turbo) {
if (disable_crankshaft_reason_ != kNoReason) {
// TODO(leszeks): This is a quick'n'dirty fix to allow the debug name of
// the function to be accessed in the below print. This DCHECK will fail
diff --git a/deps/v8/src/ast/ast-types.cc b/deps/v8/src/ast/ast-types.cc
index 9e14730c97..8ff1d88351 100644
--- a/deps/v8/src/ast/ast-types.cc
+++ b/deps/v8/src/ast/ast-types.cc
@@ -302,16 +302,13 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case FILLER_TYPE:
case ACCESS_CHECK_INFO_TYPE:
case INTERCEPTOR_INFO_TYPE:
- case CALL_HANDLER_INFO_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
case PROMISE_REACTION_JOB_INFO_TYPE:
case FUNCTION_TEMPLATE_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
case ALLOCATION_MEMENTO_TYPE:
- case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
case DEBUG_INFO_TYPE:
- case BREAK_POINT_INFO_TYPE:
case STACK_FRAME_INFO_TYPE:
case CELL_TYPE:
case WEAK_CELL_TYPE:
@@ -319,7 +316,10 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case TUPLE2_TYPE:
case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
- case CONSTANT_ELEMENTS_PAIR_TYPE:
+ case PADDING_TYPE_1:
+ case PADDING_TYPE_2:
+ case PADDING_TYPE_3:
+ case PADDING_TYPE_4:
UNREACHABLE();
return kNone;
}
diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc
index 74613c5eae..a304aa0e00 100644
--- a/deps/v8/src/ast/ast-value-factory.cc
+++ b/deps/v8/src/ast/ast-value-factory.cc
@@ -31,6 +31,7 @@
#include "src/char-predicates-inl.h"
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "src/string-hasher.h"
#include "src/utils.h"
namespace v8 {
@@ -189,10 +190,8 @@ bool AstValue::BooleanValue() const {
case SYMBOL:
UNREACHABLE();
break;
- case NUMBER_WITH_DOT:
case NUMBER:
return DoubleToBoolean(number_);
- case SMI_WITH_DOT:
case SMI:
return smi_ != 0;
case BOOLEAN:
@@ -224,11 +223,9 @@ void AstValue::Internalize(Isolate* isolate) {
break;
}
break;
- case NUMBER_WITH_DOT:
case NUMBER:
set_value(isolate->factory()->NewNumber(number_, TENURED));
break;
- case SMI_WITH_DOT:
case SMI:
set_value(handle(Smi::FromInt(smi_), isolate));
break;
@@ -342,9 +339,8 @@ const AstValue* AstValueFactory::NewSymbol(AstSymbol symbol) {
return AddValue(value);
}
-
-const AstValue* AstValueFactory::NewNumber(double number, bool with_dot) {
- AstValue* value = new (zone_) AstValue(number, with_dot);
+const AstValue* AstValueFactory::NewNumber(double number) {
+ AstValue* value = new (zone_) AstValue(number);
return AddValue(value);
}
diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h
index 19452bc325..34e8b9e1c1 100644
--- a/deps/v8/src/ast/ast-value-factory.h
+++ b/deps/v8/src/ast/ast-value-factory.h
@@ -189,10 +189,6 @@ class AstValue : public ZoneObject {
bool IsNumber() const { return IsSmi() || IsHeapNumber(); }
- bool ContainsDot() const {
- return type_ == NUMBER_WITH_DOT || type_ == SMI_WITH_DOT;
- }
-
const AstRawString* AsString() const {
CHECK_EQ(STRING, type_);
return string_;
@@ -236,10 +232,8 @@ class AstValue : public ZoneObject {
bool BooleanValue() const;
- bool IsSmi() const { return type_ == SMI || type_ == SMI_WITH_DOT; }
- bool IsHeapNumber() const {
- return type_ == NUMBER || type_ == NUMBER_WITH_DOT;
- }
+ bool IsSmi() const { return type_ == SMI; }
+ bool IsHeapNumber() const { return type_ == NUMBER; }
bool IsFalse() const { return type_ == BOOLEAN && !bool_; }
bool IsTrue() const { return type_ == BOOLEAN && bool_; }
bool IsUndefined() const { return type_ == UNDEFINED; }
@@ -267,9 +261,7 @@ class AstValue : public ZoneObject {
STRING,
SYMBOL,
NUMBER,
- NUMBER_WITH_DOT,
SMI,
- SMI_WITH_DOT,
BOOLEAN,
NULL_TYPE,
UNDEFINED,
@@ -284,13 +276,13 @@ class AstValue : public ZoneObject {
symbol_ = symbol;
}
- explicit AstValue(double n, bool with_dot) : next_(nullptr) {
+ explicit AstValue(double n) : next_(nullptr) {
int int_value;
if (DoubleToSmiInteger(n, &int_value)) {
- type_ = with_dot ? SMI_WITH_DOT : SMI;
+ type_ = SMI;
smi_ = int_value;
} else {
- type_ = with_dot ? NUMBER_WITH_DOT : NUMBER;
+ type_ = NUMBER;
number_ = n;
}
}
@@ -481,8 +473,7 @@ class AstValueFactory {
V8_EXPORT_PRIVATE const AstValue* NewString(const AstRawString* string);
// A JavaScript symbol (ECMA-262 edition 6).
const AstValue* NewSymbol(AstSymbol symbol);
- V8_EXPORT_PRIVATE const AstValue* NewNumber(double number,
- bool with_dot = false);
+ V8_EXPORT_PRIVATE const AstValue* NewNumber(double number);
const AstValue* NewSmi(uint32_t number);
const AstValue* NewBoolean(bool b);
const AstValue* NewStringList(ZoneList<const AstRawString*>* strings);
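With the with_dot distinction gone, the slimmed-down AstValue(double) constructor keys the representation solely off DoubleToSmiInteger. A standalone sketch of that classification, assuming 31-bit Smis and using a local stand-in for V8's helper:

#include <cmath>

// Local stand-in for DoubleToSmiInteger: true iff n is an integer in Smi
// range and not -0 (which must stay a heap number to preserve its sign).
bool DoubleToSmiIntegerSketch(double n, int* out) {
  constexpr double kSmiMin = -(1 << 30);  // assuming 31-bit Smis
  constexpr double kSmiMax = (1 << 30) - 1;
  if (n != std::floor(n)) return false;            // not integral (or NaN)
  if (n == 0.0 && std::signbit(n)) return false;   // -0
  if (n < kSmiMin || n > kSmiMax) return false;    // out of Smi range
  *out = static_cast<int>(n);
  return true;
}
// Mirrors AstValue(double): a passing value becomes SMI, otherwise NUMBER.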
diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc
index d7d70ae433..b367df7dae 100644
--- a/deps/v8/src/ast/ast.cc
+++ b/deps/v8/src/ast/ast.cc
@@ -19,6 +19,7 @@
#include "src/elements.h"
#include "src/objects-inl.h"
#include "src/objects/literal-objects.h"
+#include "src/objects/map.h"
#include "src/property-details.h"
#include "src/property.h"
#include "src/string-stream.h"
@@ -147,8 +148,8 @@ bool Expression::IsValidReferenceExpressionOrThis() const {
bool Expression::IsAnonymousFunctionDefinition() const {
return (IsFunctionLiteral() &&
AsFunctionLiteral()->IsAnonymousFunctionDefinition()) ||
- (IsDoExpression() &&
- AsDoExpression()->IsAnonymousFunctionDefinition());
+ (IsClassLiteral() &&
+ AsClassLiteral()->IsAnonymousFunctionDefinition());
}
void Expression::MarkTail() {
@@ -161,12 +162,6 @@ void Expression::MarkTail() {
}
}
-bool DoExpression::IsAnonymousFunctionDefinition() const {
- // This is specifically to allow DoExpressions to represent ClassLiterals.
- return represented_function_ != nullptr &&
- represented_function_->raw_name()->IsEmpty();
-}
-
bool Statement::IsJump() const {
switch (node_type()) {
#define JUMP_NODE_LIST(V) \
@@ -350,6 +345,23 @@ bool FunctionLiteral::NeedsHomeObject(Expression* expr) {
return expr->AsFunctionLiteral()->scope()->NeedsHomeObject();
}
+void FunctionLiteral::ReplaceBodyAndScope(FunctionLiteral* other) {
+ DCHECK_NULL(body_);
+ DCHECK_NOT_NULL(scope_);
+ DCHECK_NOT_NULL(other->scope());
+
+ Scope* outer_scope = scope_->outer_scope();
+
+ body_ = other->body();
+ scope_ = other->scope();
+ scope_->ReplaceOuterScope(outer_scope);
+#ifdef DEBUG
+ scope_->set_replaced_from_parse_task(true);
+#endif
+
+ function_length_ = other->function_length_;
+}
+
ObjectLiteralProperty::ObjectLiteralProperty(Expression* key, Expression* value,
Kind kind, bool is_computed_name)
: LiteralProperty(key, value, is_computed_name),
@@ -490,7 +502,7 @@ void ObjectLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
ObjectLiteral::Property* property = properties()->at(property_index);
Expression* value = property->value();
- if (property->kind() != ObjectLiteral::Property::PROTOTYPE) {
+ if (!property->IsPrototype()) {
if (FunctionLiteral::NeedsHomeObject(value)) {
property->SetSlot(spec->AddStoreICSlot(language_mode));
}
@@ -512,7 +524,7 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
for (int i = properties()->length() - 1; i >= 0; i--) {
ObjectLiteral::Property* property = properties()->at(i);
if (property->is_computed_name()) continue;
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) continue;
+ if (property->IsPrototype()) continue;
Literal* literal = property->key()->AsLiteral();
DCHECK(!literal->IsNullLiteral());
@@ -532,31 +544,42 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
}
}
-
-bool ObjectLiteral::IsBoilerplateProperty(ObjectLiteral::Property* property) {
- return property != NULL &&
- property->kind() != ObjectLiteral::Property::PROTOTYPE;
+void ObjectLiteral::InitFlagsForPendingNullPrototype(int i) {
+ // We still check for __proto__:null after computed property names.
+ for (; i < properties()->length(); i++) {
+ if (properties()->at(i)->IsNullPrototype()) {
+ set_has_null_protoype(true);
+ break;
+ }
+ }
}
void ObjectLiteral::InitDepthAndFlags() {
- if (depth_ > 0) return;
-
- int position = 0;
- // Accumulate the value in local variables and store it at the end.
+ if (is_initialized()) return;
bool is_simple = true;
+ bool has_seen_prototype = false;
int depth_acc = 1;
- uint32_t max_element_index = 0;
+ uint32_t nof_properties = 0;
uint32_t elements = 0;
+ uint32_t max_element_index = 0;
for (int i = 0; i < properties()->length(); i++) {
ObjectLiteral::Property* property = properties()->at(i);
- if (!IsBoilerplateProperty(property)) {
+ if (property->IsPrototype()) {
+ has_seen_prototype = true;
+ // __proto__:null has no side-effects and is set directly on the
+ // boilerplate.
+ if (property->IsNullPrototype()) {
+ set_has_null_protoype(true);
+ continue;
+ }
+ DCHECK(!has_null_prototype());
is_simple = false;
continue;
}
-
- if (static_cast<uint32_t>(position) == boilerplate_properties_ * 2) {
+ if (nof_properties == boilerplate_properties_) {
DCHECK(property->is_computed_name());
is_simple = false;
+ if (!has_seen_prototype) InitFlagsForPendingNullPrototype(i);
break;
}
DCHECK(!property->is_computed_name());
@@ -578,7 +601,7 @@ void ObjectLiteral::InitDepthAndFlags() {
// TODO(verwaest): Remove once we can store them inline.
if (FLAG_track_double_fields &&
(value->IsNumberLiteral() || !is_compile_time_value)) {
- bit_field_ = MayStoreDoublesField::update(bit_field_, true);
+ set_may_store_doubles(true);
}
is_simple = is_simple && is_compile_time_value;
@@ -596,15 +619,12 @@ void ObjectLiteral::InitDepthAndFlags() {
elements++;
}
- // Increment the position for the key and the value.
- position += 2;
+ nof_properties++;
}
- bit_field_ = FastElementsField::update(
- bit_field_,
- (max_element_index <= 32) || ((2 * elements) >= max_element_index));
- bit_field_ = HasElementsField::update(bit_field_, elements > 0);
-
+ set_fast_elements((max_element_index <= 32) ||
+ ((2 * elements) >= max_element_index));
+ set_has_elements(elements > 0);
set_is_simple(is_simple);
set_depth(depth_acc);
}
@@ -616,7 +636,7 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
bool has_seen_proto = false;
for (int i = 0; i < properties()->length(); i++) {
ObjectLiteral::Property* property = properties()->at(i);
- if (!IsBoilerplateProperty(property)) {
+ if (property->IsPrototype()) {
has_seen_proto = true;
continue;
}
@@ -641,9 +661,7 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
int position = 0;
for (int i = 0; i < properties()->length(); i++) {
ObjectLiteral::Property* property = properties()->at(i);
- if (!IsBoilerplateProperty(property)) {
- continue;
- }
+ if (property->IsPrototype()) continue;
if (static_cast<uint32_t>(position) == boilerplate_properties_ * 2) {
DCHECK(property->is_computed_name());
@@ -693,7 +711,7 @@ ElementsKind ArrayLiteral::constant_elements_kind() const {
void ArrayLiteral::InitDepthAndFlags() {
DCHECK_LT(first_spread_index_, 0);
- if (depth_ > 0) return;
+ if (is_initialized()) return;
int constants_length = values()->length();
@@ -1013,6 +1031,24 @@ void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
}
}
+void SmallMapList::AddMapIfMissing(Handle<Map> map, Zone* zone) {
+ if (!Map::TryUpdate(map).ToHandle(&map)) return;
+ for (int i = 0; i < length(); ++i) {
+ if (at(i).is_identical_to(map)) return;
+ }
+ Add(map, zone);
+}
+
+void SmallMapList::FilterForPossibleTransitions(Map* root_map) {
+ for (int i = list_.length() - 1; i >= 0; i--) {
+ if (at(i)->FindRootMap() != root_map) {
+ list_.RemoveElement(list_.at(i));
+ }
+ }
+}
+
+Handle<Map> SmallMapList::at(int i) const { return Handle<Map>(list_.at(i)); }
+
SmallMapList* Expression::GetReceiverTypes() {
switch (node_type()) {
#define NODE_LIST(V) \
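The rewritten InitDepthAndFlags above replaces the position-based bookkeeping with a plain property counter and routes the elements decision through set_fast_elements(). The condition itself reads as a density check; a standalone restatement with the same thresholds:

#include <cstdint>

// Use array-backed (fast) elements when all indices are small, or when at
// least half of the index space up to the maximum index is populated.
bool UseFastElements(uint32_t max_element_index, uint32_t elements) {
  return max_element_index <= 32 || 2 * elements >= max_element_index;
}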
diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h
index 9d7b4de82c..0fc9af621c 100644
--- a/deps/v8/src/ast/ast.h
+++ b/deps/v8/src/ast/ast.h
@@ -252,29 +252,15 @@ class SmallMapList final {
bool is_empty() const { return list_.is_empty(); }
int length() const { return list_.length(); }
- void AddMapIfMissing(Handle<Map> map, Zone* zone) {
- if (!Map::TryUpdate(map).ToHandle(&map)) return;
- for (int i = 0; i < length(); ++i) {
- if (at(i).is_identical_to(map)) return;
- }
- Add(map, zone);
- }
+ void AddMapIfMissing(Handle<Map> map, Zone* zone);
- void FilterForPossibleTransitions(Map* root_map) {
- for (int i = list_.length() - 1; i >= 0; i--) {
- if (at(i)->FindRootMap() != root_map) {
- list_.RemoveElement(list_.at(i));
- }
- }
- }
+ void FilterForPossibleTransitions(Map* root_map);
void Add(Handle<Map> handle, Zone* zone) {
list_.Add(handle.location(), zone);
}
- Handle<Map> at(int i) const {
- return Handle<Map>(list_.at(i));
- }
+ Handle<Map> at(int i) const;
Handle<Map> first() const { return at(0); }
Handle<Map> last() const { return at(length() - 1); }
@@ -484,20 +470,12 @@ class DoExpression final : public Expression {
void set_block(Block* b) { block_ = b; }
VariableProxy* result() { return result_; }
void set_result(VariableProxy* v) { result_ = v; }
- FunctionLiteral* represented_function() { return represented_function_; }
- void set_represented_function(FunctionLiteral* f) {
- represented_function_ = f;
- }
- bool IsAnonymousFunctionDefinition() const;
private:
friend class AstNodeFactory;
DoExpression(Block* block, VariableProxy* result, int pos)
- : Expression(pos, kDoExpression),
- block_(block),
- result_(result),
- represented_function_(nullptr) {
+ : Expression(pos, kDoExpression), block_(block), result_(result) {
DCHECK_NOT_NULL(block_);
DCHECK_NOT_NULL(result_);
}
@@ -506,7 +484,6 @@ class DoExpression final : public Expression {
Block* block_;
VariableProxy* result_;
- FunctionLiteral* represented_function_;
};
@@ -977,11 +954,11 @@ class CaseClause final : public Expression {
CaseClause(Expression* label, ZoneList<Statement*>* statements, int pos);
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ FeedbackSlot feedback_slot_;
Expression* label_;
Label body_target_;
ZoneList<Statement*>* statements_;
AstType* compare_type_;
- FeedbackSlot feedback_slot_;
};
@@ -1238,9 +1215,9 @@ class Literal final : public Expression {
// Base class for literals that need space in the type feedback vector.
class MaterializedLiteral : public Expression {
public:
+ bool is_initialized() const { return 0 < depth_; }
int depth() const {
- // only callable after initialization.
- DCHECK(depth_ >= 1);
+ DCHECK(is_initialized());
return depth_;
}
@@ -1270,10 +1247,11 @@ class MaterializedLiteral : public Expression {
void set_is_simple(bool is_simple) {
bit_field_ = IsSimpleField::update(bit_field_, is_simple);
}
+
friend class CompileTimeValue;
void set_depth(int depth) {
- DCHECK_LE(1, depth);
+ DCHECK(!is_initialized());
depth_ = depth;
}
@@ -1359,6 +1337,11 @@ class ObjectLiteralProperty final : public LiteralProperty {
void set_receiver_type(Handle<Map> map) { receiver_type_ = map; }
+ bool IsNullPrototype() const {
+ return IsPrototype() && value()->IsNullLiteral();
+ }
+ bool IsPrototype() const { return kind() == PROTOTYPE; }
+
private:
friend class AstNodeFactory;
@@ -1396,9 +1379,9 @@ class ObjectLiteral final : public MaterializedLiteral {
bool has_rest_property() const {
return HasRestPropertyField::decode(bit_field_);
}
-
- // Decide if a property should be in the object boilerplate.
- static bool IsBoilerplateProperty(Property* property);
+ bool has_null_prototype() const {
+ return HasNullPrototypeField::decode(bit_field_);
+ }
// Populate the depth field and flags.
void InitDepthAndFlags();
@@ -1426,12 +1409,16 @@ class ObjectLiteral final : public MaterializedLiteral {
// Assemble bitfield of flags for the CreateObjectLiteral helper.
int ComputeFlags(bool disable_mementos = false) const {
int flags = fast_elements() ? kFastElements : kNoFlags;
- if (has_shallow_properties()) {
- flags |= kShallowProperties;
- }
- if (disable_mementos) {
- flags |= kDisableMementos;
- }
+ if (has_shallow_properties()) flags |= kShallowProperties;
+ if (disable_mementos) flags |= kDisableMementos;
+ if (has_null_prototype()) flags |= kHasNullPrototype;
+ return flags;
+ }
+
+ int EncodeLiteralType() {
+ int flags = fast_elements() ? kFastElements : kNoFlags;
+ if (has_shallow_properties()) flags |= kShallowProperties;
+ if (has_null_prototype()) flags |= kHasNullPrototype;
return flags;
}
@@ -1440,7 +1427,7 @@ class ObjectLiteral final : public MaterializedLiteral {
kFastElements = 1,
kShallowProperties = 1 << 1,
kDisableMementos = 1 << 2,
- kHasRestProperty = 1 << 3,
+ kHasNullPrototype = 1 << 3,
};
struct Accessors: public ZoneObject {
@@ -1476,12 +1463,28 @@ class ObjectLiteral final : public MaterializedLiteral {
bit_field_ |= FastElementsField::encode(false) |
HasElementsField::encode(false) |
MayStoreDoublesField::encode(false) |
- HasRestPropertyField::encode(has_rest_property);
+ HasRestPropertyField::encode(has_rest_property) |
+ HasNullPrototypeField::encode(false);
}
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ void InitFlagsForPendingNullPrototype(int i);
+
+ void set_may_store_doubles(bool may_store_doubles) {
+ bit_field_ = MayStoreDoublesField::update(bit_field_, may_store_doubles);
+ }
+ void set_fast_elements(bool fast_elements) {
+ bit_field_ = FastElementsField::update(bit_field_, fast_elements);
+ }
+ void set_has_elements(bool has_elements) {
+ bit_field_ = HasElementsField::update(bit_field_, has_elements);
+ }
+ void set_has_null_protoype(bool has_null_prototype) {
+ bit_field_ = HasNullPrototypeField::update(bit_field_, has_null_prototype);
+ }
+
uint32_t boilerplate_properties_;
Handle<BoilerplateDescription> constant_properties_;
ZoneList<Property*>* properties_;
@@ -1494,6 +1497,8 @@ class ObjectLiteral final : public MaterializedLiteral {
: public BitField<bool, HasElementsField::kNext, 1> {};
class HasRestPropertyField
: public BitField<bool, MayStoreDoublesField::kNext, 1> {};
+ class HasNullPrototypeField
+ : public BitField<bool, HasRestPropertyField::kNext, 1> {};
};
@@ -1582,9 +1587,7 @@ class ArrayLiteral final : public MaterializedLiteral {
// Assemble bitfield of flags for the CreateArrayLiteral helper.
int ComputeFlags(bool disable_mementos = false) const {
int flags = depth() == 1 ? kShallowElements : kNoFlags;
- if (disable_mementos) {
- flags |= kDisableMementos;
- }
+ if (disable_mementos) flags |= kDisableMementos;
return flags;
}
@@ -1668,7 +1671,11 @@ class VariableProxy final : public Expression {
}
HoleCheckMode hole_check_mode() const {
- return HoleCheckModeField::decode(bit_field_);
+ HoleCheckMode mode = HoleCheckModeField::decode(bit_field_);
+ DCHECK_IMPLIES(mode == HoleCheckMode::kRequired,
+ var()->binding_needs_init() ||
+ var()->local_if_not_shadowed()->binding_needs_init());
+ return mode;
}
void set_needs_hole_check() {
bit_field_ =
@@ -2160,10 +2167,10 @@ class BinaryOperation final : public Expression {
BinaryOperation(Token::Value op, Expression* left, Expression* right, int pos)
: Expression(pos, kBinaryOperation),
- has_fixed_right_arg_(false),
- fixed_right_arg_value_(0),
left_(left),
- right_(right) {
+ right_(right),
+ has_fixed_right_arg_(false),
+ fixed_right_arg_value_(0) {
bit_field_ |= OperatorField::encode(op);
DCHECK(Token::IsBinaryOp(op));
}
@@ -2171,14 +2178,14 @@ class BinaryOperation final : public Expression {
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ FeedbackSlot feedback_slot_;
+ Expression* left_;
+ Expression* right_;
+ Handle<AllocationSite> allocation_site_;
// TODO(rossberg): the fixed arg should probably be represented as a Constant
  // type for the RHS. Currently it's actually a Maybe<int>.
bool has_fixed_right_arg_;
int fixed_right_arg_value_;
- Expression* left_;
- Expression* right_;
- Handle<AllocationSite> allocation_site_;
- FeedbackSlot feedback_slot_;
class OperatorField
: public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
@@ -2306,11 +2313,11 @@ class CompareOperation final : public Expression {
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+ FeedbackSlot feedback_slot_;
Expression* left_;
Expression* right_;
-
AstType* combined_type_;
- FeedbackSlot feedback_slot_;
+
class OperatorField
: public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
};
@@ -2645,6 +2652,14 @@ class FunctionLiteral final : public Expression {
bool AllowsLazyCompilation();
+ bool CanSuspend() {
+ if (suspend_count() > 0) {
+ DCHECK(IsResumableFunction(kind()));
+ return true;
+ }
+ return false;
+ }
+
Handle<String> debug_name() const {
if (raw_name_ != NULL && !raw_name_->IsEmpty()) {
return raw_name_->string();
@@ -2742,6 +2757,8 @@ class FunctionLiteral final : public Expression {
function_literal_id_ = function_literal_id;
}
+ void ReplaceBodyAndScope(FunctionLiteral* other);
+
private:
friend class AstNodeFactory;
@@ -2827,6 +2844,7 @@ class ClassLiteral final : public Expression {
public:
typedef ClassLiteralProperty Property;
+ Scope* scope() const { return scope_; }
VariableProxy* class_variable_proxy() const { return class_variable_proxy_; }
Expression* extends() const { return extends_; }
void set_extends(Expression* e) { extends_ = e; }
@@ -2842,6 +2860,13 @@ class ClassLiteral final : public Expression {
return HasStaticComputedNames::decode(bit_field_);
}
+ bool is_anonymous_expression() const {
+ return IsAnonymousExpression::decode(bit_field_);
+ }
+ bool IsAnonymousFunctionDefinition() const {
+ return is_anonymous_expression();
+ }
+
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
@@ -2858,23 +2883,27 @@ class ClassLiteral final : public Expression {
private:
friend class AstNodeFactory;
- ClassLiteral(VariableProxy* class_variable_proxy, Expression* extends,
- FunctionLiteral* constructor, ZoneList<Property*>* properties,
- int start_position, int end_position,
- bool has_name_static_property, bool has_static_computed_names)
+ ClassLiteral(Scope* scope, VariableProxy* class_variable_proxy,
+ Expression* extends, FunctionLiteral* constructor,
+ ZoneList<Property*>* properties, int start_position,
+ int end_position, bool has_name_static_property,
+ bool has_static_computed_names, bool is_anonymous)
: Expression(start_position, kClassLiteral),
end_position_(end_position),
+ scope_(scope),
class_variable_proxy_(class_variable_proxy),
extends_(extends),
constructor_(constructor),
properties_(properties) {
bit_field_ |= HasNameStaticProperty::encode(has_name_static_property) |
- HasStaticComputedNames::encode(has_static_computed_names);
+ HasStaticComputedNames::encode(has_static_computed_names) |
+ IsAnonymousExpression::encode(is_anonymous);
}
int end_position_;
FeedbackSlot home_object_slot_;
FeedbackSlot proxy_slot_;
+ Scope* scope_;
VariableProxy* class_variable_proxy_;
Expression* extends_;
FunctionLiteral* constructor_;
@@ -2884,6 +2913,8 @@ class ClassLiteral final : public Expression {
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
class HasStaticComputedNames
: public BitField<bool, HasNameStaticProperty::kNext, 1> {};
+ class IsAnonymousExpression
+ : public BitField<bool, HasStaticComputedNames::kNext, 1> {};
};
@@ -2909,9 +2940,9 @@ class NativeFunctionLiteral final : public Expression {
name_(name),
extension_(extension) {}
+ FeedbackSlot literal_feedback_slot_;
const AstRawString* name_;
v8::Extension* extension_;
- FeedbackSlot literal_feedback_slot_;
};
@@ -3211,17 +3242,12 @@ class AstVisitor BASE_EMBEDDED {
class AstNodeFactory final BASE_EMBEDDED {
public:
- explicit AstNodeFactory(AstValueFactory* ast_value_factory)
- : zone_(nullptr), ast_value_factory_(ast_value_factory) {
- if (ast_value_factory != nullptr) {
- zone_ = ast_value_factory->zone();
- }
- }
+ AstNodeFactory(AstValueFactory* ast_value_factory, Zone* zone)
+ : zone_(zone), ast_value_factory_(ast_value_factory) {}
AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
void set_ast_value_factory(AstValueFactory* ast_value_factory) {
ast_value_factory_ = ast_value_factory;
- zone_ = ast_value_factory->zone();
}
VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy,
@@ -3370,9 +3396,8 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) Literal(ast_value_factory_->NewSymbol(symbol), pos);
}
- Literal* NewNumberLiteral(double number, int pos, bool with_dot = false) {
- return new (zone_)
- Literal(ast_value_factory_->NewNumber(number, with_dot), pos);
+ Literal* NewNumberLiteral(double number, int pos) {
+ return new (zone_) Literal(ast_value_factory_->NewNumber(number), pos);
}
Literal* NewSmiLiteral(uint32_t number, int pos) {
@@ -3590,15 +3615,18 @@ class AstNodeFactory final BASE_EMBEDDED {
ClassLiteral::Property(key, value, kind, is_static, is_computed_name);
}
- ClassLiteral* NewClassLiteral(VariableProxy* proxy, Expression* extends,
+ ClassLiteral* NewClassLiteral(Scope* scope, VariableProxy* proxy,
+ Expression* extends,
FunctionLiteral* constructor,
ZoneList<ClassLiteral::Property*>* properties,
int start_position, int end_position,
bool has_name_static_property,
- bool has_static_computed_names) {
- return new (zone_) ClassLiteral(
- proxy, extends, constructor, properties, start_position, end_position,
- has_name_static_property, has_static_computed_names);
+ bool has_static_computed_names,
+ bool is_anonymous) {
+ return new (zone_)
+ ClassLiteral(scope, proxy, extends, constructor, properties,
+ start_position, end_position, has_name_static_property,
+ has_static_computed_names, is_anonymous);
}
NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name,
@@ -3646,24 +3674,6 @@ class AstNodeFactory final BASE_EMBEDDED {
Zone* zone() const { return zone_; }
void set_zone(Zone* zone) { zone_ = zone; }
- // Handles use of temporary zones when parsing inner function bodies.
- class BodyScope {
- public:
- BodyScope(AstNodeFactory* factory, Zone* temp_zone, bool use_temp_zone)
- : factory_(factory), prev_zone_(factory->zone_) {
- if (use_temp_zone) {
- factory->zone_ = temp_zone;
- }
- }
-
- void Reset() { factory_->zone_ = prev_zone_; }
- ~BodyScope() { Reset(); }
-
- private:
- AstNodeFactory* factory_;
- Zone* prev_zone_;
- };
-
private:
// This zone may be deallocated upon returning from parsing a function body
// which we can guarantee is not going to be compiled or have its AST
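ComputeFlags() and the new EncodeLiteralType() above pack the boilerplate properties into a small bitfield, with kHasRestProperty's old bit (1 << 3) reused for kHasNullPrototype. A sketch of decoding such a flag word; the enum values are copied from the hunk above, while the decoder helpers are hypothetical:

enum LiteralFlags {
  kNoFlags = 0,
  kFastElements = 1,
  kShallowProperties = 1 << 1,
  kDisableMementos = 1 << 2,
  kHasNullPrototype = 1 << 3,
};

// Hypothetical decoders mirroring ObjectLiteral::ComputeFlags() above.
bool HasFastElements(int flags) { return (flags & kFastElements) != 0; }
bool HasNullPrototype(int flags) { return (flags & kHasNullPrototype) != 0; }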
diff --git a/deps/v8/src/ast/compile-time-value.cc b/deps/v8/src/ast/compile-time-value.cc
index 27dd29fee0..b86343d059 100644
--- a/deps/v8/src/ast/compile-time-value.cc
+++ b/deps/v8/src/ast/compile-time-value.cc
@@ -24,28 +24,24 @@ Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
Factory* factory = isolate->factory();
DCHECK(IsCompileTimeValue(expression));
Handle<FixedArray> result = factory->NewFixedArray(2, TENURED);
- ObjectLiteral* object_literal = expression->AsObjectLiteral();
- if (object_literal != NULL) {
+ if (expression->IsObjectLiteral()) {
+ ObjectLiteral* object_literal = expression->AsObjectLiteral();
DCHECK(object_literal->is_simple());
- if (object_literal->fast_elements()) {
- result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
- } else {
- result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_SLOW_ELEMENTS));
- }
+ int literalTypeFlag = object_literal->EncodeLiteralType();
+ DCHECK_NE(kArrayLiteralFlag, literalTypeFlag);
+ result->set(kLiteralTypeSlot, Smi::FromInt(literalTypeFlag));
result->set(kElementsSlot, *object_literal->constant_properties());
} else {
ArrayLiteral* array_literal = expression->AsArrayLiteral();
DCHECK(array_literal != NULL && array_literal->is_simple());
- result->set(kLiteralTypeSlot, Smi::FromInt(ARRAY_LITERAL));
+ result->set(kLiteralTypeSlot, Smi::FromInt(kArrayLiteralFlag));
result->set(kElementsSlot, *array_literal->constant_elements());
}
return result;
}
-CompileTimeValue::LiteralType CompileTimeValue::GetLiteralType(
- Handle<FixedArray> value) {
- Smi* literal_type = Smi::cast(value->get(kLiteralTypeSlot));
- return static_cast<LiteralType>(literal_type->value());
+int CompileTimeValue::GetLiteralTypeFlags(Handle<FixedArray> value) {
+ return Smi::cast(value->get(kLiteralTypeSlot))->value();
}
Handle<HeapObject> CompileTimeValue::GetElements(Handle<FixedArray> value) {
diff --git a/deps/v8/src/ast/compile-time-value.h b/deps/v8/src/ast/compile-time-value.h
index d61443e583..e8ded43122 100644
--- a/deps/v8/src/ast/compile-time-value.h
+++ b/deps/v8/src/ast/compile-time-value.h
@@ -17,19 +17,20 @@ class Expression;
// can be fully handled at compile time.
class CompileTimeValue : public AllStatic {
public:
- enum LiteralType {
- OBJECT_LITERAL_FAST_ELEMENTS,
- OBJECT_LITERAL_SLOW_ELEMENTS,
- ARRAY_LITERAL
- };
+ // This is a special marker used to encode array literals. The value has to be
+ // different from any value possibly returned by
+ // ObjectLiteral::EncodeLiteralType.
+ static const int kArrayLiteralFlag = -1;
static bool IsCompileTimeValue(Expression* expression);
// Get the value as a compile time value.
static Handle<FixedArray> GetValue(Isolate* isolate, Expression* expression);
- // Get the type of a compile time value returned by GetValue().
- static LiteralType GetLiteralType(Handle<FixedArray> value);
+ // Get the encoded literal type. This can either be kArrayLiteralFlag or
+ // encoded properties of an ObjectLiteral returned by
+ // ObjectLiteral::EncodeLiteralType.
+ static int GetLiteralTypeFlags(Handle<FixedArray> value);
// Get the elements of a compile time value returned by GetValue().
static Handle<HeapObject> GetElements(Handle<FixedArray> value);
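With the LiteralType enum gone, CompileTimeValue callers now branch on the raw flag word: kArrayLiteralFlag (-1) marks array literals, and any other value is an ObjectLiteral::EncodeLiteralType() bitfield. A hedged sketch of the consumption pattern (the wrapper function is hypothetical):

// Sketch: value is a Handle<FixedArray> from CompileTimeValue::GetValue().
void Describe(Handle<FixedArray> value) {
  int flags = CompileTimeValue::GetLiteralTypeFlags(value);
  if (flags == CompileTimeValue::kArrayLiteralFlag) {
    // Array literal: kElementsSlot holds the constant elements.
  } else {
    // Object literal: e.g. test (flags & ObjectLiteral::kFastElements) to
    // recover what was previously OBJECT_LITERAL_FAST_ELEMENTS.
  }
}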
diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc
index 99be5cd343..f4c21d7513 100644
--- a/deps/v8/src/ast/scopes.cc
+++ b/deps/v8/src/ast/scopes.cc
@@ -612,7 +612,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
Variable* var = DeclareVariableName(name, VAR);
if (var != kDummyPreParserVariable &&
var != kDummyPreParserLexicalVariable) {
- DCHECK(FLAG_preparser_scope_analysis);
+ DCHECK(FLAG_experimental_preparser_scope_analysis);
var->set_maybe_assigned();
}
}
@@ -644,7 +644,7 @@ void DeclarationScope::Analyze(ParseInfo* info, Isolate* isolate,
}
if (scope->is_eval_scope() && is_sloppy(scope->language_mode())) {
- AstNodeFactory factory(info->ast_value_factory());
+ AstNodeFactory factory(info->ast_value_factory(), info->zone());
scope->HoistSloppyBlockFunctions(&factory);
}
@@ -662,7 +662,7 @@ void DeclarationScope::Analyze(ParseInfo* info, Isolate* isolate,
scope->set_should_eager_compile();
if (scope->must_use_preparsed_scope_data_) {
- DCHECK(FLAG_preparser_scope_analysis);
+ DCHECK(FLAG_experimental_preparser_scope_analysis);
DCHECK_NOT_NULL(info->preparsed_scope_data());
DCHECK_EQ(scope->scope_type_, ScopeType::FUNCTION_SCOPE);
info->preparsed_scope_data()->RestoreData(scope);
@@ -1044,7 +1044,7 @@ Variable* DeclarationScope::DeclareParameterName(
if (name == ast_value_factory->arguments_string()) {
has_arguments_parameter_ = true;
}
- if (FLAG_preparser_scope_analysis) {
+ if (FLAG_experimental_preparser_scope_analysis) {
Variable* var = Declare(zone(), name, VAR);
params_.Add(var, zone());
return var;
@@ -1205,7 +1205,7 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
DCHECK(scope_info_.is_null());
// Declare the variable in the declaration scope.
- if (FLAG_preparser_scope_analysis) {
+ if (FLAG_experimental_preparser_scope_analysis) {
Variable* var = LookupLocal(name);
DCHECK_NE(var, kDummyPreParserLexicalVariable);
DCHECK_NE(var, kDummyPreParserVariable);
@@ -1332,7 +1332,7 @@ Declaration* Scope::CheckLexDeclarationsConflictingWith(
void DeclarationScope::AllocateVariables(ParseInfo* info, Isolate* isolate,
AnalyzeMode mode) {
// Module variables must be allocated before variable resolution
- // to ensure that AccessNeedsHoleCheck() can detect import variables.
+ // to ensure that UpdateNeedsHoleCheck() can detect import variables.
if (is_module_scope()) AsModuleScope()->AllocateModuleVariables();
ResolveVariablesRecursively(info);
@@ -1371,9 +1371,10 @@ bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
if (s->is_catch_scope()) continue;
// With scopes do not introduce variables that need allocation.
if (s->is_with_scope()) continue;
- // If everything is guaranteed to be context allocated we can ignore the
- // scope.
- if (s->has_forced_context_allocation()) continue;
+ // Module scopes context-allocate all variables, and have no
+ // {this} or {arguments} variables whose existence depends on
+ // references to them.
+ if (s->is_module_scope()) continue;
// Only block scopes and function scopes should disallow preparsing.
DCHECK(s->is_block_scope() || s->is_function_scope());
return false;
@@ -1407,19 +1408,6 @@ int Scope::ContextChainLengthUntilOutermostSloppyEval() const {
return result;
}
-int Scope::MaxNestedContextChainLength() {
- int max_context_chain_length = 0;
- for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
- if (scope->is_function_scope()) continue;
- max_context_chain_length = std::max(scope->MaxNestedContextChainLength(),
- max_context_chain_length);
- }
- if (NeedsContext()) {
- max_context_chain_length += 1;
- }
- return max_context_chain_length;
-}
-
DeclarationScope* Scope::GetDeclarationScope() {
Scope* scope = this;
while (!scope->is_declaration_scope()) {
@@ -1506,6 +1494,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
inner_scope_ = nullptr;
unresolved_ = nullptr;
sloppy_block_function_map_ = nullptr;
+ rare_data_ = nullptr;
if (aborted) {
// Prepare scope for use in the outer zone.
@@ -1552,7 +1541,8 @@ void DeclarationScope::AnalyzePartially(
arguments_ = nullptr;
}
- if (FLAG_preparser_scope_analysis && preparsed_scope_data->Producing()) {
+ if (FLAG_experimental_preparser_scope_analysis &&
+ preparsed_scope_data->Producing()) {
// Store the information needed for allocating the locals of this scope
// and its inner scopes.
preparsed_scope_data->SaveData(this);
@@ -1639,6 +1629,12 @@ void PrintVar(int indent, Variable* var) {
if (var->maybe_assigned() == kNotAssigned) {
if (comma) PrintF(", ");
PrintF("never assigned");
+ comma = true;
+ }
+ if (var->initialization_flag() == kNeedsInitialization &&
+ !var->binding_needs_init()) {
+ if (comma) PrintF(", ");
+ PrintF("hole initialization elided");
}
PrintF("\n");
}
@@ -1798,7 +1794,9 @@ void Scope::CheckZones() {
DCHECK_NULL(scope->inner_scope_);
continue;
}
- CHECK_EQ(scope->zone(), zone());
+ if (!scope->replaced_from_parse_task()) {
+ CHECK_EQ(scope->zone(), zone());
+ }
scope->CheckZones();
}
}
@@ -1910,25 +1908,28 @@ void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
namespace {
-bool AccessNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
+void SetNeedsHoleCheck(Variable* var, VariableProxy* proxy) {
+ proxy->set_needs_hole_check();
+ var->ForceHoleInitialization();
+}
+
+void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
if (var->mode() == DYNAMIC_LOCAL) {
// Dynamically introduced variables never need a hole check (since they're
// VAR bindings, either from var or function declarations), but the variable
// they shadow might need a hole check, which we want to do if we decide
     // that no shadowing variable was dynamically introduced.
- DCHECK(!var->binding_needs_init());
- return AccessNeedsHoleCheck(var->local_if_not_shadowed(), proxy, scope);
+ DCHECK_EQ(kCreatedInitialized, var->initialization_flag());
+ return UpdateNeedsHoleCheck(var->local_if_not_shadowed(), proxy, scope);
}
- if (!var->binding_needs_init()) {
- return false;
- }
+ if (var->initialization_flag() == kCreatedInitialized) return;
// It's impossible to eliminate module import hole checks here, because it's
// unknown at compilation time whether the binding referred to in the
// exporting module itself requires hole checks.
if (var->location() == VariableLocation::MODULE && !var->IsExport()) {
- return true;
+ return SetNeedsHoleCheck(var, proxy);
}
// Check if the binding really needs an initialization check. The check
@@ -1939,7 +1940,7 @@ bool AccessNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
// the source physically located after the initializer of the variable,
// and that the initializer cannot be skipped due to a nonlinear scope.
//
- // The condition on the declaration scopes is a conservative check for
+ // The condition on the closure scopes is a conservative check for
// nested functions that access a binding and are called before the
// binding is initialized:
// function() { f(); let x = 1; function f() { x = 2; } }
@@ -1949,22 +1950,24 @@ bool AccessNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
// switch (1) { case 0: let x = 2; case 1: f(x); }
// The scope of the variable needs to be checked, in case the use is
// in a sub-block which may be linear.
- if (var->scope()->GetDeclarationScope() != scope->GetDeclarationScope()) {
- return true;
+ if (var->scope()->GetClosureScope() != scope->GetClosureScope()) {
+ return SetNeedsHoleCheck(var, proxy);
}
if (var->is_this()) {
- DCHECK(IsDerivedConstructor(scope->GetDeclarationScope()->function_kind()));
+ DCHECK(IsDerivedConstructor(scope->GetClosureScope()->function_kind()));
// TODO(littledan): implement 'this' hole check elimination.
- return true;
+ return SetNeedsHoleCheck(var, proxy);
}
// We should always have valid source positions.
DCHECK(var->initializer_position() != kNoSourcePosition);
DCHECK(proxy->position() != kNoSourcePosition);
- return var->scope()->is_nonlinear() ||
- var->initializer_position() >= proxy->position();
+ if (var->scope()->is_nonlinear() ||
+ var->initializer_position() >= proxy->position()) {
+ return SetNeedsHoleCheck(var, proxy);
+ }
}
} // anonymous namespace
@@ -1992,7 +1995,7 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
#endif
DCHECK_NOT_NULL(var);
- if (AccessNeedsHoleCheck(var, proxy, this)) proxy->set_needs_hole_check();
+ UpdateNeedsHoleCheck(var, proxy, this);
proxy->BindTo(var);
}
@@ -2031,7 +2034,7 @@ VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
ParseInfo* info,
VariableProxy* stack) {
// Module variables must be allocated before variable resolution
- // to ensure that AccessNeedsHoleCheck() can detect import variables.
+ // to ensure that UpdateNeedsHoleCheck() can detect import variables.
if (info != nullptr && is_module_scope()) {
AsModuleScope()->AllocateModuleVariables();
}
@@ -2257,7 +2260,8 @@ void ModuleScope::AllocateModuleVariables() {
void Scope::AllocateVariablesRecursively() {
DCHECK(!already_resolved_);
- DCHECK_IMPLIES(!FLAG_preparser_scope_analysis, num_stack_slots_ == 0);
+ DCHECK_IMPLIES(!FLAG_experimental_preparser_scope_analysis,
+ num_stack_slots_ == 0);
// Don't allocate variables of preparsed scopes.
if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
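The AccessNeedsHoleCheck to UpdateNeedsHoleCheck rewrite above changes the protocol from query to side effect: instead of returning a bool for the caller to act on, the check records its verdict on both sides of the binding. A condensed restatement under that reading (the function name is hypothetical; the real call site is ResolveTo() above):

void ResolveToSketch(VariableProxy* proxy, Variable* var, Scope* scope) {
  // Only when a runtime TDZ check is unavoidable does UpdateNeedsHoleCheck()
  // call SetNeedsHoleCheck(), which flags the proxy and forces hole
  // initialization on the variable, keeping Variable::binding_needs_init()
  // consistent with the proxies that reference it.
  UpdateNeedsHoleCheck(var, proxy, scope);
  proxy->BindTo(var);
}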
diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h
index c7de9e88ee..35c0bb0b2d 100644
--- a/deps/v8/src/ast/scopes.h
+++ b/deps/v8/src/ast/scopes.h
@@ -400,10 +400,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// sloppy eval call. One if this->calls_sloppy_eval().
int ContextChainLengthUntilOutermostSloppyEval() const;
- // The maximum number of nested contexts required for this scope and any inner
- // scopes.
- int MaxNestedContextChainLength();
-
// Find the first function, script, eval or (declaration) block scope. This is
// the scope where var declarations will be hoisted to in the implementation.
DeclarationScope* GetDeclarationScope();
@@ -456,6 +452,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Check that all Scopes in the scope tree use the same Zone.
void CheckZones();
+
+ bool replaced_from_parse_task() const { return replaced_from_parse_task_; }
+ void set_replaced_from_parse_task(bool replaced_from_parse_task) {
+ replaced_from_parse_task_ = replaced_from_parse_task;
+ }
#endif
// Retrieve `IsSimpleParameterList` of current or outer function.
@@ -535,6 +536,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// True if this scope may contain objects from a temp zone that needs to be
// fixed up.
bool needs_migration_;
+
+  // True if the scope comes from another zone, as a result of being created
+  // in a parse task.
+ bool replaced_from_parse_task_ = false;
#endif
// Source positions.
@@ -651,7 +656,12 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
}
// Inform the scope that the corresponding code uses "super".
- void RecordSuperPropertyUsage() { scope_uses_super_property_ = true; }
+ void RecordSuperPropertyUsage() {
+ DCHECK((IsConciseMethod(function_kind()) ||
+ IsAccessorFunction(function_kind()) ||
+ IsClassConstructor(function_kind())));
+ scope_uses_super_property_ = true;
+ }
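// Note (not from this commit): the new DCHECK encodes where the parser may
// legally record a `super.foo` use; only method-like scopes have a home
// object, and arrow functions delegate to the enclosing method scope. In
// JavaScript terms:
//
//   class A { m() { return super.toString(); } }  // concise method: recorded
//   var f = function() { return super.x; };       // SyntaxError, never here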
// Does this scope access "super" property (super.foo).
bool uses_super_property() const { return scope_uses_super_property_; }
diff --git a/deps/v8/src/ast/variables.cc b/deps/v8/src/ast/variables.cc
index cd1d8f77b7..c6611bd0d9 100644
--- a/deps/v8/src/ast/variables.cc
+++ b/deps/v8/src/ast/variables.cc
@@ -27,6 +27,7 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
InitializationFlagField::encode(initialization_flag) |
VariableModeField::encode(mode) | IsUsedField::encode(false) |
ForceContextAllocationField::encode(false) |
+ ForceHoleInitializationField::encode(false) |
LocationField::encode(VariableLocation::UNALLOCATED) |
VariableKindField::encode(kind)) {
// Var declared variables never need initialization.
diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h
index 3eaa105168..c01db36274 100644
--- a/deps/v8/src/ast/variables.h
+++ b/deps/v8/src/ast/variables.h
@@ -66,11 +66,47 @@ class Variable final : public ZoneObject {
bool IsGlobalObjectProperty() const;
bool is_dynamic() const { return IsDynamicVariableMode(mode()); }
+
+ // Returns the InitializationFlag this Variable was created with.
+ // Scope analysis may allow us to relax this initialization
+ // requirement, which will be reflected in the return value of
+ // binding_needs_init().
+ InitializationFlag initialization_flag() const {
+ return InitializationFlagField::decode(bit_field_);
+ }
+
+ // Whether this variable needs to be initialized with the hole at
+ // declaration time. Only returns valid results after scope analysis.
bool binding_needs_init() const {
- DCHECK(initialization_flag() != kNeedsInitialization ||
- IsLexicalVariableMode(mode()));
+ DCHECK_IMPLIES(initialization_flag() == kNeedsInitialization,
+ IsLexicalVariableMode(mode()));
+ DCHECK_IMPLIES(ForceHoleInitializationField::decode(bit_field_),
+ initialization_flag() == kNeedsInitialization);
+
+ // Always initialize if hole initialization was forced during
+ // scope analysis.
+ if (ForceHoleInitializationField::decode(bit_field_)) return true;
+
+ // If initialization was not forced, no need for initialization
+ // for stack allocated variables, since UpdateNeedsHoleCheck()
+ // in scopes.cc has proven that no VariableProxy refers to
+ // this variable in such a way that a runtime hole check
+ // would be generated.
+ if (IsStackAllocated()) return false;
+
+ // Otherwise, defer to the flag set when this Variable was constructed.
return initialization_flag() == kNeedsInitialization;
}
+
+ // Called during scope analysis when a VariableProxy is found to
+ // reference this Variable in such a way that a hole check will
+ // be required at runtime.
+ void ForceHoleInitialization() {
+ DCHECK_EQ(kNeedsInitialization, initialization_flag());
+ DCHECK(IsLexicalVariableMode(mode()));
+ bit_field_ = ForceHoleInitializationField::update(bit_field_, true);
+ }
+
bool throw_on_const_assignment(LanguageMode language_mode) const {
return kind() != SLOPPY_FUNCTION_NAME_VARIABLE || is_strict(language_mode);
}
@@ -94,9 +130,6 @@ class Variable final : public ZoneObject {
return LocationField::decode(bit_field_);
}
VariableKind kind() const { return VariableKindField::decode(bit_field_); }
- InitializationFlag initialization_flag() const {
- return InitializationFlagField::decode(bit_field_);
- }
int index() const { return index_; }
@@ -152,10 +185,12 @@ class Variable final : public ZoneObject {
class IsUsedField
: public BitField16<bool, ForceContextAllocationField::kNext, 1> {};
class InitializationFlagField
- : public BitField16<InitializationFlag, IsUsedField::kNext, 2> {};
+ : public BitField16<InitializationFlag, IsUsedField::kNext, 1> {};
+ class ForceHoleInitializationField
+ : public BitField16<bool, InitializationFlagField::kNext, 1> {};
class MaybeAssignedFlagField
- : public BitField16<MaybeAssignedFlag, InitializationFlagField::kNext,
- 2> {};
+ : public BitField16<MaybeAssignedFlag,
+ ForceHoleInitializationField::kNext, 1> {};
Variable** next() { return &next_; }
friend List;
};
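// Sketch (not from this commit): the decision implemented by
// binding_needs_init() above, restated as a standalone, compilable truth
// table. The names below are hypothetical simplifications of the bit-field
// accessors in variables.h.
#include <cassert>

enum class InitFlag { kNeedsInitialization, kCreatedInitialized };

// force_hole      <- set by ForceHoleInitialization() during scope analysis.
// stack_allocated <- the variable was stack-allocated, i.e. scope analysis
//                    proved that no proxy requires a runtime hole check.
bool BindingNeedsInit(InitFlag flag, bool force_hole, bool stack_allocated) {
  if (force_hole) return true;        // analysis demanded hole initialization
  if (stack_allocated) return false;  // proven hole-check-free
  return flag == InitFlag::kNeedsInitialization;  // creation-time default
}

int main() {
  assert(BindingNeedsInit(InitFlag::kNeedsInitialization, true, true));
  assert(!BindingNeedsInit(InitFlag::kNeedsInitialization, false, true));
  assert(BindingNeedsInit(InitFlag::kNeedsInitialization, false, false));
  assert(!BindingNeedsInit(InitFlag::kCreatedInitialized, false, false));
}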
diff --git a/deps/v8/src/background-parsing-task.cc b/deps/v8/src/background-parsing-task.cc
index db47deedf3..c6d47ec396 100644
--- a/deps/v8/src/background-parsing-task.cc
+++ b/deps/v8/src/background-parsing-task.cc
@@ -37,6 +37,9 @@ BackgroundParsingTask::BackgroundParsingTask(
info->set_unicode_cache(&source_->unicode_cache);
info->set_compile_options(options);
info->set_allow_lazy_parsing();
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ info->set_runtime_call_stats(new (info->zone()) RuntimeCallStats());
+ }
source_->info->set_cached_data(&script_data_);
// Parser needs to stay alive for finalizing the parsing on the main
diff --git a/deps/v8/src/bailout-reason.h b/deps/v8/src/bailout-reason.h
index 1ae7df5837..b49b6eef5d 100644
--- a/deps/v8/src/bailout-reason.h
+++ b/deps/v8/src/bailout-reason.h
@@ -120,7 +120,6 @@ namespace internal {
V(kInvalidLhsInCountOperation, "Invalid lhs in count operation") \
V(kInvalidMinLength, "Invalid min_length") \
V(kInvalidRegisterFileInGenerator, "invalid register file in generator") \
- V(kLetBindingReInitialization, "Let binding re-initialization") \
V(kLiveEdit, "LiveEdit") \
V(kLookupVariableInCountOperation, "Lookup variable in count operation") \
V(kMapBecameDeprecated, "Map became deprecated") \
@@ -234,7 +233,6 @@ namespace internal {
V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedTestTypeofLiteralFlag, \
"Unexpected literal flag for TestTypeof bytecode") \
- V(kUnexpectedRegExpExecCall, "Unexpected call to the RegExpExecStub") \
V(kUnexpectedValue, "Unexpected value") \
V(kUnsupportedDoubleImmediate, "Unsupported double immediate") \
V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \
diff --git a/deps/v8/src/base/bits.cc b/deps/v8/src/base/bits.cc
index 909f9deb8c..049dc4a1b1 100644
--- a/deps/v8/src/base/bits.cc
+++ b/deps/v8/src/base/bits.cc
@@ -14,14 +14,36 @@ namespace base {
namespace bits {
uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
- DCHECK_LE(value, 0x80000000u);
- value = value - 1;
- value = value | (value >> 1);
- value = value | (value >> 2);
- value = value | (value >> 4);
- value = value | (value >> 8);
- value = value | (value >> 16);
+ DCHECK_LE(value, uint32_t{1} << 31);
+ if (value) --value;
+// Use computation based on leading zeros if we have compiler support for that.
+#if V8_HAS_BUILTIN_CLZ || V8_CC_MSVC
+ return 1u << (32 - CountLeadingZeros32(value));
+#else
+ value |= value >> 1;
+ value |= value >> 2;
+ value |= value >> 4;
+ value |= value >> 8;
+ value |= value >> 16;
return value + 1;
+#endif
+}
+
+uint64_t RoundUpToPowerOfTwo64(uint64_t value) {
+ DCHECK_LE(value, uint64_t{1} << 63);
+ if (value) --value;
+// Use computation based on leading zeros if we have compiler support for that.
+#if V8_HAS_BUILTIN_CLZ
+ return uint64_t{1} << (64 - CountLeadingZeros64(value));
+#else
+ value |= value >> 1;
+ value |= value >> 2;
+ value |= value >> 4;
+ value |= value >> 8;
+ value |= value >> 16;
+ value |= value >> 32;
+ return value + 1;
+#endif
}
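// Sketch (not from this commit): both strategies used above, side by side,
// assuming GCC/Clang's __builtin_clz for the fast path. The precondition
// mirrors the DCHECK: value <= 2^31.
#include <cassert>
#include <cstdint>

uint32_t RoundUpViaClz(uint32_t value) {
  if (value) --value;
  if (value == 0) return 1;  // __builtin_clz(0) is undefined; special-case it
  return uint32_t{1} << (32 - __builtin_clz(value));
}

uint32_t RoundUpViaOrShift(uint32_t value) {  // "Hacker's Delight" clp2
  if (value) --value;
  value |= value >> 1;  // smear the highest set bit into every lower bit...
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  return value + 1;  // ...then step up to the next power of two
}

int main() {
  for (uint32_t v : {0u, 1u, 2u, 3u, 17u, 4096u, 0x80000000u}) {
    assert(RoundUpViaClz(v) == RoundUpViaOrShift(v));
  }
}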
diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h
index b1864940b8..362940fcbe 100644
--- a/deps/v8/src/base/bits.h
+++ b/deps/v8/src/base/bits.h
@@ -158,23 +158,25 @@ inline unsigned CountTrailingZeros(uint64_t value) {
}
// Returns true iff |value| is a power of 2.
-inline bool IsPowerOfTwo32(uint32_t value) {
+constexpr inline bool IsPowerOfTwo32(uint32_t value) {
return value && !(value & (value - 1));
}
// Returns true iff |value| is a power of 2.
-inline bool IsPowerOfTwo64(uint64_t value) {
+constexpr inline bool IsPowerOfTwo64(uint64_t value) {
return value && !(value & (value - 1));
}
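// Note (not from this commit): making these predicates constexpr lets
// callers evaluate them at compile time, e.g. to validate a configuration
// constant (kPageSize is hypothetical):
//
//   constexpr uint32_t kPageSize = 4096;
//   static_assert(IsPowerOfTwo32(kPageSize),
//                 "page size must be a power of two");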
-
// RoundUpToPowerOfTwo32(value) returns the smallest power of two which is
// greater than or equal to |value|. If you pass in a |value| that is already a
// power of two, it is returned as is. |value| must be less than or equal to
-// 0x80000000u. Implementation is from "Hacker's Delight" by Henry S. Warren,
-// Jr., figure 3-3, page 48, where the function is called clp2.
+// 0x80000000u. Uses computation based on leading zeros if we have compiler
+// support for that. Falls back to the implementation from "Hacker's Delight" by
+// Henry S. Warren, Jr., figure 3-3, page 48, where the function is called clp2.
V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value);
+// Same for 64-bit integers. |value| must be <= 2^63.
+V8_BASE_EXPORT uint64_t RoundUpToPowerOfTwo64(uint64_t value);
// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
// less than or equal to |value|. If you pass in a |value| that is already a
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index d113c2a0fc..0374f0fc25 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -35,7 +35,7 @@
#define V8_HOST_ARCH_32_BIT 1
#elif defined(__PPC__) || defined(_ARCH_PPC)
#define V8_HOST_ARCH_PPC 1
-#if defined(__PPC64__) || defined(_ARCH_PPC64)
+#if defined(__PPC64__) || defined(_ARCH_PPC64) || defined(_ARCH_PPCGR)
#define V8_HOST_ARCH_64_BIT 1
#else
#define V8_HOST_ARCH_32_BIT 1
@@ -91,6 +91,8 @@
#define V8_TARGET_ARCH_MIPS64 1
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_TARGET_ARCH_MIPS 1
+#elif defined(_ARCH_PPC)
+#define V8_TARGET_ARCH_PPC 1
#else
#error Target architecture was not detected as supported by v8
#endif
@@ -181,6 +183,8 @@
#endif
#elif V8_TARGET_ARCH_X87
#define V8_TARGET_LITTLE_ENDIAN 1
+#elif __BIG_ENDIAN__ // For PPCGR on AIX
+#define V8_TARGET_BIG_ENDIAN 1
#elif V8_TARGET_ARCH_PPC_LE
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_PPC_BE
diff --git a/deps/v8/src/base/debug/stack_trace_win.cc b/deps/v8/src/base/debug/stack_trace_win.cc
index 8333cd9ea8..64e6309122 100644
--- a/deps/v8/src/base/debug/stack_trace_win.cc
+++ b/deps/v8/src/base/debug/stack_trace_win.cc
@@ -168,7 +168,7 @@ void DisableSignalStackDump() {
// that breaks CaptureStackBackTrace() and prevents StackTrace from working
// in Release builds (it may still be janky if other frames are using FPO,
// but at least it will make it further).
-#if defined(COMPILER_MSVC)
+#if defined(V8_CC_MSVC)
#pragma optimize("", off)
#endif
@@ -177,7 +177,7 @@ StackTrace::StackTrace() {
count_ = CaptureStackBackTrace(0, arraysize(trace_), trace_, NULL);
}
-#if defined(COMPILER_MSVC)
+#if defined(V8_CC_MSVC)
#pragma optimize("", on)
#endif
diff --git a/deps/v8/src/base/export-template.h b/deps/v8/src/base/export-template.h
new file mode 100644
index 0000000000..861cfe4027
--- /dev/null
+++ b/deps/v8/src/base/export-template.h
@@ -0,0 +1,163 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_EXPORT_TEMPLATE_H_
+#define V8_BASE_EXPORT_TEMPLATE_H_
+
+// Synopsis
+//
+// This header provides macros for using FOO_EXPORT macros with explicit
+// template instantiation declarations and definitions.
+// Generally, the FOO_EXPORT macros are used at declarations,
+// and GCC requires them to be used at explicit instantiation declarations,
+// but MSVC requires __declspec(dllexport) to be used at the explicit
+// instantiation definitions instead.
+
+// Usage
+//
+// In a header file, write:
+//
+// extern template class EXPORT_TEMPLATE_DECLARE(FOO_EXPORT) foo<bar>;
+//
+// In a source file, write:
+//
+// template class EXPORT_TEMPLATE_DEFINE(FOO_EXPORT) foo<bar>;
+
+// Implementation notes
+//
+// The implementation of this header uses some subtle macro semantics to
+// detect what the provided FOO_EXPORT value was defined as and then
+// to dispatch to appropriate macro definitions. Unfortunately,
+// MSVC's C preprocessor is rather non-compliant and requires special
+// care to make it work.
+//
+// Issue 1.
+//
+// #define F(x)
+// F()
+//
+// MSVC emits warning C4003 ("not enough actual parameters for macro
+// 'F'"), even though it's a valid macro invocation. This affects the
+// macros below that take just an "export" parameter, because export
+// may be empty.
+//
+// As a workaround, we can add a dummy parameter and arguments:
+//
+// #define F(x,_)
+// F(,)
+//
+// Issue 2.
+//
+// #define F(x) G##x
+// #define Gj() ok
+// F(j())
+//
+// The correct replacement for "F(j())" is "ok", but MSVC replaces it
+// with "Gj()". As a workaround, we can pass the result to an
+// identity macro to force MSVC to look for replacements again. (This
+// is why EXPORT_TEMPLATE_STYLE_3 exists.)
+
+#define EXPORT_TEMPLATE_DECLARE(export) \
+ EXPORT_TEMPLATE_INVOKE(DECLARE, EXPORT_TEMPLATE_STYLE(export, ), export)
+#define EXPORT_TEMPLATE_DEFINE(export) \
+ EXPORT_TEMPLATE_INVOKE(DEFINE, EXPORT_TEMPLATE_STYLE(export, ), export)
+
+// INVOKE is an internal helper macro to perform parameter replacements
+// and token pasting to chain invoke another macro. E.g.,
+// EXPORT_TEMPLATE_INVOKE(DECLARE, DEFAULT, FOO_EXPORT)
+// will expand to call
+// EXPORT_TEMPLATE_DECLARE_DEFAULT(FOO_EXPORT, )
+// (but with FOO_EXPORT expanded too).
+#define EXPORT_TEMPLATE_INVOKE(which, style, export) \
+ EXPORT_TEMPLATE_INVOKE_2(which, style, export)
+#define EXPORT_TEMPLATE_INVOKE_2(which, style, export) \
+ EXPORT_TEMPLATE_##which##_##style(export, )
+
+// Default style is to apply the FOO_EXPORT macro at declaration sites.
+#define EXPORT_TEMPLATE_DECLARE_DEFAULT(export, _) export
+#define EXPORT_TEMPLATE_DEFINE_DEFAULT(export, _)
+
+// The "MSVC hack" style is used when FOO_EXPORT is defined
+// as __declspec(dllexport), which MSVC requires to be used at
+// definition sites instead.
+#define EXPORT_TEMPLATE_DECLARE_MSVC_HACK(export, _)
+#define EXPORT_TEMPLATE_DEFINE_MSVC_HACK(export, _) export
+
+// EXPORT_TEMPLATE_STYLE is an internal helper macro that identifies which
+// export style needs to be used for the provided FOO_EXPORT macro definition.
+// "", "__attribute__(...)", and "__declspec(dllimport)" are mapped
+// to "DEFAULT"; while "__declspec(dllexport)" is mapped to "MSVC_HACK".
+//
+// It's implemented with token pasting to transform the __attribute__ and
+// __declspec annotations into macro invocations. E.g., if FOO_EXPORT is
+// defined as "__declspec(dllimport)", it undergoes the following sequence of
+// macro substitutions:
+// EXPORT_TEMPLATE_STYLE(FOO_EXPORT, )
+// EXPORT_TEMPLATE_STYLE_2(__declspec(dllimport), )
+// EXPORT_TEMPLATE_STYLE_3(EXPORT_TEMPLATE_STYLE_MATCH__declspec(dllimport))
+// EXPORT_TEMPLATE_STYLE_MATCH__declspec(dllimport)
+// EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllimport
+// DEFAULT
+#define EXPORT_TEMPLATE_STYLE(export, _) EXPORT_TEMPLATE_STYLE_2(export, )
+#define EXPORT_TEMPLATE_STYLE_2(export, _) \
+ EXPORT_TEMPLATE_STYLE_3( \
+ EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA##export)
+#define EXPORT_TEMPLATE_STYLE_3(style) style
+
+// Internal helper macros for EXPORT_TEMPLATE_STYLE.
+//
+// XXX: C++ reserves all identifiers containing "__" for the implementation,
+// but "__attribute__" and "__declspec" already contain "__" and the token-paste
+// operator can only add characters; not remove them. To minimize the risk of
+// conflict with implementations, we include "foj3FJo5StF0OvIzl7oMxA" (a random
+// 128-bit string, encoded in Base64) in the macro name.
+#define EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA DEFAULT
+#define EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA__attribute__(...) \
+ DEFAULT
+#define EXPORT_TEMPLATE_STYLE_MATCH_foj3FJo5StF0OvIzl7oMxA__declspec(arg) \
+ EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_##arg
+
+// Internal helper macros for EXPORT_TEMPLATE_STYLE.
+#define EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllexport MSVC_HACK
+#define EXPORT_TEMPLATE_STYLE_MATCH_DECLSPEC_dllimport DEFAULT
+
+// Sanity checks.
+//
+// EXPORT_TEMPLATE_TEST uses the same macro invocation pattern as
+// EXPORT_TEMPLATE_DECLARE and EXPORT_TEMPLATE_DEFINE do to check that they're
+// working correctly. When they're working correctly, the sequence of macro
+// replacements should go something like:
+//
+// EXPORT_TEMPLATE_TEST(DEFAULT, __declspec(dllimport));
+//
+// static_assert(EXPORT_TEMPLATE_INVOKE(TEST_DEFAULT,
+// EXPORT_TEMPLATE_STYLE(__declspec(dllimport), ),
+// __declspec(dllimport)), "__declspec(dllimport)");
+//
+// static_assert(EXPORT_TEMPLATE_INVOKE(TEST_DEFAULT,
+// DEFAULT, __declspec(dllimport)), "__declspec(dllimport)");
+//
+// static_assert(EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT(
+// __declspec(dllimport)), "__declspec(dllimport)");
+//
+// static_assert(true, "__declspec(dllimport)");
+//
+// When they're not working correctly, a syntax error should occur instead.
+#define EXPORT_TEMPLATE_TEST(want, export) \
+ static_assert(EXPORT_TEMPLATE_INVOKE( \
+ TEST_##want, EXPORT_TEMPLATE_STYLE(export, ), export), \
+ #export)
+#define EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT(...) true
+#define EXPORT_TEMPLATE_TEST_MSVC_HACK_MSVC_HACK(...) true
+
+EXPORT_TEMPLATE_TEST(DEFAULT, );
+EXPORT_TEMPLATE_TEST(DEFAULT, __attribute__((visibility("default"))));
+EXPORT_TEMPLATE_TEST(MSVC_HACK, __declspec(dllexport));
+EXPORT_TEMPLATE_TEST(DEFAULT, __declspec(dllimport));
+
+#undef EXPORT_TEMPLATE_TEST
+#undef EXPORT_TEMPLATE_TEST_DEFAULT_DEFAULT
+#undef EXPORT_TEMPLATE_TEST_MSVC_HACK_MSVC_HACK
+
+#endif // V8_BASE_EXPORT_TEMPLATE_H_
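// Sketch (not from this commit): typical client usage of the macros above.
// FOO_EXPORT, Foo<T> and the chosen instantiation are hypothetical.
//
// In foo.h:
//   #include "src/base/export-template.h"
//   template <typename T> class Foo { /* ... */ };
//   // Every includer sees the declaration; FOO_EXPORT is applied here under
//   // the DEFAULT style (GCC/Clang attributes, __declspec(dllimport)).
//   extern template class EXPORT_TEMPLATE_DECLARE(FOO_EXPORT) Foo<int>;
//
// In foo.cc:
//   // Defined exactly once; __declspec(dllexport) is applied here under the
//   // MSVC_HACK style.
//   template class EXPORT_TEMPLATE_DEFINE(FOO_EXPORT) Foo<int>;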
diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h
index fd2cc2b995..6e54508d43 100644
--- a/deps/v8/src/base/logging.h
+++ b/deps/v8/src/base/logging.h
@@ -124,8 +124,10 @@ DEFINE_MAKE_CHECK_OP_STRING(void const*)
template <typename Lhs, typename Rhs>
struct is_signed_vs_unsigned {
enum : bool {
- value = std::is_integral<Lhs>::value && std::is_integral<Rhs>::value &&
- std::is_signed<Lhs>::value && std::is_unsigned<Rhs>::value
+ value = std::is_integral<typename std::decay<Lhs>::type>::value &&
+ std::is_integral<typename std::decay<Rhs>::type>::value &&
+ std::is_signed<typename std::decay<Lhs>::type>::value &&
+ std::is_unsigned<typename std::decay<Rhs>::type>::value
};
};
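// Illustration (not from this commit): why std::decay was added here and in
// MAKE_UNSIGNED below. These traits can be instantiated with reference or
// cv-qualified types, for which is_integral/is_signed answer false, so the
// signed-vs-unsigned dispatch would silently not apply. Standalone check:
//
//   #include <type_traits>
//   static_assert(!std::is_signed<const int&>::value,
//                 "is_signed is false for reference types");
//   static_assert(std::is_signed<std::decay<const int&>::type>::value,
//                 "decay strips the reference and cv-qualifiers first");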
// Same thing, other way around: Lhs is unsigned, Rhs signed.
@@ -135,8 +137,10 @@ struct is_unsigned_vs_signed : public is_signed_vs_unsigned<Rhs, Lhs> {};
// Specialize the compare functions for signed vs. unsigned comparisons.
// std::enable_if ensures that this template is only instantiable if both Lhs
// and Rhs are integral types, and their signedness does not match.
-#define MAKE_UNSIGNED(Type, value) \
- static_cast<typename std::make_unsigned<Type>::type>(value)
+#define MAKE_UNSIGNED(Type, value) \
+ static_cast< \
+ typename std::make_unsigned<typename std::decay<Type>::type>::type>( \
+ value)
#define DEFINE_SIGNED_MISMATCH_COMP(CHECK, NAME, IMPL) \
template <typename Lhs, typename Rhs> \
V8_INLINE typename std::enable_if<CHECK<Lhs, Rhs>::value, bool>::type \
diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc
index d659dadedd..7d406996cb 100644
--- a/deps/v8/src/base/platform/platform-aix.cc
+++ b/deps/v8/src/base/platform/platform-aix.cc
@@ -71,9 +71,10 @@ double AIXTimezoneCache::LocalTimeOffset() {
TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); }
-void* OS::Allocate(const size_t requested, size_t* allocated, bool executable) {
+void* OS::Allocate(const size_t requested, size_t* allocated,
+ OS::MemoryPermission access) {
const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmapHelper(msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) return NULL;
diff --git a/deps/v8/src/base/platform/platform-cygwin.cc b/deps/v8/src/base/platform/platform-cygwin.cc
index d0c1a9eb07..0205021d69 100644
--- a/deps/v8/src/base/platform/platform-cygwin.cc
+++ b/deps/v8/src/base/platform/platform-cygwin.cc
@@ -55,12 +55,10 @@ double CygwinTimezoneCache::LocalTimeOffset() {
(loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
}
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
+void* OS::Allocate(const size_t requested, size_t* allocated,
+ OS::MemoryPermission access) {
const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) return NULL;
*allocated = msize;
diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc
index 39036455c9..5c5b8a0d3b 100644
--- a/deps/v8/src/base/platform/platform-freebsd.cc
+++ b/deps/v8/src/base/platform/platform-freebsd.cc
@@ -37,11 +37,10 @@ namespace base {
TimezoneCache* OS::CreateTimezoneCache() { return new PosixTimezoneCache(); }
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool executable) {
+void* OS::Allocate(const size_t requested, size_t* allocated,
+ OS::MemoryPermission access) {
const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+ int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) return NULL;
diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc
index 01fa778437..483cdd49ca 100644
--- a/deps/v8/src/base/platform/platform-linux.cc
+++ b/deps/v8/src/base/platform/platform-linux.cc
@@ -95,9 +95,9 @@ bool OS::ArmUsingHardFloat() {
TimezoneCache* OS::CreateTimezoneCache() { return new PosixTimezoneCache(); }
void* OS::Allocate(const size_t requested, size_t* allocated,
- bool is_executable) {
+ OS::MemoryPermission access) {
const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ int prot = GetProtectionFromMemoryPermission(access);
void* addr = OS::GetRandomMmapAddr();
void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) return NULL;
diff --git a/deps/v8/src/base/platform/platform-macos.cc b/deps/v8/src/base/platform/platform-macos.cc
index 262ac1b37e..7d1a6d2471 100644
--- a/deps/v8/src/base/platform/platform-macos.cc
+++ b/deps/v8/src/base/platform/platform-macos.cc
@@ -51,12 +51,10 @@ namespace base {
static const int kMmapFd = VM_MAKE_TAG(255);
static const off_t kMmapFdOffset = 0;
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
+void* OS::Allocate(const size_t requested, size_t* allocated,
+ OS::MemoryPermission access) {
const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(OS::GetRandomMmapAddr(),
msize,
prot,
diff --git a/deps/v8/src/base/platform/platform-openbsd.cc b/deps/v8/src/base/platform/platform-openbsd.cc
index ed52539512..06040e2f40 100644
--- a/deps/v8/src/base/platform/platform-openbsd.cc
+++ b/deps/v8/src/base/platform/platform-openbsd.cc
@@ -35,11 +35,10 @@ namespace base {
TimezoneCache* OS::CreateTimezoneCache() { return new PosixTimezoneCache(); }
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
+void* OS::Allocate(const size_t requested, size_t* allocated,
+ OS::MemoryPermission access) {
const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ int prot = GetProtectionFromMemoryPermission(access);
void* addr = OS::GetRandomMmapAddr();
void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) return NULL;
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 31da95340e..25c270ad28 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -101,10 +101,17 @@ intptr_t OS::CommitPageSize() {
return page_size;
}
+void* OS::Allocate(const size_t requested, size_t* allocated,
+ bool is_executable) {
+ return OS::Allocate(requested, allocated,
+ is_executable ? OS::MemoryPermission::kReadWriteExecute
+ : OS::MemoryPermission::kReadWrite);
+}
+
void* OS::AllocateGuarded(const size_t requested) {
size_t allocated = 0;
- const bool is_executable = false;
- void* mbase = OS::Allocate(requested, &allocated, is_executable);
+ void* mbase =
+ OS::Allocate(requested, &allocated, OS::MemoryPermission::kNoAccess);
if (allocated != requested) {
OS::Free(mbase, allocated);
return nullptr;
@@ -112,7 +119,6 @@ void* OS::AllocateGuarded(const size_t requested) {
if (mbase == nullptr) {
return nullptr;
}
- OS::Guard(mbase, requested);
return mbase;
}
@@ -776,5 +782,17 @@ void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
USE(result);
}
+int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess:
+ return PROT_NONE;
+ case OS::MemoryPermission::kReadWrite:
+ return PROT_READ | PROT_WRITE;
+ case OS::MemoryPermission::kReadWriteExecute:
+ return PROT_READ | PROT_WRITE | PROT_EXEC;
+ }
+ UNREACHABLE();
+}
+
} // namespace base
} // namespace v8
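// Sketch (not from this commit): exercising the new overload from a
// hypothetical caller; only the platform.h include is assumed.
//
//   #include "src/base/platform/platform.h"
//
//   void* AllocateJitPage(size_t size, size_t* allocated) {
//     // Maps to PROT_READ | PROT_WRITE | PROT_EXEC on POSIX via
//     // GetProtectionFromMemoryPermission() above.
//     return v8::base::OS::Allocate(
//         size, allocated, v8::base::OS::MemoryPermission::kReadWriteExecute);
//   }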
diff --git a/deps/v8/src/base/platform/platform-posix.h b/deps/v8/src/base/platform/platform-posix.h
index c243a723c1..9818f64247 100644
--- a/deps/v8/src/base/platform/platform-posix.h
+++ b/deps/v8/src/base/platform/platform-posix.h
@@ -5,6 +5,7 @@
#ifndef V8_BASE_PLATFORM_PLATFORM_POSIX_H_
#define V8_BASE_PLATFORM_PLATFORM_POSIX_H_
+#include "src/base/platform/platform.h"
#include "src/base/timezone-cache.h"
namespace v8 {
@@ -22,6 +23,8 @@ class PosixTimezoneCache : public TimezoneCache {
static const int msPerSecond = 1000;
};
+int GetProtectionFromMemoryPermission(OS::MemoryPermission access);
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/platform/platform-qnx.cc b/deps/v8/src/base/platform/platform-qnx.cc
index 0e03baa1a1..7ce3de119d 100644
--- a/deps/v8/src/base/platform/platform-qnx.cc
+++ b/deps/v8/src/base/platform/platform-qnx.cc
@@ -86,11 +86,10 @@ bool OS::ArmUsingHardFloat() {
TimezoneCache* OS::CreateTimezoneCache() { return new PosixTimezoneCache(); }
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
+void* OS::Allocate(const size_t requested, size_t* allocated,
+ OS::MemoryPermission access) {
const size_t msize = RoundUp(requested, AllocateAlignment());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ int prot = GetProtectionFromMemoryPermission(access);
void* addr = OS::GetRandomMmapAddr();
void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (mbase == MAP_FAILED) return NULL;
diff --git a/deps/v8/src/base/platform/platform-solaris.cc b/deps/v8/src/base/platform/platform-solaris.cc
index d5cc658d73..64498eaf1f 100644
--- a/deps/v8/src/base/platform/platform-solaris.cc
+++ b/deps/v8/src/base/platform/platform-solaris.cc
@@ -58,11 +58,10 @@ double SolarisTimezoneCache::LocalTimeOffset() {
TimezoneCache* OS::CreateTimezoneCache() { return new SolarisTimezoneCache(); }
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
- bool is_executable) {
+void* OS::Allocate(const size_t requested, size_t* allocated,
+ OS::MemoryPermission access) {
const size_t msize = RoundUp(requested, getpagesize());
- int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+ int prot = GetProtectionFromMemoryPermission(access);
void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
if (mbase == MAP_FAILED) return NULL;
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 472397bb36..7b7ff99d20 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -764,15 +764,34 @@ static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
return base;
}
-
-void* OS::Allocate(const size_t requested,
- size_t* allocated,
+void* OS::Allocate(const size_t requested, size_t* allocated,
bool is_executable) {
+ return OS::Allocate(requested, allocated,
+ is_executable ? OS::MemoryPermission::kReadWriteExecute
+ : OS::MemoryPermission::kReadWrite);
+}
+
+void* OS::Allocate(const size_t requested, size_t* allocated,
+ OS::MemoryPermission access) {
// VirtualAlloc rounds allocated size to page size automatically.
size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
// Windows XP SP2 allows Data Execution Prevention (DEP).
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+ int prot = PAGE_NOACCESS;
+ switch (access) {
+ case OS::MemoryPermission::kNoAccess: {
+ prot = PAGE_NOACCESS;
+ break;
+ }
+ case OS::MemoryPermission::kReadWrite: {
+ prot = PAGE_READWRITE;
+ break;
+ }
+ case OS::MemoryPermission::kReadWriteExecute: {
+ prot = PAGE_EXECUTE_READWRITE;
+ break;
+ }
+ }
LPVOID mbase = RandomizedVirtualAlloc(msize,
MEM_COMMIT | MEM_RESERVE,
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index 9259ec8f78..55cff6bf64 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -158,6 +158,14 @@ class V8_BASE_EXPORT OS {
static PRINTF_FORMAT(1, 2) void PrintError(const char* format, ...);
static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args);
+ // Memory access permissions. Only the modes currently used by V8 are listed
+ // here even though most systems support additional modes.
+ enum class MemoryPermission { kNoAccess, kReadWrite, kReadWriteExecute };
+
+ // Allocate/Free memory used by JS heap. Permissions are set according to
+ // the access argument. Returns the address of allocated memory, or NULL if
+ // failed.
+ static void* Allocate(const size_t requested, size_t* allocated,
+ MemoryPermission access);
// Allocate/Free memory used by JS heap. Pages are readable/writable, but
// they are not guaranteed to be executable unless 'executable' is true.
// Returns the address of allocated memory, or NULL if failed.
@@ -167,8 +175,8 @@ class V8_BASE_EXPORT OS {
static void Free(void* address, const size_t size);
// Allocates a region of memory that is inaccessible. On Windows this reserves
- // but does not commit the memory. On Linux, it is equivalent to a call to
- // Allocate() followed by Guard().
+ // but does not commit the memory. On POSIX systems it allocates memory as
+ // PROT_NONE, which also prevents it from being committed.
static void* AllocateGuarded(const size_t requested);
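// Note (not from this commit): with PROT_NONE on POSIX, touching a guarded
// region now faults immediately, e.g. (hypothetical caller):
//
//   void* guard = v8::base::OS::AllocateGuarded(4096);
//   // Any read or write through `guard` traps until the region is released
//   // with OS::Free(guard, 4096).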
// This is the granularity at which the ProtectCode(...) call can set page
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 51a99d6ba5..b1fda971cf 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -23,9 +23,9 @@
#include "src/snapshot/snapshot.h"
#include "src/wasm/wasm-js.h"
-#if V8_I18N_SUPPORT
-#include "src/i18n.h"
-#endif // V8_I18N_SUPPORT
+#if V8_INTL_SUPPORT
+#include "src/objects/intl-objects.h"
+#endif // V8_INTL_SUPPORT
namespace v8 {
namespace internal {
@@ -178,7 +178,6 @@ class Genesis BASE_EMBEDDED {
void InstallOneBuiltinFunction(Handle<Object> prototype, const char* method,
Builtins::Name name);
- void InitializeGlobal_experimental_fast_array_builtins();
Handle<JSFunction> InstallArrayBuffer(Handle<JSObject> target,
const char* name,
@@ -270,8 +269,7 @@ class Genesis BASE_EMBEDDED {
friend class Bootstrapper;
};
-
-void Bootstrapper::Iterate(ObjectVisitor* v) {
+void Bootstrapper::Iterate(RootVisitor* v) {
extensions_cache_.Iterate(v);
v->Synchronize(VisitorSynchronization::kExtensions);
}
@@ -397,14 +395,14 @@ Handle<JSFunction> SimpleCreateFunction(Isolate* isolate, Handle<String> name,
Handle<JSFunction> InstallArrayBuiltinFunction(Handle<JSObject> base,
const char* name,
- Builtins::Name call,
- int argument_count) {
+ Builtins::Name call) {
Isolate* isolate = base->GetIsolate();
Handle<String> str_name = isolate->factory()->InternalizeUtf8String(name);
Handle<JSFunction> fun =
CreateFunction(isolate, str_name, JS_OBJECT_TYPE, JSObject::kHeaderSize,
MaybeHandle<JSObject>(), call, true);
- fun->shared()->set_internal_formal_parameter_count(argument_count);
+ fun->shared()->set_internal_formal_parameter_count(
+ Builtins::GetBuiltinParameterCount(call));
// Set the length to 1 to satisfy ECMA-262.
fun->shared()->set_length(1);
@@ -569,12 +567,21 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
object_function_prototype->set_map(*map);
native_context()->set_initial_object_prototype(*object_function_prototype);
- // For bootstrapping set the array prototype to be the same as the object
- // prototype, otherwise the missing initial_array_prototype will cause
- // assertions during startup.
- native_context()->set_initial_array_prototype(*object_function_prototype);
- Accessors::FunctionSetPrototype(object_fun, object_function_prototype)
- .Assert();
+ JSFunction::SetPrototype(object_fun, object_function_prototype);
+
+ {
+ // Set up slow map for Object.create(null) instances without in-object
+ // properties.
+ Handle<Map> map(object_fun->initial_map(), isolate);
+ map = Map::CopyInitialMapNormalized(map);
+ Map::SetPrototype(map, isolate->factory()->null_value());
+ native_context()->set_slow_object_with_null_prototype_map(*map);
+
+ // Set up slow map for literals with too many properties.
+ map = Map::Copy(map, "slow_object_with_object_prototype_map");
+ Map::SetPrototype(map, object_function_prototype);
+ native_context()->set_slow_object_with_object_prototype_map(*map);
+ }
}
// Allocate the empty function as the prototype for function - ES6 19.2.3
@@ -633,7 +640,8 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
}
// length needs to be non configurable.
- Handle<Object> value(Smi::FromInt(function->shared()->length()), isolate());
+ Handle<Object> value(Smi::FromInt(function->shared()->GetLength()),
+ isolate());
JSObject::SetOwnPropertyIgnoreAttributes(
function, factory()->length_string(), value,
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY))
@@ -644,8 +652,6 @@ Handle<JSFunction> Genesis::GetThrowTypeErrorIntrinsic(
DCHECK(false);
}
- JSObject::MigrateSlowToFast(function, 0, "Bootstrapping");
-
return function;
}
@@ -1189,7 +1195,7 @@ static void InstallError(Isolate* isolate, Handle<JSObject> global,
.FromMaybe(false));
}
- Accessors::FunctionSetPrototype(error_fun, prototype).Assert();
+ JSFunction::SetPrototype(error_fun, prototype);
}
Handle<Map> initial_map(error_fun->initial_map());
@@ -1304,7 +1310,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_object_is_sealed(*object_is_sealed);
Handle<JSFunction> object_keys = SimpleInstallFunction(
- object_function, "keys", Builtins::kObjectKeys, 1, false);
+ object_function, "keys", Builtins::kObjectKeys, 1, true);
native_context()->set_object_keys(*object_keys);
SimpleInstallFunction(object_function, factory->entries_string(),
Builtins::kObjectEntries, 1, false);
@@ -1328,6 +1334,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(
isolate->initial_object_prototype(), "propertyIsEnumerable",
Builtins::kObjectPrototypePropertyIsEnumerable, 1, false);
+ Handle<JSFunction> object_to_string = SimpleInstallFunction(
+ isolate->initial_object_prototype(), factory->toString_string(),
+ Builtins::kObjectProtoToString, 0, true);
+ native_context()->set_object_to_string(*object_to_string);
Handle<JSFunction> object_value_of = SimpleInstallFunction(
isolate->initial_object_prototype(), "valueOf",
Builtins::kObjectPrototypeValueOf, 0, true);
@@ -1387,8 +1397,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
sloppy_function_map_writable_prototype_->SetConstructor(*function_fun);
strict_function_map_writable_prototype_->SetConstructor(*function_fun);
class_function_map_->SetConstructor(*function_fun);
-
- JSObject::MigrateSlowToFast(function_fun, 0, "Bootstrapping");
}
{
@@ -1414,18 +1422,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallWithIntrinsicDefaultProto(isolate, await_uncaught,
Context::ASYNC_GENERATOR_AWAIT_UNCAUGHT);
- Handle<JSFunction> yield =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncGeneratorYield, 2, false);
- InstallWithIntrinsicDefaultProto(isolate, yield,
- Context::ASYNC_GENERATOR_YIELD);
-
- Handle<JSFunction> raw_yield =
- SimpleCreateFunction(isolate, factory->empty_string(),
- Builtins::kAsyncGeneratorRawYield, 2, false);
- InstallWithIntrinsicDefaultProto(isolate, raw_yield,
- Context::ASYNC_GENERATOR_RAW_YIELD);
-
Handle<Code> code =
isolate->builtins()->AsyncGeneratorAwaitResolveClosure();
Handle<SharedFunctionInfo> info =
@@ -1484,6 +1480,13 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<Code> code = array_constructor_stub.GetCode();
array_function->shared()->SetConstructStub(*code);
+ // Set up %ArrayPrototype%.
+ Handle<JSArray> array_prototype =
+ Handle<JSArray>::cast(factory->NewJSObject(array_function, TENURED));
+ JSArray::Initialize(array_prototype, 0);
+ JSFunction::SetPrototype(array_function, array_prototype);
+ native_context()->set_initial_array_prototype(*array_prototype);
+
Handle<JSFunction> is_arraylike = SimpleInstallFunction(
array_function, isolate->factory()->InternalizeUtf8String("isArray"),
Builtins::kArrayIsArray, 1, true);
@@ -1590,6 +1593,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
number_fun->shared()->SetConstructStub(
*isolate->builtins()->NumberConstructor_ConstructStub());
number_fun->shared()->set_length(1);
+ // https://tc39.github.io/ecma262/#sec-built-in-function-objects says
+ // that "Built-in functions that are ECMAScript function objects must
+ // be strict functions".
+ number_fun->shared()->set_language_mode(STRICT);
InstallWithIntrinsicDefaultProto(isolate, number_fun,
Context::NUMBER_FUNCTION_INDEX);
@@ -1597,7 +1604,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSValue> prototype =
Handle<JSValue>::cast(factory->NewJSObject(number_fun, TENURED));
prototype->set_value(Smi::kZero);
- Accessors::FunctionSetPrototype(number_fun, prototype).Assert();
+ JSFunction::SetPrototype(number_fun, prototype);
// Install the "constructor" property on the {prototype}.
JSObject::AddProperty(prototype, factory->constructor_string(), number_fun,
@@ -1615,7 +1622,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(prototype, "valueOf",
Builtins::kNumberPrototypeValueOf, 0, true);
- // Install i18n fallback functions.
+ // Install Intl fallback functions.
SimpleInstallFunction(prototype, "toLocaleString",
Builtins::kNumberPrototypeToLocaleString, 0, false);
@@ -1714,7 +1721,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSValue> prototype =
Handle<JSValue>::cast(factory->NewJSObject(boolean_fun, TENURED));
prototype->set_value(isolate->heap()->false_value());
- Accessors::FunctionSetPrototype(boolean_fun, prototype).Assert();
+ JSFunction::SetPrototype(boolean_fun, prototype);
// Install the "constructor" property on the {prototype}.
JSObject::AddProperty(prototype, factory->constructor_string(), boolean_fun,
@@ -1735,6 +1742,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
*isolate->builtins()->StringConstructor_ConstructStub());
string_fun->shared()->DontAdaptArguments();
string_fun->shared()->set_length(1);
+ // https://tc39.github.io/ecma262/#sec-built-in-function-objects says
+ // that "Built-in functions that are ECMAScript function objects must
+ // be strict functions".
+ string_fun->shared()->set_language_mode(STRICT);
InstallWithIntrinsicDefaultProto(isolate, string_fun,
Context::STRING_FUNCTION_INDEX);
@@ -1766,7 +1777,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSValue> prototype =
Handle<JSValue>::cast(factory->NewJSObject(string_fun, TENURED));
prototype->set_value(isolate->heap()->empty_string());
- Accessors::FunctionSetPrototype(string_fun, prototype).Assert();
+ JSFunction::SetPrototype(string_fun, prototype);
// Install the "constructor" property on the {prototype}.
JSObject::AddProperty(prototype, factory->constructor_string(), string_fun,
@@ -1789,15 +1800,17 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kStringPrototypeLastIndexOf, 1, false);
SimpleInstallFunction(prototype, "localeCompare",
Builtins::kStringPrototypeLocaleCompare, 1, true);
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
SimpleInstallFunction(prototype, "normalize",
- Builtins::kStringPrototypeNormalizeI18N, 0, false);
+ Builtins::kStringPrototypeNormalizeIntl, 0, false);
#else
SimpleInstallFunction(prototype, "normalize",
Builtins::kStringPrototypeNormalize, 0, false);
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
SimpleInstallFunction(prototype, "replace",
Builtins::kStringPrototypeReplace, 2, true);
+ SimpleInstallFunction(prototype, "slice", Builtins::kStringPrototypeSlice,
+ 2, false);
SimpleInstallFunction(prototype, "split", Builtins::kStringPrototypeSplit,
2, true);
SimpleInstallFunction(prototype, "substr", Builtins::kStringPrototypeSubstr,
@@ -2049,7 +2062,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(prototype, "toJSON", Builtins::kDatePrototypeToJson,
1, false);
- // Install i18n fallback functions.
+ // Install Intl fallback functions.
SimpleInstallFunction(prototype, "toLocaleString",
Builtins::kDatePrototypeToString, 0, false);
SimpleInstallFunction(prototype, "toLocaleDateString",
@@ -2208,8 +2221,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
info->set_length(1);
native_context()->set_promise_reject_shared_fun(*info);
}
-
- JSObject::MigrateSlowToFast(promise_fun, 0, "Bootstrapping");
}
{ // -- R e g E x p
@@ -2276,7 +2287,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<JSFunction> fun = SimpleCreateFunction(
isolate, factory->InternalizeUtf8String("[Symbol.replace]"),
- Builtins::kRegExpPrototypeReplace, 2, true);
+ Builtins::kRegExpPrototypeReplace, 2, false);
InstallFunction(prototype, fun, factory->replace_symbol(), DONT_ENUM);
}
@@ -2290,7 +2301,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{
Handle<JSFunction> fun = SimpleCreateFunction(
isolate, factory->InternalizeUtf8String("[Symbol.split]"),
- Builtins::kRegExpPrototypeSplit, 2, true);
+ Builtins::kRegExpPrototypeSplit, 2, false);
InstallFunction(prototype, fun, factory->split_symbol(), DONT_ENUM);
}
@@ -2480,8 +2491,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- J S O N
Handle<String> name = factory->InternalizeUtf8String("JSON");
Handle<JSFunction> cons = factory->NewFunction(name);
- JSFunction::SetInstancePrototype(cons,
- Handle<Object>(native_context()->initial_object_prototype(), isolate));
+ JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
DCHECK(json_object->IsJSObject());
JSObject::AddProperty(global, name, json_object, DONT_ENUM);
@@ -2497,9 +2507,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
{ // -- M a t h
Handle<String> name = factory->InternalizeUtf8String("Math");
Handle<JSFunction> cons = factory->NewFunction(name);
- JSFunction::SetInstancePrototype(
- cons,
- Handle<Object>(native_context()->initial_object_prototype(), isolate));
+ JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
Handle<JSObject> math = factory->NewJSObject(cons, TENURED);
DCHECK(math->IsJSObject());
JSObject::AddProperty(global, name, math, DONT_ENUM);
@@ -2565,13 +2573,71 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
}
-#ifdef V8_I18N_SUPPORT
+ { // -- C o n s o l e
+ Handle<String> name = factory->InternalizeUtf8String("console");
+ Handle<JSFunction> cons = factory->NewFunction(name);
+ Handle<JSObject> empty = factory->NewJSObject(isolate->object_function());
+ JSFunction::SetPrototype(cons, empty);
+ Handle<JSObject> console = factory->NewJSObject(cons, TENURED);
+ DCHECK(console->IsJSObject());
+ JSObject::AddProperty(global, name, console, DONT_ENUM);
+ SimpleInstallFunction(console, "debug", Builtins::kConsoleDebug, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "error", Builtins::kConsoleError, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "info", Builtins::kConsoleInfo, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "log", Builtins::kConsoleLog, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "warn", Builtins::kConsoleWarn, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "dir", Builtins::kConsoleDir, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "dirxml", Builtins::kConsoleDirXml, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "table", Builtins::kConsoleTable, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "trace", Builtins::kConsoleTrace, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "group", Builtins::kConsoleGroup, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "groupCollapsed",
+ Builtins::kConsoleGroupCollapsed, 1, false, NONE);
+ SimpleInstallFunction(console, "groupEnd", Builtins::kConsoleGroupEnd, 1,
+ false, NONE);
+ SimpleInstallFunction(console, "clear", Builtins::kConsoleClear, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "count", Builtins::kConsoleCount, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "assert", Builtins::kFastConsoleAssert, 1,
+ false, NONE);
+ SimpleInstallFunction(console, "markTimeline",
+ Builtins::kConsoleMarkTimeline, 1, false, NONE);
+ SimpleInstallFunction(console, "profile", Builtins::kConsoleProfile, 1,
+ false, NONE);
+ SimpleInstallFunction(console, "profileEnd", Builtins::kConsoleProfileEnd,
+ 1, false, NONE);
+ SimpleInstallFunction(console, "timeline", Builtins::kConsoleTimeline, 1,
+ false, NONE);
+ SimpleInstallFunction(console, "timelineEnd", Builtins::kConsoleTimelineEnd,
+ 1, false, NONE);
+ SimpleInstallFunction(console, "time", Builtins::kConsoleTime, 1, false,
+ NONE);
+ SimpleInstallFunction(console, "timeEnd", Builtins::kConsoleTimeEnd, 1,
+ false, NONE);
+ SimpleInstallFunction(console, "timeStamp", Builtins::kConsoleTimeStamp, 1,
+ false, NONE);
+ JSObject::AddProperty(
+ console, factory->to_string_tag_symbol(),
+ factory->NewStringFromAsciiChecked("Object"),
+ static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
+ }
+
+#ifdef V8_INTL_SUPPORT
{ // -- I n t l
Handle<String> name = factory->InternalizeUtf8String("Intl");
Handle<JSFunction> cons = factory->NewFunction(name);
- JSFunction::SetInstancePrototype(
- cons,
- Handle<Object>(native_context()->initial_object_prototype(), isolate));
+ JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
Handle<JSObject> intl = factory->NewJSObject(cons, TENURED);
DCHECK(intl->IsJSObject());
JSObject::AddProperty(global, name, intl, DONT_ENUM);
@@ -2642,7 +2708,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate, v8_break_iterator_constructor,
Context::INTL_V8_BREAK_ITERATOR_FUNCTION_INDEX);
}
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
{ // -- A r r a y B u f f e r
Handle<JSFunction> array_buffer_fun = InstallArrayBuffer(
@@ -2713,6 +2779,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
// TODO(caitp): alphasort accessors/methods
SimpleInstallFunction(prototype, "copyWithin",
Builtins::kTypedArrayPrototypeCopyWithin, 2, false);
+ SimpleInstallFunction(prototype, "every",
+ Builtins::kTypedArrayPrototypeEvery, 1, false);
SimpleInstallFunction(prototype, "fill",
Builtins::kTypedArrayPrototypeFill, 1, false);
SimpleInstallFunction(prototype, "includes",
@@ -2721,10 +2789,18 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kTypedArrayPrototypeIndexOf, 1, false);
SimpleInstallFunction(prototype, "lastIndexOf",
Builtins::kTypedArrayPrototypeLastIndexOf, 1, false);
+ SimpleInstallFunction(prototype, "map", Builtins::kTypedArrayPrototypeMap,
+ 1, false);
SimpleInstallFunction(prototype, "reverse",
Builtins::kTypedArrayPrototypeReverse, 0, false);
+ SimpleInstallFunction(prototype, "reduce",
+ Builtins::kTypedArrayPrototypeReduce, 1, false);
+ SimpleInstallFunction(prototype, "reduceRight",
+ Builtins::kTypedArrayPrototypeReduceRight, 1, false);
SimpleInstallFunction(prototype, "slice",
Builtins::kTypedArrayPrototypeSlice, 2, false);
+ SimpleInstallFunction(prototype, "some", Builtins::kTypedArrayPrototypeSome,
+ 1, false);
}
{ // -- T y p e d A r r a y s
@@ -2738,12 +2814,6 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
TYPED_ARRAYS(INSTALL_TYPED_ARRAY)
#undef INSTALL_TYPED_ARRAY
- // %typed_array_initialize
- Handle<JSFunction> typed_array_initialize = SimpleCreateFunction(
- isolate, factory->NewStringFromAsciiChecked("typedArrayInitialize"),
- Builtins::kTypedArrayInitialize, 6, false);
- native_context()->set_typed_array_initialize(*typed_array_initialize);
-
// %typed_array_construct_by_length
Handle<JSFunction> construct_by_length = SimpleCreateFunction(
isolate,
@@ -3184,8 +3254,6 @@ void Genesis::InitializeExperimentalGlobal() {
HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
#undef FEATURE_INITIALIZE_GLOBAL
-
- InitializeGlobal_experimental_fast_array_builtins();
}
bool Bootstrapper::CompileBuiltin(Isolate* isolate, int index) {
@@ -3416,15 +3484,6 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
WELL_KNOWN_SYMBOL_LIST(EXPORT_PUBLIC_SYMBOL)
#undef EXPORT_PUBLIC_SYMBOL
- {
- Handle<JSFunction> to_string = InstallFunction(
- container, "object_to_string", JS_OBJECT_TYPE, JSObject::kHeaderSize,
- MaybeHandle<JSObject>(), Builtins::kObjectProtoToString);
- to_string->shared()->set_internal_formal_parameter_count(0);
- to_string->shared()->set_length(0);
- native_context->set_object_to_string(*to_string);
- }
-
Handle<JSObject> iterator_prototype(
native_context->initial_iterator_prototype());
@@ -3525,7 +3584,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
isolate->initial_object_prototype(), Builtins::kUnsupportedThrower);
Handle<JSObject> prototype =
factory->NewJSObject(isolate->object_function(), TENURED);
- Accessors::FunctionSetPrototype(script_fun, prototype).Assert();
+ JSFunction::SetPrototype(script_fun, prototype);
native_context->set_script_function(*script_fun);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
@@ -3779,7 +3838,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
SimpleInstallFunction(proto, info.name, info.id, 0, true, attrs);
}
- Accessors::FunctionSetPrototype(callsite_fun, proto).Assert();
+ JSFunction::SetPrototype(callsite_fun, proto);
}
}
isolate->native_context()->set_exports_container(*container);
@@ -3802,6 +3861,8 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_rest_spread)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_template_escapes)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrict_constructor_return)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_strict_legacy_accessor_builtins)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@@ -3825,33 +3886,10 @@ void Genesis::InstallOneBuiltinFunction(Handle<Object> prototype,
Handle<Object> function = Object::GetProperty(&it).ToHandleChecked();
Handle<JSFunction>::cast(function)->set_code(
isolate()->builtins()->builtin(builtin_name));
- Handle<JSFunction>::cast(function)->shared()->set_code(
- isolate()->builtins()->builtin(builtin_name));
-}
-
-void Genesis::InitializeGlobal_experimental_fast_array_builtins() {
- if (!FLAG_experimental_fast_array_builtins) return;
- {
- Handle<JSFunction> array_constructor(native_context()->array_function());
- Handle<Object> array_prototype(array_constructor->prototype(), isolate());
- // Insert experimental fast Array builtins here.
- InstallOneBuiltinFunction(array_prototype, "filter",
- Builtins::kArrayFilter);
- InstallOneBuiltinFunction(array_prototype, "map", Builtins::kArrayMap);
- }
- {
- Handle<Object> typed_array_prototype(
- native_context()->typed_array_prototype(), isolate());
- // Insert experimental fast TypedArray builtins here.
- InstallOneBuiltinFunction(typed_array_prototype, "every",
- Builtins::kTypedArrayPrototypeEvery);
- InstallOneBuiltinFunction(typed_array_prototype, "some",
- Builtins::kTypedArrayPrototypeSome);
- InstallOneBuiltinFunction(typed_array_prototype, "reduce",
- Builtins::kTypedArrayPrototypeReduce);
- InstallOneBuiltinFunction(typed_array_prototype, "reduceRight",
- Builtins::kTypedArrayPrototypeReduceRight);
- }
+ SharedFunctionInfo* info = Handle<JSFunction>::cast(function)->shared();
+ info->set_code(isolate()->builtins()->builtin(builtin_name));
+ info->set_internal_formal_parameter_count(
+ Builtins::GetBuiltinParameterCount(builtin_name));
}
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
@@ -3872,9 +3910,7 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
Handle<String> name = factory->InternalizeUtf8String("Atomics");
Handle<JSFunction> cons = factory->NewFunction(name);
- JSFunction::SetInstancePrototype(
- cons,
- Handle<Object>(native_context()->initial_object_prototype(), isolate));
+ JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
Handle<JSObject> atomics_object = factory->NewJSObject(cons, TENURED);
DCHECK(atomics_object->IsJSObject());
JSObject::AddProperty(global, name, atomics_object, DONT_ENUM);
@@ -4015,22 +4051,7 @@ void Genesis::InitializeGlobal_harmony_regexp_dotall() {
native_context()->set_regexp_prototype_map(*prototype_map);
}
-#ifdef V8_I18N_SUPPORT
-void Genesis::InitializeGlobal_datetime_format_to_parts() {
- if (!FLAG_datetime_format_to_parts) return;
- Handle<JSReceiver> exports_container(
- JSReceiver::cast(native_context()->exports_container()));
- Handle<JSObject> date_time_format_prototype(JSObject::cast(
- native_context()->intl_date_time_format_function()->prototype()));
- Handle<JSFunction> format_date_to_parts = Handle<JSFunction>::cast(
- JSReceiver::GetProperty(
- exports_container,
- factory()->InternalizeUtf8String("FormatDateToParts"))
- .ToHandleChecked());
- InstallFunction(date_time_format_prototype, format_date_to_parts,
- factory()->InternalizeUtf8String("formatToParts"));
-}
-
+#ifdef V8_INTL_SUPPORT
namespace {
void SetFunction(Handle<JSObject> target, Handle<JSFunction> function,
@@ -4054,15 +4075,15 @@ void Genesis::InitializeGlobal_icu_case_mapping() {
Handle<String> name = factory()->InternalizeUtf8String("toLowerCase");
SetFunction(string_prototype,
SimpleCreateFunction(isolate(), name,
- Builtins::kStringPrototypeToLowerCaseI18N,
- 0, false),
+ Builtins::kStringPrototypeToLowerCaseIntl,
+ 0, true),
name);
}
{
Handle<String> name = factory()->InternalizeUtf8String("toUpperCase");
SetFunction(string_prototype,
SimpleCreateFunction(isolate(), name,
- Builtins::kStringPrototypeToUpperCaseI18N,
+ Builtins::kStringPrototypeToUpperCaseIntl,
0, false),
name);
}
@@ -4070,7 +4091,7 @@ void Genesis::InitializeGlobal_icu_case_mapping() {
Handle<JSFunction> to_locale_lower_case = Handle<JSFunction>::cast(
JSReceiver::GetProperty(
exports_container,
- factory()->InternalizeUtf8String("ToLocaleLowerCaseI18N"))
+ factory()->InternalizeUtf8String("ToLocaleLowerCaseIntl"))
.ToHandleChecked());
SetFunction(string_prototype, to_locale_lower_case,
factory()->InternalizeUtf8String("toLocaleLowerCase"));
@@ -4078,7 +4099,7 @@ void Genesis::InitializeGlobal_icu_case_mapping() {
Handle<JSFunction> to_locale_upper_case = Handle<JSFunction>::cast(
JSReceiver::GetProperty(
exports_container,
- factory()->InternalizeUtf8String("ToLocaleUpperCaseI18N"))
+ factory()->InternalizeUtf8String("ToLocaleUpperCaseIntl"))
.ToHandleChecked());
SetFunction(string_prototype, to_locale_upper_case,
factory()->InternalizeUtf8String("toLocaleUpperCase"));
@@ -4192,11 +4213,9 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
factory()->NewStringFromAsciiChecked("isPromise"));
int builtin_index = Natives::GetDebuggerCount();
- // Only run prologue.js and runtime.js at this point.
+ // Only run prologue.js at this point.
DCHECK_EQ(builtin_index, Natives::GetIndex("prologue"));
if (!Bootstrapper::CompileBuiltin(isolate(), builtin_index++)) return false;
- DCHECK_EQ(builtin_index, Natives::GetIndex("runtime"));
- if (!Bootstrapper::CompileBuiltin(isolate(), builtin_index++)) return false;
{
// Builtin function for OpaqueReference -- a JSValue-based object,
@@ -4207,7 +4226,7 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
isolate()->initial_object_prototype(), JS_VALUE_TYPE, JSValue::kSize);
Handle<JSObject> prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
- Accessors::FunctionSetPrototype(opaque_reference_fun, prototype).Assert();
+ JSFunction::SetPrototype(opaque_reference_fun, prototype);
native_context()->set_opaque_reference_function(*opaque_reference_fun);
}
@@ -4244,20 +4263,13 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
 // Store the map for the %ObjectPrototype% after the natives have been compiled
// and the Object function has been set up.
- Handle<JSFunction> object_function(native_context()->object_function());
- DCHECK(JSObject::cast(object_function->initial_map()->prototype())
- ->HasFastProperties());
- native_context()->set_object_function_prototype_map(
- HeapObject::cast(object_function->initial_map()->prototype())->map());
-
- // Set up the map for Object.create(null) instances.
- Handle<Map> slow_object_with_null_prototype_map =
- Map::CopyInitialMap(handle(object_function->initial_map(), isolate()));
- slow_object_with_null_prototype_map->set_dictionary_map(true);
- Map::SetPrototype(slow_object_with_null_prototype_map,
- isolate()->factory()->null_value());
- native_context()->set_slow_object_with_null_prototype_map(
- *slow_object_with_null_prototype_map);
+ {
+ Handle<JSFunction> object_function(native_context()->object_function());
+ DCHECK(JSObject::cast(object_function->initial_map()->prototype())
+ ->HasFastProperties());
+ native_context()->set_object_function_prototype_map(
+ HeapObject::cast(object_function->initial_map()->prototype())->map());
+ }
 // Store the map for the %StringPrototype% after the natives have been compiled
// and the String function has been set up.
@@ -4341,23 +4353,29 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
concat->shared()->set_length(1);
// Install Array.prototype.forEach
- Handle<JSFunction> forEach = InstallArrayBuiltinFunction(
- proto, "forEach", Builtins::kArrayForEach, 2);
+ Handle<JSFunction> forEach =
+ InstallArrayBuiltinFunction(proto, "forEach", Builtins::kArrayForEach);
// Add forEach to the context.
native_context()->set_array_for_each_iterator(*forEach);
+ // Install Array.prototype.filter
+ InstallArrayBuiltinFunction(proto, "filter", Builtins::kArrayFilter);
+
+ // Install Array.prototype.map
+ InstallArrayBuiltinFunction(proto, "map", Builtins::kArrayMap);
+
// Install Array.prototype.every
- InstallArrayBuiltinFunction(proto, "every", Builtins::kArrayEvery, 2);
+ InstallArrayBuiltinFunction(proto, "every", Builtins::kArrayEvery);
// Install Array.prototype.some
- InstallArrayBuiltinFunction(proto, "some", Builtins::kArraySome, 2);
+ InstallArrayBuiltinFunction(proto, "some", Builtins::kArraySome);
// Install Array.prototype.reduce
- InstallArrayBuiltinFunction(proto, "reduce", Builtins::kArrayReduce, 2);
+ InstallArrayBuiltinFunction(proto, "reduce", Builtins::kArrayReduce);
// Install Array.prototype.reduceRight
InstallArrayBuiltinFunction(proto, "reduceRight",
- Builtins::kArrayReduceRight, 2);
+ Builtins::kArrayReduceRight);
}
// Install InternalArray.prototype.concat
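As context for the Array builtins installed above (the explicit argument-count parameter was dropped from InstallArrayBuiltinFunction, so the helper presumably derives the function's length itself): reduce is a left fold whose accumulator, when no initial value is passed, is seeded from the first element. A minimal C++ analogue of that algorithm only — illustrative, not V8's code-stub implementation:

#include <cassert>
#include <cstddef>
#include <functional>
#include <stdexcept>
#include <vector>

// Left fold matching Array.prototype.reduce's seeding rule: without an
// explicit initial value, the first element becomes the accumulator.
int Reduce(const std::vector<int>& xs,
           const std::function<int(int, int)>& f) {
  if (xs.empty()) throw std::runtime_error("reduce of empty array");
  int acc = xs[0];
  for (std::size_t i = 1; i < xs.size(); ++i) acc = f(acc, xs[i]);
  return acc;
}

int main() {
  assert(Reduce({1, 2, 3, 4}, [](int a, int b) { return a + b; }) == 10);
}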
@@ -4828,8 +4846,6 @@ bool Genesis::ConfigureGlobalObjects(
JSObject::ForceSetPrototype(global_proxy, global_object);
- native_context()->set_initial_array_prototype(
- JSArray::cast(native_context()->array_function()->prototype()));
native_context()->set_array_buffer_map(
native_context()->array_buffer_fun()->initial_map());
native_context()->set_js_map_map(
@@ -5006,29 +5022,6 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
}
-class NoTrackDoubleFieldsForSerializerScope {
- public:
- explicit NoTrackDoubleFieldsForSerializerScope(Isolate* isolate)
- : flag_(FLAG_track_double_fields), enabled_(false) {
- if (isolate->serializer_enabled()) {
-      // Disable tracking double fields because heap numbers are treated as
-      // immutable by the serializer.
- FLAG_track_double_fields = false;
- enabled_ = true;
- }
- }
-
- ~NoTrackDoubleFieldsForSerializerScope() {
- if (enabled_) {
- FLAG_track_double_fields = flag_;
- }
- }
-
- private:
- bool flag_;
- bool enabled_;
-};
-
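The class deleted above is the standard RAII save/restore idiom for a process-wide flag: the constructor conditionally overrides the flag and remembers the old value, and the destructor restores it on every exit path. A generic standalone sketch of the same idiom (names illustrative):

#include <cassert>

bool g_track_double_fields = true;  // Stand-in for FLAG_track_double_fields.

class ScopedFlagOverride {
 public:
  // Overrides *flag with |value| only when |enabled| holds (the deleted scope
  // used isolate->serializer_enabled() as the condition).
  ScopedFlagOverride(bool* flag, bool value, bool enabled)
      : flag_(flag), saved_(*flag), enabled_(enabled) {
    if (enabled_) *flag_ = value;
  }
  ~ScopedFlagOverride() {
    if (enabled_) *flag_ = saved_;  // Restored on every exit path.
  }

 private:
  bool* flag_;
  bool saved_;
  bool enabled_;
};

int main() {
  {
    ScopedFlagOverride scope(&g_track_double_fields, false, /*enabled=*/true);
    assert(!g_track_double_fields);
  }
  assert(g_track_double_fields);  // Restored when the scope ends.
}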
Genesis::Genesis(
Isolate* isolate, MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template,
@@ -5036,7 +5029,6 @@ Genesis::Genesis(
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer,
GlobalContextType context_type)
: isolate_(isolate), active_(isolate->bootstrapper()) {
- NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
result_ = Handle<Context>::null();
global_proxy_ = Handle<JSGlobalProxy>::null();
@@ -5091,7 +5083,7 @@ Genesis::Genesis(
AddToWeakNativeContextList(*native_context());
isolate->set_context(*native_context());
isolate->counters()->contexts_created_by_snapshot()->Increment();
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
if (FLAG_trace_maps) {
Handle<JSFunction> object_fun = isolate->object_function();
PrintF("[TraceMap: InitialMap map= %p SFI= %d_Object ]\n",
@@ -5176,7 +5168,6 @@ Genesis::Genesis(Isolate* isolate,
MaybeHandle<JSGlobalProxy> maybe_global_proxy,
v8::Local<v8::ObjectTemplate> global_proxy_template)
: isolate_(isolate), active_(isolate->bootstrapper()) {
- NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
result_ = Handle<Context>::null();
global_proxy_ = Handle<JSGlobalProxy>::null();
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index b549a21702..286ec1ad54 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -7,13 +7,14 @@
#include "src/factory.h"
#include "src/snapshot/natives.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
// A SourceCodeCache uses a FixedArray to store pairs of
// (OneByteString*, JSFunction*), mapping names of native code files
-// (runtime.js, etc.) to precompiled functions. Instead of mapping
+// (array.js, etc.) to precompiled functions. Instead of mapping
// names to functions it might make sense to let the JS2C tool
// generate an index for each native JS file.
class SourceCodeCache final BASE_EMBEDDED {
@@ -24,8 +25,9 @@ class SourceCodeCache final BASE_EMBEDDED {
cache_ = create_heap_objects ? isolate->heap()->empty_fixed_array() : NULL;
}
- void Iterate(ObjectVisitor* v) {
- v->VisitPointer(bit_cast<Object**, FixedArray**>(&cache_));
+ void Iterate(RootVisitor* v) {
+ v->VisitRootPointer(Root::kExtensions,
+ bit_cast<Object**, FixedArray**>(&cache_));
}
bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
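As the comment above describes, SourceCodeCache is a flat array holding (name, value) pairs at even/odd indices, searched linearly. A minimal sketch of that layout, with std types standing in for FixedArray and the V8 handle machinery (illustrative only):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Pairs are stored flat: cache[2i] is the name, cache[2i + 1] the value.
class SourceCodeCacheSketch {
 public:
  bool Lookup(const std::string& name, std::string* out) const {
    for (std::size_t i = 0; i + 1 < cache_.size(); i += 2) {
      if (cache_[i] == name) {
        *out = cache_[i + 1];
        return true;
      }
    }
    return false;
  }
  void Add(const std::string& name, const std::string& value) {
    cache_.push_back(name);
    cache_.push_back(value);
  }

 private:
  std::vector<std::string> cache_;  // Stand-in for the backing FixedArray.
};

int main() {
  SourceCodeCacheSketch cache;
  cache.Add("array", "compiled-array-js");
  std::string value;
  assert(cache.Lookup("array", &value) && value == "compiled-array-js");
}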
@@ -94,7 +96,7 @@ class Bootstrapper final {
void DetachGlobal(Handle<Context> env);
// Traverses the pointers for memory management.
- void Iterate(ObjectVisitor* v);
+ void Iterate(RootVisitor* v);
// Accessor for the native scripts source code.
Handle<String> GetNativeSource(NativeType type, int index);
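The Iterate change above reflects V8's split of root iteration into a dedicated RootVisitor, whose callback carries a tag (here Root::kExtensions) identifying which root set a slot belongs to. A simplified sketch of that visitor shape in plain C++ (types reduced to stand-ins):

#include <iostream>

enum class Root { kExtensions, kStrongRoots };

struct Object {};  // Stand-in for a heap object.

// The visitor receives the root category along with the slot, so a GC or
// serializer can treat different root sets differently.
class RootVisitor {
 public:
  virtual ~RootVisitor() = default;
  virtual void VisitRootPointer(Root root, Object** slot) = 0;
};

class CountingVisitor : public RootVisitor {
 public:
  void VisitRootPointer(Root root, Object** slot) override {
    (void)root;
    (void)slot;
    ++count_;
  }
  int count() const { return count_; }

 private:
  int count_ = 0;
};

struct CacheSketch {
  Object* cache_ = nullptr;
  void Iterate(RootVisitor* v) {
    v->VisitRootPointer(Root::kExtensions, &cache_);  // Report the one slot.
  }
};

int main() {
  CacheSketch cache;
  CountingVisitor visitor;
  cache.Iterate(&visitor);
  std::cout << visitor.count() << "\n";  // 1
}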
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index 05593bd419..286df2eea7 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -446,11 +446,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
namespace {
-void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
- Label post_instantiation_deopt_entry;
-
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
@@ -460,8 +456,6 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// -- sp[...]: constructor arguments
// -----------------------------------
- Isolate* isolate = masm->isolate();
-
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
@@ -469,189 +463,250 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Preserve the incoming parameters on the stack.
__ SmiTag(r0);
__ Push(cp, r0);
-
- if (create_implicit_receiver) {
- // Allocate the new receiver object.
- __ Push(r1, r3);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ mov(r4, r0);
- __ Pop(r1, r3);
-
- // ----------- S t a t e -------------
- // -- r1: constructor function
- // -- r3: new target
- // -- r4: newly allocated object
- // -----------------------------------
-
- // Retrieve smi-tagged arguments count from the stack.
- __ ldr(r0, MemOperand(sp));
- }
-
__ SmiUntag(r0);
- if (create_implicit_receiver) {
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(r4);
- __ push(r4);
- } else {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- }
-
- // Deoptimizer re-enters stub code here.
- __ bind(&post_instantiation_deopt_entry);
+ // The receiver for the builtin/api call.
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
// Set up pointer to last argument.
- __ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
- // r0: number of arguments
- // r1: constructor function
- // r2: address of last argument (caller sp)
- // r3: new target
- // r4: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: number of arguments (smi-tagged)
Label loop, entry;
- __ SmiTag(r4, r0);
+ __ mov(r5, r0);
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments (untagged)
+ // -- r1: constructor function
+ // -- r3: new target
+ // -- r4: pointer to last argument
+ // -- r5: counter
+ // -- sp[0*kPointerSize]: the hole (receiver)
+ // -- sp[1*kPointerSize]: number of arguments (tagged)
+ // -- sp[2*kPointerSize]: context
+ // -----------------------------------
__ b(&entry);
__ bind(&loop);
- __ ldr(ip, MemOperand(r2, r4, LSL, kPointerSizeLog2 - 1));
+ __ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
__ push(ip);
__ bind(&entry);
- __ sub(r4, r4, Operand(2), SetCC);
+ __ sub(r5, r5, Operand(1), SetCC);
__ b(ge, &loop);
// Call the function.
- // r0: number of arguments
+ // r0: number of arguments (untagged)
// r1: constructor function
// r3: new target
ParameterCount actual(r0);
__ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
- // Store offset of return address for deoptimizer.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
- }
-
// Restore context from the frame.
- // r0: result
- // sp[0]: receiver
- // sp[1]: number of arguments (smi-tagged)
__ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
-
- if (create_implicit_receiver) {
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r0: result
- // sp[0]: receiver
- // sp[1]: number of arguments (smi-tagged)
- __ JumpIfSmi(r0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r1, r3, FIRST_JS_RECEIVER_TYPE);
- __ b(ge, &exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ldr(r0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: number of arguments (smi-tagged)
- __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
- } else {
- __ ldr(r1, MemOperand(sp));
- }
-
+ // Restore smi-tagged arguments count from the frame.
+ __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
- // ES6 9.2.2. Step 13+
- // Check that the result is not a Smi, indicating that the constructor result
- // from a derived class is neither undefined nor an Object.
- if (check_derived_construct) {
- Label do_throw, dont_throw;
- __ JumpIfSmi(r0, &do_throw);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(r0, r3, r3, FIRST_JS_RECEIVER_TYPE);
- __ b(ge, &dont_throw);
- __ bind(&do_throw);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
- }
- __ bind(&dont_throw);
- }
-
- __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
+ // Remove caller arguments from the stack and return.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
__ add(sp, sp, Operand(kPointerSize));
- if (create_implicit_receiver) {
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
- }
__ Jump(lr);
+}
+
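The epilogue above depends on smi arithmetic: the STATIC_ASSERT pins kSmiTagSize == 1 and kSmiTag == 0, so a smi-tagged count is the untagged count shifted left by one, and shifting it left by kPointerSizeLog2 - kSmiTagSize multiplies the untagged count by kPointerSize in a single instruction; the extra kPointerSize add then drops the receiver slot. A worked check of that arithmetic in C++ (constants hard-coded for the 32-bit ARM case shown here):

#include <cassert>
#include <cstdint>

int main() {
  const int kPointerSizeLog2 = 2;  // 32-bit ARM: kPointerSize == 4.
  const int kSmiTagSize = 1;       // Smis carry a one-bit tag of 0.

  uint32_t argc = 3;
  uint32_t argc_smi = argc << kSmiTagSize;  // Smi-tagged count from the frame.

  // Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize) in the stub:
  uint32_t bytes = argc_smi << (kPointerSizeLog2 - kSmiTagSize);
  assert(bytes == argc * 4);  // Same as untagged count * kPointerSize.

  uint32_t sp = 0x1000;
  sp += bytes + 4;  // Plus one kPointerSize for the receiver slot.
  assert(sp == 0x1000 + 4 * (argc + 1));
}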
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Generate_JSConstructStubGeneric(MacroAssembler* masm,
+ bool restrict_constructor_return) {
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments (untagged)
+ // -- r1: constructor function
+ // -- r3: new target
+ // -- cp: context
+ // -- lr: return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
- // Store offset of trampoline address for deoptimizer. This is the bailout
- // point after the receiver instantiation but before the function invocation.
- // We need to restore some registers in order to continue the above code.
- if (create_implicit_receiver && !is_api_function) {
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(r0);
+ __ Push(cp, r0, r1, r3);
+
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- r1 and sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments (tagged)
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldrb(r4,
+ FieldMemOperand(r4, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ tst(r4, Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ b(ne, &not_create_implicit_receiver);
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
+ r4, r5);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
+ __ b(&post_instantiation_deopt_entry);
+
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+
+ // ----------- S t a t e -------------
+ // -- r0: receiver
+ // -- Slot 3 / sp[0*kPointerSize]: new target
+ // -- Slot 2 / sp[1*kPointerSize]: constructor function
+ // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[3*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(r3);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(r0, r0);
// ----------- S t a t e -------------
- // -- r0 : newly allocated object
- // -- sp[0] : constructor function
+ // -- r3: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
- __ pop(r1);
- __ push(r0);
- __ push(r0);
-
- // Retrieve smi-tagged arguments count from the stack.
+ // Restore constructor function and argument count.
+ __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(r0);
- // Retrieve the new target value from the stack. This was placed into the
- // frame description in place of the receiver by the optimizing compiler.
- __ add(r3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2));
+ // Set up pointer to last argument.
+ __ add(r4, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // Continue with constructor function invocation.
- __ b(&post_instantiation_deopt_entry);
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(r5, r0);
+ // ----------- S t a t e -------------
+ // -- r0: number of arguments (untagged)
+ // -- r3: new target
+ // -- r4: pointer to last argument
+ // -- r5: counter
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- r1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
+ __ push(ip);
+ __ bind(&entry);
+ __ sub(r5, r5, Operand(1), SetCC);
+ __ b(ge, &loop);
+
+ // Call the function.
+ ParameterCount actual(r0);
+ __ InvokeFunction(r1, r3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // ----------- S t a t e -------------
+ // -- r0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // Restore the context from the frame.
+ __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, other_result, leave_frame;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfRoot(r0, Heap::kUndefinedValueRootIndex, &use_receiver);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(r0, &other_result);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r4, r5, FIRST_JS_RECEIVER_TYPE);
+ __ b(ge, &leave_frame);
+
+ __ bind(&other_result);
+ // The result is now neither undefined nor an object.
+ if (restrict_constructor_return) {
+      // Throw if the constructor function is a class constructor.
+ __ ldr(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ ldr(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ ldrb(r4,
+ FieldMemOperand(r4, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ tst(r4, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ b(eq, &use_receiver);
+
+ } else {
+ __ b(&use_receiver);
+ }
+
+ __ bind(&do_throw);
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
+ __ JumpIfRoot(r0, Heap::kTheHoleValueRootIndex, &do_throw);
+
+ __ bind(&leave_frame);
+ // Restore smi-tagged arguments count from the frame.
+ __ ldr(r1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
}
+ // Remove caller arguments from the stack and return.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(sp, sp, Operand(kPointerSize));
+ __ Jump(lr);
}
-
} // namespace
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
+void Builtins::Generate_JSConstructStubGenericRestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, true);
+}
+void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStubForDerived(
- MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, true);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
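The two new entry points above differ only in restrict_constructor_return, the flag that drives the use_receiver/other_result logic in the generic stub: an object result always wins, undefined falls back to the implicit receiver, and any other value throws only under the restricted rules (the stub additionally gates the throw on the class-constructor bit, which this model folds into the flag). A compact C++ model of that decision — illustrative, since V8 does this on tagged values in assembly:

#include <cassert>
#include <stdexcept>

enum class ResultKind { kUndefined, kObject, kOther };

// Mirrors the use_receiver/other_result logic: objects win, undefined falls
// back to the receiver, and anything else throws only under the restricted
// (class constructor) rules.
int SelectConstructResult(ResultKind result, int result_value, int receiver,
                          bool restrict_constructor_return) {
  switch (result) {
    case ResultKind::kObject:
      return result_value;
    case ResultKind::kUndefined:
      return receiver;
    case ResultKind::kOther:
      if (restrict_constructor_return)
        throw std::runtime_error("constructor returned non-object");
      return receiver;
  }
  return receiver;  // Unreachable; keeps compilers happy.
}

int main() {
  assert(SelectConstructResult(ResultKind::kOther, 0, 42, false) == 42);
  bool threw = false;
  try {
    SelectConstructResult(ResultKind::kOther, 0, 42, true);
  } catch (const std::runtime_error&) {
    threw = true;
  }
  assert(threw);
}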
// static
@@ -830,7 +885,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// r2: receiver
// r3: argc
// r4: argv
- // r5-r6, r8 (if !FLAG_enable_embedded_constant_pool) and cp may be clobbered
+ // r5-r6, r8 and cp may be clobbered
ProfileEntryHookStub::MaybeCallEntryHook(masm);
// Enter an internal frame.
@@ -880,9 +935,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
- if (!FLAG_enable_embedded_constant_pool) {
- __ mov(r8, Operand(r4));
- }
+ __ mov(r8, Operand(r4));
if (kR9Available == 1) {
__ mov(r9, Operand(r4));
}
@@ -910,6 +963,38 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void ReplaceClosureEntryWithOptimizedCode(
+ MacroAssembler* masm, Register optimized_code_entry, Register closure,
+ Register scratch1, Register scratch2, Register scratch3) {
+ Register native_context = scratch1;
+
+ // Store code entry in the closure.
+ __ add(optimized_code_entry, optimized_code_entry,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ str(optimized_code_entry,
+ FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+
+ // Link the closure into the optimized function list.
+ __ ldr(native_context, NativeContextMemOperand());
+ __ ldr(scratch2,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ str(scratch2,
+ FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
+ scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ str(closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Save closure before the write barrier.
+ __ mov(scratch2, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure,
+ scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ mov(closure, scratch2);
+}
+
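ReplaceClosureEntryWithOptimizedCode above does two things: it patches the closure's code entry to point past the Code header, and it pushes the closure onto the native context's OPTIMIZED_FUNCTIONS_LIST — a head insertion into an intrusive singly linked list threaded through JSFunction::kNextFunctionLinkOffset. The RecordWrite* calls are GC write barriers with no analogue in plain C++. A sketch of just the pointer manipulation (types illustrative):

#include <cassert>

struct Closure {
  const void* code_entry = nullptr;
  Closure* next_function_link = nullptr;  // Intrusive list link.
};

struct NativeContext {
  Closure* optimized_functions_list = nullptr;
};

void LinkIntoOptimizedList(NativeContext* context, Closure* closure,
                           const void* optimized_code_entry) {
  closure->code_entry = optimized_code_entry;   // Patch the entry point.
  closure->next_function_link = context->optimized_functions_list;
  context->optimized_functions_list = closure;  // Head insertion.
}

int main() {
  NativeContext context;
  Closure a, b;
  int code_a = 0, code_b = 0;
  LinkIntoOptimizedList(&context, &a, &code_a);
  LinkIntoOptimizedList(&context, &b, &code_b);
  assert(context.optimized_functions_list == &b);
  assert(b.next_function_link == &a);
  assert(a.code_entry == &code_a);
}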
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
Register args_count = scratch;
@@ -935,7 +1020,6 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
// o r1: the JS function object being called.
// o r3: the new target
// o cp: our context
-// o pp: the caller's constant pool pointer (if enabled)
// o fp: the caller's frame pointer
// o sp: stack pointer
// o lr: return address
@@ -951,6 +1035,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(r1);
+ // First check if there is optimized code in the feedback vector which we
+ // could call instead.
+ Label switch_to_optimized_code;
+ Register optimized_code_entry = r4;
+ __ ldr(r0, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
+ __ ldr(r0, FieldMemOperand(r0, Cell::kValueOffset));
+ __ ldr(
+ optimized_code_entry,
+ FieldMemOperand(r0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
+ __ ldr(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
@@ -1063,6 +1161,30 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ str(r4, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(r1, r4, r5);
__ Jump(r4);
+
+  // If there is optimized code in the type feedback vector, check if it is good
+  // to run, and if so, self-heal the closure and call the optimized code.
+ __ bind(&switch_to_optimized_code);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ Label gotta_call_runtime;
+
+ // Check if the optimized code is marked for deopt.
+ __ ldr(r5, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
+
+ __ b(ne, &gotta_call_runtime);
+
+  // Optimized code is good; get it into the closure, link the closure into
+  // the optimized functions list, then tail call the optimized code.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r1, r6, r5,
+ r2);
+ __ Jump(optimized_code_entry);
+
+  // Optimized code is marked for deopt; bail out to the CompileLazy runtime
+  // function, which will clear the feedback vector's optimized code slot.
+ __ bind(&gotta_call_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
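The bailout test above reads the code object's kKindSpecificFlags1Offset word and tests it against 1 << Code::kMarkedForDeoptimizationBit. The same check in C++, for mapping the assembly back to a flags-word bit test (the bit position below is an illustrative placeholder, not V8's actual value):

#include <cassert>
#include <cstdint>

const int kMarkedForDeoptimizationBit = 3;  // Illustrative bit position.

bool MarkedForDeoptimization(uint32_t kind_specific_flags1) {
  // tst r5, Operand(1 << Code::kMarkedForDeoptimizationBit); b ne, ...
  return (kind_specific_flags1 & (1u << kMarkedForDeoptimizationBit)) != 0;
}

int main() {
  assert(!MarkedForDeoptimization(0));
  assert(MarkedForDeoptimization(1u << kMarkedForDeoptimizationBit));
}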
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1300,114 +1422,50 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -- r1 : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label gotta_call_runtime;
Label try_shared;
- Label loop_top, loop_bottom;
- Register argument_count = r0;
Register closure = r1;
- Register new_target = r3;
- Register map = argument_count;
Register index = r2;
// Do we have a valid feedback vector?
__ ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ ldr(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime_no_stack);
-
- __ push(argument_count);
- __ push(new_target);
- __ push(closure);
-
- __ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(map,
- FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ cmp(index, Operand(Smi::FromInt(2)));
- __ b(lt, &try_shared);
-
- // r3 : native context
- // r2 : length / index
- // r0 : optimized code map
- // stack[0] : new target
- // stack[4] : closure
- Register native_context = r3;
- __ ldr(native_context, NativeContextMemOperand());
-
- __ bind(&loop_top);
- Register temp = r1;
- Register array_pointer = r5;
+ __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
- // Does the native context match?
- __ add(array_pointer, map, Operand::PointerOffsetFromSmiKey(index));
- __ ldr(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousContext));
- __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ cmp(temp, native_context);
- __ b(ne, &loop_bottom);
-
- // Code available?
+ // Is optimized code available in the feedback vector?
Register entry = r4;
- __ ldr(entry,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ ldr(entry, FieldMemOperand(
+ index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found code. Get it into the closure and return.
- __ pop(closure);
- // Store code entry in the closure.
- __ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(closure, entry, r5);
+  // Found code; check if it is marked for deopt, and if so, call into the
+  // runtime to clear the optimized code slot.
+ __ ldr(r5, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
+ __ tst(r5, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ b(ne, &gotta_call_runtime);
- // Link the closure into the optimized function list.
- // r4 : code entry
- // r3 : native context
- // r1 : closure
- __ ldr(r5,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ str(r5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r5, r0,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ str(closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Save closure before the write barrier.
- __ mov(r5, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure, r0,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
- __ mov(closure, r5);
- __ pop(new_target);
- __ pop(argument_count);
+  // Code is good; get it into the closure and tail call.
+ ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r6, r5, r2);
__ Jump(entry);
- __ bind(&loop_bottom);
- __ sub(index, index, Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ cmp(index, Operand(Smi::FromInt(1)));
- __ b(gt, &loop_top);
-
- // We found no code.
+ // We found no optimized code.
__ bind(&try_shared);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
__ ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ ldrb(r5, FieldMemOperand(entry,
SharedFunctionInfo::kMarkedForTierUpByteOffset));
__ tst(r5, Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ b(ne, &gotta_call_runtime_no_stack);
+ __ b(ne, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ ldr(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Move(r5, masm->CodeObject());
__ cmp(entry, r5);
- __ b(eq, &gotta_call_runtime_no_stack);
+ __ b(eq, &gotta_call_runtime);
// Install the SFI's code entry.
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1416,10 +1474,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Jump(entry);
__ bind(&gotta_call_runtime);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
- __ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
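Taken together, the rewritten CompileLazy above is a three-way dispatch: run optimized code from the feedback vector if present and not marked for deopt; otherwise run the SharedFunctionInfo's code unless it is marked for tier-up or is CompileLazy itself; otherwise fall through to the runtime. A sketch of that decision tree as plain C++ (the struct fields are illustrative stand-ins for the checks in the stub):

#include <cassert>

enum class Dispatch { kOptimizedCode, kSharedCode, kRuntime };

struct LazyCompileInputs {
  bool has_feedback_vector;
  bool has_optimized_code;
  bool optimized_code_marked_for_deopt;
  bool shared_marked_for_tier_up;
  bool shared_code_is_compile_lazy;
};

Dispatch DecideCompileLazy(const LazyCompileInputs& in) {
  if (!in.has_feedback_vector) return Dispatch::kRuntime;
  if (in.has_optimized_code) {
    // Marked-for-deopt code must be evicted by the runtime, not run.
    return in.optimized_code_marked_for_deopt ? Dispatch::kRuntime
                                              : Dispatch::kOptimizedCode;
  }
  if (in.shared_marked_for_tier_up) return Dispatch::kRuntime;
  if (in.shared_code_is_compile_lazy) return Dispatch::kRuntime;
  return Dispatch::kSharedCode;
}

int main() {
  assert(DecideCompileLazy({true, true, false, false, false}) ==
         Dispatch::kOptimizedCode);
  assert(DecideCompileLazy({true, true, true, false, false}) ==
         Dispatch::kRuntime);
  assert(DecideCompileLazy({true, false, false, false, true}) ==
         Dispatch::kRuntime);
  assert(DecideCompileLazy({true, false, false, false, false}) ==
         Dispatch::kSharedCode);
}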
@@ -1676,10 +1730,6 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
- if (FLAG_enable_embedded_constant_pool) {
- __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r0);
- }
-
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ ldr(r1, FieldMemOperand(
@@ -1937,7 +1987,6 @@ static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
__ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
- (FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
__ add(fp, sp,
Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
@@ -2104,54 +2153,54 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r1 : the target to call (can be any Object)
- // -- r2 : start index (to support rest parameters)
- // -- lr : return address.
- // -- sp[0] : thisArgument
+ // -- r0 : the number of arguments (not including the receiver)
+ // -- r3 : the new.target (for [[Construct]] calls)
+ // -- r1 : the target to call (can be any Object)
+ // -- r2 : start index (to support rest parameters)
// -----------------------------------
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
- __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ ldr(r4, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(ip, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &arguments_adaptor);
{
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0, FieldMemOperand(
- r0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(r3, fp);
+ __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ ldr(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r5, FieldMemOperand(
+ r5, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(r4, fp);
}
__ b(&arguments_done);
__ bind(&arguments_adaptor);
{
// Load the length from the ArgumentsAdaptorFrame.
- __ ldr(r0, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
- Label stack_empty, stack_done, stack_overflow;
- __ SmiUntag(r0);
- __ sub(r0, r0, r2, SetCC);
- __ b(le, &stack_empty);
+ Label stack_done, stack_overflow;
+ __ SmiUntag(r5);
+ __ sub(r5, r5, r2, SetCC);
+ __ b(le, &stack_done);
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, r0, r2, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r5, r2, &stack_overflow);
// Forward the arguments from the caller frame.
{
Label loop;
- __ add(r3, r3, Operand(kPointerSize));
- __ mov(r2, r0);
+ __ add(r4, r4, Operand(kPointerSize));
+ __ add(r0, r0, r5);
__ bind(&loop);
{
- __ ldr(ip, MemOperand(r3, r2, LSL, kPointerSizeLog2));
+ __ ldr(ip, MemOperand(r4, r5, LSL, kPointerSizeLog2));
__ push(ip);
- __ sub(r2, r2, Operand(1), SetCC);
+ __ sub(r5, r5, Operand(1), SetCC);
__ b(ne, &loop);
}
}
@@ -2159,13 +2208,9 @@ void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
__ b(&stack_done);
__ bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&stack_empty);
- {
- // We just pass the receiver, which is already on the stack.
- __ mov(r0, Operand(0));
- }
__ bind(&stack_done);
+ // Tail-call to the {code} handler.
__ Jump(code, RelocInfo::CODE_TARGET);
}
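Generate_ForwardVarargs above now threads the outgoing argument count through r0: it reads the caller's argument count (from the arguments adaptor frame when one is present), subtracts the start index, pushes that many slots from the caller's frame starting at the highest one, and adds the forwarded count to r0. The same arithmetic in C++, with vectors standing in for the two frames (illustrative):

#include <cassert>
#include <cstddef>
#include <vector>

// Forward caller arguments [start_index, caller_args.size()) onto the stack,
// returning the updated outgoing argument count (r0 in the stub).
std::size_t ForwardVarargs(const std::vector<int>& caller_args,
                           std::size_t start_index, std::size_t argc,
                           std::vector<int>* stack) {
  if (caller_args.size() <= start_index) return argc;  // stack_done fast path.
  std::size_t count = caller_args.size() - start_index;
  // The stub pushes from the highest slot downwards; same order here.
  for (std::size_t i = caller_args.size(); i > start_index; --i) {
    stack->push_back(caller_args[i - 1]);
  }
  return argc + count;
}

int main() {
  std::vector<int> stack;
  std::size_t argc = ForwardVarargs({10, 20, 30, 40}, 1, 0, &stack);
  assert(argc == 3);
  assert((stack == std::vector<int>{40, 30, 20}));
}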
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index 9824ab64a4..7e96dc4fb3 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -446,86 +446,50 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
namespace {
-void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Label post_instantiation_deopt_entry;
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
// -- x3 : new target
+ // -- cp : context
// -- lr : return address
- // -- cp : context pointer
// -- sp[...]: constructor arguments
// -----------------------------------
ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
- Isolate* isolate = masm->isolate();
-
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
- // Preserve the four incoming parameters on the stack.
- Register argc = x0;
- Register constructor = x1;
- Register new_target = x3;
-
// Preserve the incoming parameters on the stack.
- __ SmiTag(argc);
- __ Push(cp, argc);
-
- if (create_implicit_receiver) {
- // Allocate the new receiver object.
- __ Push(constructor, new_target);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ Mov(x4, x0);
- __ Pop(new_target, constructor);
-
- // ----------- S t a t e -------------
- // -- x1: constructor function
- // -- x3: new target
- // -- x4: newly allocated object
- // -----------------------------------
-
- // Reload the number of arguments from the stack.
- // Set it up in x0 for the function call below.
- // jssp[0]: number of arguments (smi-tagged)
- __ Peek(argc, 0); // Load number of arguments.
- }
-
- __ SmiUntag(argc);
-
- if (create_implicit_receiver) {
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(x4, x4);
- } else {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- }
+ __ SmiTag(x0);
+ __ Push(cp, x0);
+ __ SmiUntag(x0);
- // Deoptimizer re-enters stub code here.
- __ Bind(&post_instantiation_deopt_entry);
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
// Copy arguments and receiver to the expression stack.
// Copy 2 values every loop to use ldp/stp.
- // x0: number of arguments
- // x1: constructor function
- // x2: address of last argument (caller sp)
- // x3: new target
- // jssp[0]: receiver
- // jssp[1]: receiver
- // jssp[2]: number of arguments (smi-tagged)
- // Compute the start address of the copy in x3.
- __ Add(x4, x2, Operand(argc, LSL, kPointerSizeLog2));
+
+ // Compute pointer behind the first argument.
+ __ Add(x4, x2, Operand(x0, LSL, kPointerSizeLog2));
Label loop, entry, done_copying_arguments;
+ // ----------- S t a t e -------------
+ // -- x0: number of arguments (untagged)
+ // -- x1: constructor function
+ // -- x3: new target
+ // -- x2: pointer to last argument (caller sp)
+ // -- x4: pointer to argument last copied
+ // -- sp[0*kPointerSize]: the hole (receiver)
+ // -- sp[1*kPointerSize]: number of arguments (tagged)
+ // -- sp[2*kPointerSize]: context
+ // -----------------------------------
__ B(&entry);
__ Bind(&loop);
__ Ldp(x10, x11, MemOperand(x4, -2 * kPointerSize, PreIndex));
@@ -543,126 +507,226 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// x0: number of arguments
// x1: constructor function
// x3: new target
- ParameterCount actual(argc);
- __ InvokeFunction(constructor, new_target, actual, CALL_FUNCTION,
+ ParameterCount actual(x0);
+ __ InvokeFunction(x1, x3, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
- // Store offset of return address for deoptimizer.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
- }
-
// Restore the context from the frame.
- // x0: result
- // jssp[0]: receiver
- // jssp[1]: number of arguments (smi-tagged)
__ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
-
- if (create_implicit_receiver) {
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // x0: result
- // jssp[0]: receiver (newly allocated object)
- // jssp[1]: number of arguments (smi-tagged)
- __ JumpIfSmi(x0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- __ JumpIfObjectType(x0, x1, x3, FIRST_JS_RECEIVER_TYPE, &exit, ge);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ Bind(&use_receiver);
- __ Peek(x0, 0);
-
- // Remove the receiver from the stack, remove caller arguments, and
- // return.
- __ Bind(&exit);
- // x0: result
- // jssp[0]: receiver (newly allocated object)
- // jssp[1]: number of arguments (smi-tagged)
- __ Peek(x1, 1 * kXRegSize);
- } else {
- __ Peek(x1, 0);
- }
-
+ // Restore smi-tagged arguments count from the frame.
+ __ Peek(x1, 0);
// Leave construct frame.
}
- // ES6 9.2.2. Step 13+
- // Check that the result is not a Smi, indicating that the constructor result
- // from a derived class is neither undefined nor an Object.
- if (check_derived_construct) {
- Label do_throw, dont_throw;
- __ JumpIfSmi(x0, &do_throw);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ JumpIfObjectType(x0, x3, x3, FIRST_JS_RECEIVER_TYPE, &dont_throw, ge);
- __ Bind(&do_throw);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
- }
- __ Bind(&dont_throw);
- }
-
+ // Remove caller arguments from the stack and return.
__ DropBySMI(x1);
__ Drop(1);
- if (create_implicit_receiver) {
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
- }
__ Ret();
+}
- // Store offset of trampoline address for deoptimizer. This is the bailout
- // point after the receiver instantiation but before the function invocation.
- // We need to restore some registers in order to continue the above code.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
- masm->pc_offset());
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Generate_JSConstructStubGeneric(MacroAssembler* masm,
+ bool restrict_constructor_return) {
+ // ----------- S t a t e -------------
+ // -- x0 : number of arguments
+ // -- x1 : constructor function
+ // -- x3 : new target
+ // -- lr : return address
+ // -- cp : context pointer
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(x0);
+ __ Push(cp, x0, x1, x3);
// ----------- S t a t e -------------
- // -- x0 : newly allocated object
- // -- sp[0] : constructor function
+ // -- sp[0*kPointerSize]: new target
+ // -- x1 and sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments (tagged)
+ // -- sp[3*kPointerSize]: context
// -----------------------------------
- __ Pop(x1);
+ __ Ldr(x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrb(x4,
+ FieldMemOperand(x4, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ tst(x4, Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ B(ne, &not_create_implicit_receiver);
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
+ x4, x5);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
+ __ B(&post_instantiation_deopt_entry);
+
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(x0, Heap::kTheHoleValueRootIndex);
+
+ // ----------- S t a t e -------------
+ // -- x0: receiver
+ // -- Slot 3 / sp[0*kPointerSize]: new target
+ // -- Slot 2 / sp[1*kPointerSize]: constructor function
+ // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[3*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(x3);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
__ Push(x0, x0);
- // Retrieve smi-tagged arguments count from the stack.
+ // ----------- S t a t e -------------
+ // -- x3: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+
+ // Restore constructor function and argument count.
+ __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ Ldr(x0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(x0);
- // Retrieve the new target value from the stack. This was placed into the
- // frame description in place of the receiver by the optimizing compiler.
- __ Add(x3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- __ Ldr(x3, MemOperand(x3, x0, LSL, kPointerSizeLog2));
+ // Set up pointer to last argument.
+ __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
+
+ // Copy arguments and receiver to the expression stack.
+ // Copy 2 values every loop to use ldp/stp.
- // Continue with constructor function invocation.
- __ B(&post_instantiation_deopt_entry);
+ // Compute pointer behind the first argument.
+ __ Add(x4, x2, Operand(x0, LSL, kPointerSizeLog2));
+ Label loop, entry, done_copying_arguments;
+ // ----------- S t a t e -------------
+ // -- x0: number of arguments (untagged)
+ // -- x3: new target
+ // -- x2: pointer to last argument (caller sp)
+ // -- x4: pointer to argument last copied
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- x1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+ __ B(&entry);
+ __ Bind(&loop);
+ __ Ldp(x10, x11, MemOperand(x4, -2 * kPointerSize, PreIndex));
+ __ Push(x11, x10);
+ __ Bind(&entry);
+ __ Cmp(x4, x2);
+ __ B(gt, &loop);
+    // Because we copied values 2 by 2, we may have copied one extra value.
+ // Drop it if that is the case.
+ __ B(eq, &done_copying_arguments);
+ __ Drop(1);
+ __ Bind(&done_copying_arguments);
+
+ // Call the function.
+ ParameterCount actual(x0);
+ __ InvokeFunction(x1, x3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // ----------- S t a t e -------------
+ // -- x0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // Restore the context from the frame.
+ __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, other_result, leave_frame;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ CompareRoot(x0, Heap::kUndefinedValueRootIndex);
+ __ B(eq, &use_receiver);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(x0, &other_result);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ JumpIfObjectType(x0, x4, x5, FIRST_JS_RECEIVER_TYPE, &leave_frame, ge);
+
+ __ Bind(&other_result);
+ // The result is now neither undefined nor an object.
+ if (restrict_constructor_return) {
+      // Throw if the constructor function is a class constructor.
+ __ Ldr(x4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ Ldr(x4, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrb(x4,
+ FieldMemOperand(x4, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ tst(x4, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ B(eq, &use_receiver);
+
+ } else {
+ __ B(&use_receiver);
+ }
+
+ __ Bind(&do_throw);
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ Bind(&use_receiver);
+ __ Peek(x0, 0 * kPointerSize);
+ __ CompareRoot(x0, Heap::kTheHoleValueRootIndex);
+ __ B(eq, &do_throw);
+
+ __ Bind(&leave_frame);
+ // Restore smi-tagged arguments count from the frame.
+ __ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
}
+ // Remove caller arguments from the stack and return.
+ __ DropBySMI(x1);
+ __ Drop(1);
+ __ Ret();
}
-
} // namespace
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
+void Builtins::Generate_JSConstructStubGenericRestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, true);
+}
+void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStubForDerived(
- MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, true);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -923,6 +987,36 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void ReplaceClosureEntryWithOptimizedCode(
+ MacroAssembler* masm, Register optimized_code_entry, Register closure,
+ Register scratch1, Register scratch2, Register scratch3) {
+ Register native_context = scratch1;
+
+ // Store code entry in the closure.
+ __ Add(optimized_code_entry, optimized_code_entry,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Str(optimized_code_entry,
+ FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+
+ // Link the closure into the optimized function list.
+ __ Ldr(native_context, NativeContextMemOperand());
+ __ Ldr(scratch2,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ Str(scratch2,
+ FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
+ scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ Str(closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ Mov(scratch2, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, scratch2,
+ scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
+}
+
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
Register args_count = scratch;
@@ -964,6 +1058,20 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(lr, fp, cp, x1);
__ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+ // First check if there is optimized code in the feedback vector which we
+ // could call instead.
+ Label switch_to_optimized_code;
+ Register optimized_code_entry = x7;
+ __ Ldr(x0, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
+ __ Ldr(x0, FieldMemOperand(x0, Cell::kValueOffset));
+ __ Ldr(
+ optimized_code_entry,
+ FieldMemOperand(x0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
+ __ Ldr(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ Ldr(x0, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
@@ -1079,6 +1187,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Str(x7, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(x1, x7, x5);
__ Jump(x7);
+
+  // If there is optimized code in the type feedback vector, check if it is good
+  // to run, and if so, self-heal the closure and call the optimized code.
+ __ bind(&switch_to_optimized_code);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ Label gotta_call_runtime;
+
+ // Check if the optimized code is marked for deopt.
+ __ Ldr(w8, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ TestAndBranchIfAnySet(w8, 1 << Code::kMarkedForDeoptimizationBit,
+ &gotta_call_runtime);
+
+  // Optimized code is good; get it into the closure, link the closure into
+  // the optimized functions list, then tail call the optimized code.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, x1, x4, x5,
+ x13);
+ __ Jump(optimized_code_entry);
+
+  // Optimized code is marked for deopt; bail out to the CompileLazy runtime
+  // function, which will clear the feedback vector's optimized code slot.
+ __ bind(&gotta_call_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1324,10 +1455,8 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
- Label loop_top, loop_bottom;
Register closure = x1;
- Register map = x13;
Register index = x2;
// Do we have a valid feedback vector?
@@ -1335,71 +1464,26 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Ldr(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
- __ Ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ Ldr(map,
- FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ Ldrsw(index, UntagSmiFieldMemOperand(map, FixedArray::kLengthOffset));
- __ Cmp(index, Operand(2));
- __ B(lt, &try_shared);
-
- // x3 : native context
- // x2 : length / index
- // x13 : optimized code map
- // stack[0] : new target
- // stack[4] : closure
- Register native_context = x4;
- __ Ldr(native_context, NativeContextMemOperand());
-
- __ Bind(&loop_top);
- Register temp = x5;
- Register array_pointer = x6;
-
- // Does the native context match?
- __ Add(array_pointer, map, Operand(index, LSL, kPointerSizeLog2));
- __ Ldr(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousContext));
- __ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ Cmp(temp, native_context);
- __ B(ne, &loop_bottom);
-
- // Code available?
+ // Is optimized code available in the feedback vector?
Register entry = x7;
- __ Ldr(entry,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ Ldr(entry, FieldMemOperand(
+ index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found code. Get it into the closure and return.
- __ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(closure, entry, x5);
+  // Found code. Check whether it is marked for deopt; if so, call into the
+  // runtime to clear the optimized code slot.
+ __ Ldr(w8, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
+ __ TestAndBranchIfAnySet(w8, 1 << Code::kMarkedForDeoptimizationBit,
+ &gotta_call_runtime);
- // Link the closure into the optimized function list.
- // x7 : code entry
- // x4 : native context
- // x1 : closure
- __ Ldr(x8,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ Str(x8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, x8, x13,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ Str(closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ Mov(x5, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, x5, x13,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
+  // The code is good; get it into the closure and tail-call it.
+ ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, x4, x5, x13);
__ Jump(entry);
- __ Bind(&loop_bottom);
- __ Sub(index, index, Operand(SharedFunctionInfo::kEntryLength));
- __ Cmp(index, Operand(1));
- __ B(gt, &loop_top);
-
- // We found no code.
+ // We found no optimized code.
+ Register temp = x5;
__ Bind(&try_shared);
__ Ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -2202,54 +2286,54 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- x1 : the target to call (can be any Object)
- // -- x2 : start index (to support rest parameters)
- // -- lr : return address.
- // -- sp[0] : thisArgument
+ // -- x0 : the number of arguments (not including the receiver)
+ // -- x3 : the new.target (for [[Construct]] calls)
+ // -- x1 : the target to call (can be any Object)
+ // -- x2 : start index (to support rest parameters)
// -----------------------------------
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
- __ Ldr(x3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ Ldr(x5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ldr(x4, MemOperand(x5, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ B(eq, &arguments_adaptor);
{
- __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Ldr(x0, FieldMemOperand(x0, JSFunction::kSharedFunctionInfoOffset));
- __ Ldrsw(x0, FieldMemOperand(
- x0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ Mov(x3, fp);
+ __ Ldr(x6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ldr(x6, FieldMemOperand(x6, JSFunction::kSharedFunctionInfoOffset));
+ __ Ldrsw(x6, FieldMemOperand(
+ x6, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Mov(x5, fp);
}
__ B(&arguments_done);
__ Bind(&arguments_adaptor);
{
// Just load the length from ArgumentsAdaptorFrame.
- __ Ldrsw(x0, UntagSmiMemOperand(
- x3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Ldrsw(x6, UntagSmiMemOperand(
+ x5, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ Bind(&arguments_done);
- Label stack_empty, stack_done, stack_overflow;
- __ Subs(x0, x0, x2);
- __ B(le, &stack_empty);
+ Label stack_done, stack_overflow;
+ __ Subs(x6, x6, x2);
+ __ B(le, &stack_done);
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, x0, x2, &stack_overflow);
+ Generate_StackOverflowCheck(masm, x6, x2, &stack_overflow);
// Forward the arguments from the caller frame.
{
Label loop;
- __ Add(x3, x3, kPointerSize);
- __ Mov(x2, x0);
+ __ Add(x5, x5, kPointerSize);
+ __ Add(x0, x0, x6);
__ bind(&loop);
{
- __ Ldr(x4, MemOperand(x3, x2, LSL, kPointerSizeLog2));
+ __ Ldr(x4, MemOperand(x5, x6, LSL, kPointerSizeLog2));
__ Push(x4);
- __ Subs(x2, x2, 1);
+ __ Subs(x6, x6, 1);
__ B(ne, &loop);
}
}
@@ -2257,11 +2341,6 @@ void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
__ B(&stack_done);
__ Bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ Bind(&stack_empty);
- {
- // We just pass the receiver, which is already on the stack.
- __ Mov(x0, 0);
- }
__ Bind(&stack_done);
__ Jump(code, RelocInfo::CODE_TARGET);
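
A rough C++ analogue of the forwarding loop above, with a std::vector standing in for the machine stack and start_index playing the role of x2 (rest-parameter support); this is a sketch of the descending Subs/B.ne copy loop, not the real calling convention.

#include <cstddef>
#include <cstdint>
#include <vector>

// Push the caller's arguments from start_index onward onto the stack,
// highest index first, mirroring the downward-counting register loop.
void ForwardVarargs(const std::vector<intptr_t>& caller_args,
                    size_t start_index, std::vector<intptr_t>* stack) {
  if (caller_args.size() <= start_index) return;  // nothing to forward
  for (size_t i = caller_args.size(); i > start_index; --i) {
    stack->push_back(caller_args[i - 1]);
  }
}
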
diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc
index eb34638fa0..52023efd65 100644
--- a/deps/v8/src/builtins/builtins-api.cc
+++ b/deps/v8/src/builtins/builtins-api.cc
@@ -11,6 +11,7 @@
#include "src/log.h"
#include "src/objects-inl.h"
#include "src/prototype.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
@@ -117,7 +118,8 @@ MUST_USE_RESULT MaybeHandle<Object> HandleApiCallHelper(
}
// Rebox the result.
result->VerifyApiCallResultType();
- if (!is_construct || result->IsJSObject()) return handle(*result, isolate);
+ if (!is_construct || result->IsJSReceiver())
+ return handle(*result, isolate);
}
return js_receiver;
@@ -150,9 +152,10 @@ class RelocatableArguments : public BuiltinArguments, public Relocatable {
RelocatableArguments(Isolate* isolate, int length, Object** arguments)
: BuiltinArguments(length, arguments), Relocatable(isolate) {}
- virtual inline void IterateInstance(ObjectVisitor* v) {
+ virtual inline void IterateInstance(RootVisitor* v) {
if (length() == 0) return;
- v->VisitPointers(lowest_address(), highest_address() + 1);
+ v->VisitRootPointers(Root::kRelocatable, lowest_address(),
+ highest_address() + 1);
}
private:
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index aad31db8a1..8c95007622 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-string-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
@@ -15,13 +16,11 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
: CodeStubAssembler(state),
k_(this, MachineRepresentation::kTagged),
a_(this, MachineRepresentation::kTagged),
- to_(this, MachineRepresentation::kTagged, SmiConstant(0)) {}
-
- typedef std::function<Node*(ArrayBuiltinCodeStubAssembler* masm)>
- BuiltinResultGenerator;
+ to_(this, MachineRepresentation::kTagged, SmiConstant(0)),
+ fully_spec_compliant_(this, {&k_, &a_, &to_}) {}
typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
- BuiltinResultIndexInitializer;
+ BuiltinResultGenerator;
typedef std::function<Node*(ArrayBuiltinCodeStubAssembler* masm,
Node* k_value, Node* k)>
@@ -30,7 +29,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
typedef std::function<void(ArrayBuiltinCodeStubAssembler* masm)>
PostLoopAction;
- Node* ForEachResultGenerator() { return UndefinedConstant(); }
+ void ForEachResultGenerator() { a_.Bind(UndefinedConstant()); }
Node* ForEachProcessor(Node* k_value, Node* k) {
CallJS(CodeFactory::Call(isolate()), context(), callbackfn(), this_arg(),
@@ -38,7 +37,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
- Node* SomeResultGenerator() { return FalseConstant(); }
+ void SomeResultGenerator() { a_.Bind(FalseConstant()); }
Node* SomeProcessor(Node* k_value, Node* k) {
Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
@@ -46,12 +45,12 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Label false_continue(this), return_true(this);
BranchIfToBooleanIsTrue(value, &return_true, &false_continue);
BIND(&return_true);
- Return(TrueConstant());
+ ReturnFromBuiltin(TrueConstant());
BIND(&false_continue);
return a();
}
- Node* EveryResultGenerator() { return TrueConstant(); }
+ void EveryResultGenerator() { a_.Bind(TrueConstant()); }
Node* EveryProcessor(Node* k_value, Node* k) {
Node* value = CallJS(CodeFactory::Call(isolate()), context(), callbackfn(),
@@ -59,44 +58,12 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Label true_continue(this), return_false(this);
BranchIfToBooleanIsTrue(value, &true_continue, &return_false);
BIND(&return_false);
- Return(FalseConstant());
+ ReturnFromBuiltin(FalseConstant());
BIND(&true_continue);
return a();
}
- Node* ReduceResultGenerator() {
- VARIABLE(a, MachineRepresentation::kTagged, UndefinedConstant());
- Label no_initial_value(this), has_initial_value(this), done(this, {&a});
-
- // 8. If initialValue is present, then
- Node* parent_frame_ptr = LoadParentFramePointer();
- Node* marker_or_function = LoadBufferObject(
- parent_frame_ptr, CommonFrameConstants::kContextOrFrameTypeOffset);
- GotoIf(
- MarkerIsNotFrameType(marker_or_function, StackFrame::ARGUMENTS_ADAPTOR),
- &has_initial_value);
-
- // Has arguments adapter, check count.
- Node* adapted_parameter_count = LoadBufferObject(
- parent_frame_ptr, ArgumentsAdaptorFrameConstants::kLengthOffset);
- Branch(SmiLessThan(adapted_parameter_count,
- SmiConstant(IteratingArrayBuiltinDescriptor::kThisArg)),
- &no_initial_value, &has_initial_value);
-
- // a. Set accumulator to initialValue.
- BIND(&has_initial_value);
- a.Bind(this_arg());
- Goto(&done);
-
- // 9. Else initialValue is not present,
- BIND(&no_initial_value);
-
- // a. Let kPresent be false.
- a.Bind(TheHoleConstant());
- Goto(&done);
- BIND(&done);
- return a.value();
- }
+ void ReduceResultGenerator() { return a_.Bind(this_arg()); }
Node* ReduceProcessor(Node* k_value, Node* k) {
VARIABLE(result, MachineRepresentation::kTagged);
@@ -123,9 +90,10 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&ok);
}
- Node* FilterResultGenerator() {
+ void FilterResultGenerator() {
// 7. Let A be ArraySpeciesCreate(O, 0).
- return ArraySpeciesCreate(context(), o(), SmiConstant(0));
+ Node* len = SmiConstant(0);
+ ArraySpeciesCreate(len);
}
Node* FilterProcessor(Node* k_value, Node* k) {
@@ -137,25 +105,82 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&true_continue);
// iii. If selected is true, then...
{
- // 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
- CallRuntime(Runtime::kCreateDataProperty, context(), a(), to_.value(),
- k_value);
+ Label after_work(this, &to_);
+ Node* kind = nullptr;
+
+      // If a() is a JSArray, we can take a fast path.
+ Label fast(this);
+ Label runtime(this);
+ Label object_push_pre(this), object_push(this), double_push(this);
+ BranchIfFastJSArray(a(), context(), FastJSArrayAccessMode::ANY_ACCESS,
+ &fast, &runtime);
+
+ BIND(&fast);
+ {
+ kind = EnsureArrayPushable(a(), &runtime);
+ GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
+ &object_push_pre);
+
+ BuildAppendJSArray(FAST_SMI_ELEMENTS, a(), k_value, &runtime);
+ Goto(&after_work);
+ }
+
+ BIND(&object_push_pre);
+ {
+ Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS),
+ &double_push, &object_push);
+ }
+
+ BIND(&object_push);
+ {
+ BuildAppendJSArray(FAST_ELEMENTS, a(), k_value, &runtime);
+ Goto(&after_work);
+ }
+
+ BIND(&double_push);
+ {
+ BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, a(), k_value, &runtime);
+ Goto(&after_work);
+ }
+
+ BIND(&runtime);
+ {
+ // 1. Perform ? CreateDataPropertyOrThrow(A, ToString(to), kValue).
+ CallRuntime(Runtime::kCreateDataProperty, context(), a(), to_.value(),
+ k_value);
+ Goto(&after_work);
+ }
- // 2. Increase to by 1.
- to_.Bind(NumberInc(to_.value()));
- Goto(&false_continue);
+ BIND(&after_work);
+ {
+ // 2. Increase to by 1.
+ to_.Bind(NumberInc(to_.value()));
+ Goto(&false_continue);
+ }
}
BIND(&false_continue);
return a();
}
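
The filter fast path above (and FastArrayPush below) picks one of three append paths from the numeric ordering of V8's ElementsKind enum. A minimal sketch of that dispatch, with the enum boundary values passed in as parameters rather than taken from V8 headers:

enum class PushPath { kSmi, kObject, kDouble };

// Mirrors the two IsElementsKindGreaterThan branches above; assumes the
// usual ordering of SMI kinds < object kinds < double kinds.
PushPath SelectPushPath(int kind, int fast_holey_smi_elements,
                        int fast_holey_elements) {
  if (kind <= fast_holey_smi_elements) return PushPath::kSmi;
  if (kind <= fast_holey_elements) return PushPath::kObject;
  return PushPath::kDouble;
}
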
- Node* MapResultGenerator() {
- // 5. Let A be ? ArraySpeciesCreate(O, len).
- return ArraySpeciesCreate(context(), o(), len_);
+ void MapResultGenerator() { ArraySpeciesCreate(len_); }
+
+ void TypedArrayMapResultGenerator() {
+ // 6. Let A be ? TypedArraySpeciesCreate(O, len).
+ Node* a = TypedArraySpeciesCreateByLength(context(), o(), len_);
+    // In the spec and in our current implementation, the length check is
+    // already performed in TypedArraySpeciesCreate. We repeat the check here
+    // to keep this invariant local.
+ // TODO(tebbi): Change this to a release mode check.
+ CSA_ASSERT(
+ this, WordEqual(len_, LoadObjectField(a, JSTypedArray::kLengthOffset)));
+ fast_typed_array_target_ = Word32Equal(LoadInstanceType(LoadElements(o_)),
+ LoadInstanceType(LoadElements(a)));
+ a_.Bind(a);
}
- Node* MapProcessor(Node* k_value, Node* k) {
- // i. Let kValue be ? Get(O, Pk). Performed by the caller of MapProcessor.
+ Node* SpecCompliantMapProcessor(Node* k_value, Node* k) {
+ // i. Let kValue be ? Get(O, Pk). Performed by the caller of
+ // SpecCompliantMapProcessor.
// ii. Let mappedValue be ? Call(callbackfn, T, kValue, k, O).
Node* mappedValue = CallJS(CodeFactory::Call(isolate()), context(),
callbackfn(), this_arg(), k_value, k, o());
@@ -165,12 +190,117 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
return a();
}
+ Node* FastMapProcessor(Node* k_value, Node* k) {
+ // i. Let kValue be ? Get(O, Pk). Performed by the caller of
+ // FastMapProcessor.
+ // ii. Let mappedValue be ? Call(callbackfn, T, kValue, k, O).
+ Node* mappedValue = CallJS(CodeFactory::Call(isolate()), context(),
+ callbackfn(), this_arg(), k_value, k, o());
+
+ Label finished(this);
+ Node* kind = nullptr;
+ Node* elements = nullptr;
+
+    // If a() is a JSArray, we can take a fast path.
+    // mode is SMI_PARAMETERS because k has a tagged representation.
+ ParameterMode mode = SMI_PARAMETERS;
+ Label fast(this);
+ Label runtime(this);
+ Label object_push_pre(this), object_push(this), double_push(this);
+ BranchIfFastJSArray(a(), context(), FastJSArrayAccessMode::ANY_ACCESS,
+ &fast, &runtime);
+
+ BIND(&fast);
+ {
+ kind = EnsureArrayPushable(a(), &runtime);
+ elements = LoadElements(a());
+ GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
+ &object_push_pre);
+ TryStoreArrayElement(FAST_SMI_ELEMENTS, mode, &runtime, elements, k,
+ mappedValue);
+ Goto(&finished);
+ }
+
+ BIND(&object_push_pre);
+ {
+ Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS), &double_push,
+ &object_push);
+ }
+
+ BIND(&object_push);
+ {
+ TryStoreArrayElement(FAST_ELEMENTS, mode, &runtime, elements, k,
+ mappedValue);
+ Goto(&finished);
+ }
+
+ BIND(&double_push);
+ {
+ TryStoreArrayElement(FAST_DOUBLE_ELEMENTS, mode, &runtime, elements, k,
+ mappedValue);
+ Goto(&finished);
+ }
+
+ BIND(&runtime);
+ {
+ // iii. Perform ? CreateDataPropertyOrThrow(A, Pk, mappedValue).
+ CallRuntime(Runtime::kCreateDataProperty, context(), a(), k, mappedValue);
+ Goto(&finished);
+ }
+
+ BIND(&finished);
+ return a();
+ }
+
+ // See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
+ Node* TypedArrayMapProcessor(Node* k_value, Node* k) {
+ // 8. c. Let mappedValue be ? Call(callbackfn, T, « kValue, k, O »).
+ Node* mappedValue = CallJS(CodeFactory::Call(isolate()), context(),
+ callbackfn(), this_arg(), k_value, k, o());
+ Label fast(this), slow(this), done(this), detached(this, Label::kDeferred);
+
+ // 8. d. Perform ? Set(A, Pk, mappedValue, true).
+ // Since we know that A is a TypedArray, this always ends up in
+ // #sec-integer-indexed-exotic-objects-set-p-v-receiver and then
+ // tc39.github.io/ecma262/#sec-integerindexedelementset .
+ Branch(fast_typed_array_target_, &fast, &slow);
+
+ BIND(&fast);
+ // #sec-integerindexedelementset 3. Let numValue be ? ToNumber(value).
+ Node* num_value = ToNumber(context(), mappedValue);
+    // The only way this can bail out is if the buffer has been detached.
+ EmitElementStore(
+ a(), k, num_value, false, source_elements_kind_,
+ KeyedAccessStoreMode::STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
+ &detached);
+ Goto(&done);
+
+ BIND(&slow);
+ CallRuntime(Runtime::kSetProperty, context(), a(), k, mappedValue,
+ SmiConstant(STRICT));
+ Goto(&done);
+
+ BIND(&detached);
+ {
+ // tc39.github.io/ecma262/#sec-integerindexedelementset
+ // 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
+ CallRuntime(Runtime::kThrowTypeError, context_,
+ SmiConstant(MessageTemplate::kDetachedOperation),
+ name_string_);
+ Unreachable();
+ }
+
+ BIND(&done);
+ return a();
+ }
+
void NullPostLoopAction() {}
protected:
Node* context() { return context_; }
Node* receiver() { return receiver_; }
Node* new_target() { return new_target_; }
+ Node* argc() { return argc_; }
Node* o() { return o_; }
Node* len() { return len_; }
Node* callbackfn() { return callbackfn_; }
@@ -178,14 +308,25 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Node* k() { return k_.value(); }
Node* a() { return a_.value(); }
+ void ReturnFromBuiltin(Node* value) {
+ if (argc_ == nullptr) {
+ Return(value);
+ } else {
+ // argc_ doesn't include the receiver, so it has to be added back in
+ // manually.
+ PopAndReturn(IntPtrAdd(argc_, IntPtrConstant(1)), value);
+ }
+ }
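
The slot arithmetic above is easy to get wrong: the stub's argument count excludes the implicit receiver, so one extra stack slot has to be popped. As a one-line sketch:

// Number of stack slots PopAndReturn must drop: argc plus the receiver.
int SlotsToPop(int argc) { return argc + 1; }
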
+
void InitIteratingArrayBuiltinBody(Node* context, Node* receiver,
Node* callbackfn, Node* this_arg,
- Node* new_target) {
+ Node* new_target, Node* argc) {
context_ = context;
receiver_ = receiver;
new_target_ = new_target;
callbackfn_ = callbackfn;
this_arg_ = this_arg;
+ argc_ = argc;
}
void GenerateIteratingArrayBuiltinBody(
@@ -193,8 +334,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
const CallResultProcessor& processor, const PostLoopAction& action,
const Callable& slow_case_continuation,
ForEachDirection direction = ForEachDirection::kForward) {
- Label non_array(this), slow(this, {&k_, &a_, &to_}),
- array_changes(this, {&k_, &a_, &to_});
+ Label non_array(this), array_changes(this, {&k_, &a_, &to_});
// TODO(danno): Seriously? Do we really need to throw the exact error
// message on null and undefined so that the webkit tests pass?
@@ -220,8 +360,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
BIND(&not_js_array);
Node* len_property =
GetProperty(context(), o(), isolate()->factory()->length_string());
- merged_length.Bind(
- CallStub(CodeFactory::ToLength(isolate()), context(), len_property));
+ merged_length.Bind(ToLength_Inline(context(), len_property));
Goto(&has_length);
BIND(&has_length);
len_ = merged_length.value();
@@ -261,19 +400,16 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
k_.Bind(NumberDec(len()));
}
- a_.Bind(generator(this));
+ generator(this);
- HandleFastElements(processor, action, &slow, direction);
+ HandleFastElements(processor, action, &fully_spec_compliant_, direction);
- BIND(&slow);
+ BIND(&fully_spec_compliant_);
- Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
- MachineType::TaggedPointer());
- TailCallStub(
- slow_case_continuation, context(), target, new_target(),
- Int32Constant(IteratingArrayBuiltinLoopContinuationDescriptor::kArity),
- receiver(), callbackfn(), this_arg(), a_.value(), o(), k_.value(),
- len(), to_.value());
+ Node* result =
+ CallStub(slow_case_continuation, context(), receiver(), callbackfn(),
+ this_arg(), a_.value(), o(), k_.value(), len(), to_.value());
+ ReturnFromBuiltin(result);
}
void InitIteratingArrayBuiltinLoopContinuation(Node* context, Node* receiver,
@@ -284,6 +420,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
context_ = context;
this_arg_ = this_arg;
callbackfn_ = callbackfn;
+ argc_ = nullptr;
a_.Bind(a);
k_.Bind(initial_k);
o_ = o;
@@ -295,7 +432,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
const char* name, const BuiltinResultGenerator& generator,
const CallResultProcessor& processor, const PostLoopAction& action,
ForEachDirection direction = ForEachDirection::kForward) {
- Node* name_string =
+ name_string_ =
HeapConstant(isolate()->factory()->NewStringFromAsciiChecked(name));
// ValidateTypedArray: tc39.github.io/ecma262/#sec-validatetypedarray
@@ -330,7 +467,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
{
CallRuntime(Runtime::kThrowTypeError, context_,
SmiConstant(MessageTemplate::kDetachedOperation),
- name_string);
+ name_string_);
Unreachable();
}
@@ -367,25 +504,25 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
} else {
k_.Bind(NumberDec(len()));
}
- a_.Bind(generator(this));
- Node* elements_type = LoadInstanceType(LoadElements(o_));
- Switch(elements_type, &unexpected_instance_type, instance_types.data(),
+ Node* instance_type = LoadInstanceType(LoadElements(o_));
+ Switch(instance_type, &unexpected_instance_type, instance_types.data(),
label_ptrs.data(), labels.size());
for (size_t i = 0; i < labels.size(); ++i) {
BIND(&labels[i]);
Label done(this);
+ source_elements_kind_ = ElementsKindForInstanceType(
+ static_cast<InstanceType>(instance_types[i]));
+ generator(this);
// TODO(tebbi): Silently cancelling the loop on buffer detachment is a
- // spec violation. Should go to &detached and throw a TypeError instead.
- VisitAllTypedArrayElements(
- ElementsKindForInstanceType(
- static_cast<InstanceType>(instance_types[i])),
- array_buffer, processor, &done, direction);
+ // spec violation. Should go to &throw_detached and throw a TypeError
+ // instead.
+ VisitAllTypedArrayElements(array_buffer, processor, &done, direction);
Goto(&done);
// No exception, return success
BIND(&done);
action(this);
- Return(a_.value());
+ ReturnFromBuiltin(a_.value());
}
}
@@ -459,7 +596,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
}
}
- void VisitAllTypedArrayElements(ElementsKind kind, Node* array_buffer,
+ void VisitAllTypedArrayElements(Node* array_buffer,
const CallResultProcessor& processor,
Label* detached, ForEachDirection direction) {
VariableList list({&a_, &k_, &to_}, zone());
@@ -473,8 +610,8 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
MachineType::Pointer());
Node* data_ptr = IntPtrAdd(BitcastTaggedToWord(base_ptr), external_ptr);
- Node* value = LoadFixedTypedArrayElementAsTagged(data_ptr, index, kind,
- SMI_PARAMETERS);
+ Node* value = LoadFixedTypedArrayElementAsTagged(
+ data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
k_.Bind(index);
a_.Bind(processor(this, value, index));
};
@@ -575,7 +712,7 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Node* o_map = LoadMap(o());
Node* bit_field2 = LoadMapBitField2(o_map);
Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
- Branch(Int32GreaterThan(kind, Int32Constant(FAST_HOLEY_ELEMENTS)),
+ Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS),
&maybe_double_elements, &fast_elements);
ParameterMode mode = OptimalParameterMode();
@@ -587,12 +724,12 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
action(this);
// No exception, return success
- Return(a_.value());
+ ReturnFromBuiltin(a_.value());
}
BIND(&maybe_double_elements);
- Branch(Int32GreaterThan(kind, Int32Constant(FAST_HOLEY_DOUBLE_ELEMENTS)),
- slow, &fast_double_elements);
+ Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_DOUBLE_ELEMENTS), slow,
+ &fast_double_elements);
BIND(&fast_double_elements);
{
@@ -602,10 +739,55 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
action(this);
// No exception, return success
- Return(a_.value());
+ ReturnFromBuiltin(a_.value());
}
}
+ // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
+ void ArraySpeciesCreate(Node* len) {
+ Label runtime(this, Label::kDeferred), done(this);
+
+ Node* const original_map = LoadMap(o());
+ GotoIf(Word32NotEqual(LoadMapInstanceType(original_map),
+ Int32Constant(JS_ARRAY_TYPE)),
+ &runtime);
+
+ Node* const native_context = LoadNativeContext(context());
+ Node* const initial_array_prototype = LoadContextElement(
+ native_context, Context::INITIAL_ARRAY_PROTOTYPE_INDEX);
+ Node* proto = LoadMapPrototype(original_map);
+ GotoIf(WordNotEqual(proto, initial_array_prototype), &runtime);
+
+ Node* species_protector = SpeciesProtectorConstant();
+ Node* value = LoadObjectField(species_protector, Cell::kValueOffset);
+ Node* const protector_invalid = SmiConstant(Isolate::kProtectorInvalid);
+ GotoIf(WordEqual(value, protector_invalid), &runtime);
+
+ GotoIfNot(TaggedIsPositiveSmi(len), &runtime);
+ GotoIf(SmiAbove(len, SmiConstant(JSArray::kInitialMaxFastElementArray)),
+ &runtime);
+
+ const ElementsKind elements_kind =
+ GetHoleyElementsKind(GetInitialFastElementsKind());
+ Node* array_map = LoadJSArrayElementsMap(elements_kind, native_context);
+ a_.Bind(AllocateJSArray(FAST_SMI_ELEMENTS, array_map, len, len, nullptr,
+ CodeStubAssembler::SMI_PARAMETERS));
+
+ Goto(&done);
+
+ BIND(&runtime);
+ {
+ // 5. Let A be ? ArraySpeciesCreate(O, len).
+ Node* constructor =
+ CallRuntime(Runtime::kArraySpeciesConstructor, context(), o());
+ a_.Bind(ConstructJS(CodeFactory::Construct(isolate()), context(),
+ constructor, len));
+ Goto(&fully_spec_compliant_);
+ }
+
+ BIND(&done);
+ }
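
The fast path above skips the generic @@species protocol only when every guard passes; otherwise it falls through to the runtime call. A sketch of the combined predicate, with each input flattened to a parameter mirroring one GotoIf above, and the length limit an assumed stand-in for JSArray::kInitialMaxFastElementArray:

#include <cstdint>

// True when the stub may allocate the result array directly instead of
// consulting the species constructor.
bool CanAllocateDirectly(bool is_js_array, bool has_initial_array_prototype,
                         bool species_protector_intact, int64_t len) {
  const int64_t kInitialMaxFastElementArray = 95 * 1024;  // assumed limit
  return is_js_array && has_initial_array_prototype &&
         species_protector_intact && len >= 0 &&
         len <= kInitialMaxFastElementArray;
}
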
+
Node* callbackfn_ = nullptr;
Node* o_ = nullptr;
Node* this_arg_ = nullptr;
@@ -613,11 +795,117 @@ class ArrayBuiltinCodeStubAssembler : public CodeStubAssembler {
Node* context_ = nullptr;
Node* receiver_ = nullptr;
Node* new_target_ = nullptr;
+ Node* argc_ = nullptr;
+ Node* fast_typed_array_target_ = nullptr;
+ Node* name_string_ = nullptr;
Variable k_;
Variable a_;
Variable to_;
+ Label fully_spec_compliant_;
+ ElementsKind source_elements_kind_ = ElementsKind::NO_ELEMENTS;
};
+TF_BUILTIN(FastArrayPop, CodeStubAssembler) {
+ Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ CSA_ASSERT(this, WordEqual(Parameter(BuiltinDescriptor::kNewTarget),
+ UndefinedConstant()));
+
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ Node* receiver = args.GetReceiver();
+
+ Label runtime(this, Label::kDeferred);
+ Label fast(this);
+
+  // Only pop in this stub if
+  // 1) the array has fast elements,
+  // 2) the length is writable,
+  // 3) the elements backing store isn't copy-on-write, and
+  // 4) we aren't supposed to shrink the backing store.
+
+ // 1) Check that the array has fast elements.
+ BranchIfFastJSArray(receiver, context, FastJSArrayAccessMode::INBOUNDS_READ,
+ &fast, &runtime);
+
+ BIND(&fast);
+ {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(
+ LoadObjectField(receiver, JSArray::kLengthOffset)));
+ Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
+ Label return_undefined(this), fast_elements(this);
+ GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
+
+ // 2) Ensure that the length is writable.
+ EnsureArrayLengthWritable(LoadMap(receiver), &runtime);
+
+ // 3) Check that the elements backing store isn't copy-on-write.
+ Node* elements = LoadElements(receiver);
+ GotoIf(WordEqual(LoadMap(elements),
+ LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
+ &runtime);
+
+ Node* new_length = IntPtrSub(length, IntPtrConstant(1));
+
+ // 4) Check that we're not supposed to shrink the backing store, as
+ // implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
+ Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+ GotoIf(IntPtrLessThan(
+ IntPtrAdd(IntPtrAdd(new_length, new_length),
+ IntPtrConstant(JSObject::kMinAddedElementsCapacity)),
+ capacity),
+ &runtime);
+
+ StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset,
+ SmiTag(new_length));
+
+ Node* elements_kind = LoadMapElementsKind(LoadMap(receiver));
+ GotoIf(Int32LessThanOrEqual(elements_kind,
+ Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
+ &fast_elements);
+
+ Node* value = LoadFixedDoubleArrayElement(
+ elements, new_length, MachineType::Float64(), 0, INTPTR_PARAMETERS,
+ &return_undefined);
+
+ int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
+ Node* offset = ElementOffsetFromIndex(
+ new_length, FAST_HOLEY_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, header_size);
+ if (Is64()) {
+ Node* double_hole = Int64Constant(kHoleNanInt64);
+ StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
+ double_hole);
+ } else {
+ STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
+ Node* double_hole = Int32Constant(kHoleNanLower32);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
+ double_hole);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, elements,
+ IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+ double_hole);
+ }
+ args.PopAndReturn(AllocateHeapNumberWithValue(value));
+
+ BIND(&fast_elements);
+ {
+ Node* value = LoadFixedArrayElement(elements, new_length);
+ StoreFixedArrayElement(elements, new_length, TheHoleConstant());
+ GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
+ args.PopAndReturn(value);
+ }
+
+ BIND(&return_undefined);
+ { args.PopAndReturn(UndefinedConstant()); }
+ }
+
+ BIND(&runtime);
+ {
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ TailCallStub(CodeFactory::ArrayPop(isolate()), context, target,
+ UndefinedConstant(), argc);
+ }
+}
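
The step-4 capacity test above mirrors the shrink heuristic referenced from elements.cc: if popping one element would leave the backing store more than roughly twice as large as needed, the runtime is expected to right-trim it, so the stub bails out instead. A sketch, with kMinAddedElementsCapacity an assumed stand-in for JSObject::kMinAddedElementsCapacity:

#include <cstddef>

// True when the backing store has enough slack after the pop that the
// runtime would shrink it, meaning the stub must not handle this case.
bool WouldShrinkBackingStore(ptrdiff_t new_length, ptrdiff_t capacity) {
  const ptrdiff_t kMinAddedElementsCapacity = 16;  // assumed value
  return 2 * new_length + kMinAddedElementsCapacity < capacity;
}
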
+
TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
VARIABLE(arg_index, MachineType::PointerRepresentation());
Label default_label(this, &arg_index);
@@ -632,7 +920,8 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
// arguments are reordered.
Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
Node* context = Parameter(BuiltinDescriptor::kContext);
- Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ CSA_ASSERT(this, WordEqual(Parameter(BuiltinDescriptor::kNewTarget),
+ UndefinedConstant()));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
Node* receiver = args.GetReceiver();
@@ -644,39 +933,13 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
BIND(&fast);
{
- // Disallow pushing onto prototypes. It might be the JSArray prototype.
- // Disallow pushing onto non-extensible objects.
- Comment("Disallow pushing onto prototypes");
- Node* map = LoadMap(receiver);
- Node* bit_field2 = LoadMapBitField2(map);
- int mask = static_cast<int>(Map::IsPrototypeMapBits::kMask) |
- (1 << Map::kIsExtensible);
- Node* test = Word32And(bit_field2, Int32Constant(mask));
- GotoIf(Word32NotEqual(test, Int32Constant(1 << Map::kIsExtensible)),
- &runtime);
-
- // Disallow pushing onto arrays in dictionary named property mode. We need
- // to figure out whether the length property is still writable.
- Comment("Disallow pushing onto arrays in dictionary named property mode");
- GotoIf(IsDictionaryMap(map), &runtime);
-
- // Check whether the length property is writable. The length property is the
- // only default named property on arrays. It's nonconfigurable, hence is
- // guaranteed to stay the first property.
- Node* descriptors = LoadMapDescriptors(map);
- Node* details =
- LoadFixedArrayElement(descriptors, DescriptorArray::ToDetailsIndex(0));
- GotoIf(IsSetSmi(details, PropertyDetails::kAttributesReadOnlyMask),
- &runtime);
-
arg_index.Bind(IntPtrConstant(0));
- kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
-
- GotoIf(Int32GreaterThan(kind, Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
+ kind = EnsureArrayPushable(receiver, &runtime);
+ GotoIf(IsElementsKindGreaterThan(kind, FAST_HOLEY_SMI_ELEMENTS),
&object_push_pre);
- Node* new_length = BuildAppendJSArray(FAST_SMI_ELEMENTS, context, receiver,
- args, arg_index, &smi_transition);
+ Node* new_length = BuildAppendJSArray(FAST_SMI_ELEMENTS, receiver, args,
+ arg_index, &smi_transition);
args.PopAndReturn(new_length);
}
@@ -708,22 +971,21 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
BIND(&object_push_pre);
{
- Branch(Int32GreaterThan(kind, Int32Constant(FAST_HOLEY_ELEMENTS)),
- &double_push, &object_push);
+ Branch(IsElementsKindGreaterThan(kind, FAST_HOLEY_ELEMENTS), &double_push,
+ &object_push);
}
BIND(&object_push);
{
- Node* new_length = BuildAppendJSArray(FAST_ELEMENTS, context, receiver,
- args, arg_index, &default_label);
+ Node* new_length = BuildAppendJSArray(FAST_ELEMENTS, receiver, args,
+ arg_index, &default_label);
args.PopAndReturn(new_length);
}
BIND(&double_push);
{
- Node* new_length =
- BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, context, receiver, args,
- arg_index, &double_transition);
+ Node* new_length = BuildAppendJSArray(FAST_DOUBLE_ELEMENTS, receiver, args,
+ arg_index, &double_transition);
args.PopAndReturn(new_length);
}
@@ -769,8 +1031,156 @@ TF_BUILTIN(FastArrayPush, CodeStubAssembler) {
{
Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
MachineType::TaggedPointer());
- TailCallStub(CodeFactory::ArrayPush(isolate()), context, target, new_target,
- argc);
+ TailCallStub(CodeFactory::ArrayPush(isolate()), context, target,
+ UndefinedConstant(), argc);
+ }
+}
+
+TF_BUILTIN(FastArrayShift, CodeStubAssembler) {
+ Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ CSA_ASSERT(this, WordEqual(Parameter(BuiltinDescriptor::kNewTarget),
+ UndefinedConstant()));
+
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ Node* receiver = args.GetReceiver();
+
+ Label runtime(this, Label::kDeferred);
+ Label fast(this);
+
+  // Only shift in this stub if
+  // 1) the array has fast elements,
+  // 2) the length is writable,
+  // 3) the elements backing store isn't copy-on-write,
+  // 4) we aren't supposed to shrink the backing store, and
+  // 5) we aren't supposed to left-trim the backing store.
+
+ // 1) Check that the array has fast elements.
+ BranchIfFastJSArray(receiver, context, FastJSArrayAccessMode::INBOUNDS_READ,
+ &fast, &runtime);
+
+ BIND(&fast);
+ {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(
+ LoadObjectField(receiver, JSArray::kLengthOffset)));
+ Node* length = LoadAndUntagObjectField(receiver, JSArray::kLengthOffset);
+ Label return_undefined(this), fast_elements_tagged(this),
+ fast_elements_untagged(this);
+ GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_undefined);
+
+ // 2) Ensure that the length is writable.
+ EnsureArrayLengthWritable(LoadMap(receiver), &runtime);
+
+ // 3) Check that the elements backing store isn't copy-on-write.
+ Node* elements = LoadElements(receiver);
+ GotoIf(WordEqual(LoadMap(elements),
+ LoadRoot(Heap::kFixedCOWArrayMapRootIndex)),
+ &runtime);
+
+ Node* new_length = IntPtrSub(length, IntPtrConstant(1));
+
+ // 4) Check that we're not supposed to right-trim the backing store, as
+ // implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
+ Node* capacity = SmiUntag(LoadFixedArrayBaseLength(elements));
+ GotoIf(IntPtrLessThan(
+ IntPtrAdd(IntPtrAdd(new_length, new_length),
+ IntPtrConstant(JSObject::kMinAddedElementsCapacity)),
+ capacity),
+ &runtime);
+
+ // 5) Check that we're not supposed to left-trim the backing store, as
+ // implemented in elements.cc:FastElementsAccessor::MoveElements.
+ GotoIf(IntPtrGreaterThan(new_length,
+ IntPtrConstant(JSArray::kMaxCopyElements)),
+ &runtime);
+
+ StoreObjectFieldNoWriteBarrier(receiver, JSArray::kLengthOffset,
+ SmiTag(new_length));
+
+ Node* elements_kind = LoadMapElementsKind(LoadMap(receiver));
+ GotoIf(Int32LessThanOrEqual(elements_kind,
+ Int32Constant(FAST_HOLEY_SMI_ELEMENTS)),
+ &fast_elements_untagged);
+ GotoIf(Int32LessThanOrEqual(elements_kind,
+ Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
+ &fast_elements_tagged);
+ Node* value = LoadFixedDoubleArrayElement(
+ elements, IntPtrConstant(0), MachineType::Float64(), 0,
+ INTPTR_PARAMETERS, &return_undefined);
+
+ int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag;
+ Node* memmove =
+ ExternalConstant(ExternalReference::libc_memmove_function(isolate()));
+ Node* start = IntPtrAdd(
+ BitcastTaggedToWord(elements),
+ ElementOffsetFromIndex(IntPtrConstant(0), FAST_HOLEY_DOUBLE_ELEMENTS,
+ INTPTR_PARAMETERS, header_size));
+ CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::Pointer(), MachineType::UintPtr(), memmove,
+ start, IntPtrAdd(start, IntPtrConstant(kDoubleSize)),
+ IntPtrMul(new_length, IntPtrConstant(kDoubleSize)));
+ Node* offset = ElementOffsetFromIndex(
+ new_length, FAST_HOLEY_DOUBLE_ELEMENTS, INTPTR_PARAMETERS, header_size);
+ if (Is64()) {
+ Node* double_hole = Int64Constant(kHoleNanInt64);
+ StoreNoWriteBarrier(MachineRepresentation::kWord64, elements, offset,
+ double_hole);
+ } else {
+ STATIC_ASSERT(kHoleNanLower32 == kHoleNanUpper32);
+ Node* double_hole = Int32Constant(kHoleNanLower32);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, elements, offset,
+ double_hole);
+ StoreNoWriteBarrier(MachineRepresentation::kWord32, elements,
+ IntPtrAdd(offset, IntPtrConstant(kPointerSize)),
+ double_hole);
+ }
+ args.PopAndReturn(AllocateHeapNumberWithValue(value));
+
+ BIND(&fast_elements_tagged);
+ {
+ Node* value = LoadFixedArrayElement(elements, 0);
+ BuildFastLoop(IntPtrConstant(0), new_length,
+ [&](Node* index) {
+ StoreFixedArrayElement(
+ elements, index,
+ LoadFixedArrayElement(
+ elements, IntPtrAdd(index, IntPtrConstant(1))));
+ },
+ 1, ParameterMode::INTPTR_PARAMETERS,
+ IndexAdvanceMode::kPost);
+ StoreFixedArrayElement(elements, new_length, TheHoleConstant());
+ GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
+ args.PopAndReturn(value);
+ }
+
+ BIND(&fast_elements_untagged);
+ {
+ Node* value = LoadFixedArrayElement(elements, 0);
+ Node* memmove =
+ ExternalConstant(ExternalReference::libc_memmove_function(isolate()));
+ Node* start = IntPtrAdd(
+ BitcastTaggedToWord(elements),
+ ElementOffsetFromIndex(IntPtrConstant(0), FAST_HOLEY_SMI_ELEMENTS,
+ INTPTR_PARAMETERS, header_size));
+ CallCFunction3(MachineType::AnyTagged(), MachineType::Pointer(),
+ MachineType::Pointer(), MachineType::UintPtr(), memmove,
+ start, IntPtrAdd(start, IntPtrConstant(kPointerSize)),
+ IntPtrMul(new_length, IntPtrConstant(kPointerSize)));
+ StoreFixedArrayElement(elements, new_length, TheHoleConstant());
+ GotoIf(WordEqual(value, TheHoleConstant()), &return_undefined);
+ args.PopAndReturn(value);
+ }
+
+ BIND(&return_undefined);
+ { args.PopAndReturn(UndefinedConstant()); }
+ }
+
+ BIND(&runtime);
+ {
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ TailCallStub(CodeFactory::ArrayShift(isolate()), context, target,
+ UndefinedConstant(), argc);
}
}
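
For untagged element kinds, the shift above is a plain memmove over the backing store followed by writing the hole sentinel into the vacated last slot (a 64-bit NaN pattern, stored as two 32-bit halves on 32-bit targets). A standalone sketch for the double case; the hole bit pattern is an assumption, not V8's exact constant:

#include <cstdint>
#include <cstring>

// Shift `length` doubles left by one slot and fill the vacated last slot
// with the hole-NaN sentinel, as the stub does via libc memmove.
void ShiftDoublesLeft(uint64_t* elements, size_t length) {
  if (length == 0) return;
  std::memmove(elements, elements + 1, (length - 1) * sizeof(uint64_t));
  const uint64_t kHoleNanInt64 = 0x7FF7FFFF7FF7FFFFull;  // assumed pattern
  elements[length - 1] = kHoleNanInt64;
}
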
@@ -795,21 +1205,25 @@ TF_BUILTIN(ArrayForEachLoopContinuation, ArrayBuiltinCodeStubAssembler) {
}
TF_BUILTIN(ArrayForEach, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
- new_target);
+ new_target, argc);
GenerateIteratingArrayBuiltinBody(
"Array.prototype.forEach",
&ArrayBuiltinCodeStubAssembler::ForEachResultGenerator,
&ArrayBuiltinCodeStubAssembler::ForEachProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- CodeFactory::ArrayForEachLoopContinuation(isolate()));
+ Builtins::CallableFor(isolate(),
+ Builtins::kArrayForEachLoopContinuation));
}
TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
@@ -833,32 +1247,38 @@ TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinCodeStubAssembler) {
}
TF_BUILTIN(ArraySome, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
- new_target);
+ new_target, argc);
GenerateIteratingArrayBuiltinBody(
"Array.prototype.some",
&ArrayBuiltinCodeStubAssembler::SomeResultGenerator,
&ArrayBuiltinCodeStubAssembler::SomeProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- CodeFactory::ArraySomeLoopContinuation(isolate()));
+ Builtins::CallableFor(isolate(), Builtins::kArraySomeLoopContinuation));
}
TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
- new_target);
+ new_target, argc);
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.some",
@@ -888,32 +1308,38 @@ TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinCodeStubAssembler) {
}
TF_BUILTIN(ArrayEvery, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
- new_target);
+ new_target, argc);
GenerateIteratingArrayBuiltinBody(
"Array.prototype.every",
&ArrayBuiltinCodeStubAssembler::EveryResultGenerator,
&ArrayBuiltinCodeStubAssembler::EveryProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- CodeFactory::ArrayEveryLoopContinuation(isolate()));
+ Builtins::CallableFor(isolate(), Builtins::kArrayEveryLoopContinuation));
}
TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
- new_target);
+ new_target, argc);
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.every",
@@ -943,32 +1369,38 @@ TF_BUILTIN(ArrayReduceLoopContinuation, ArrayBuiltinCodeStubAssembler) {
}
TF_BUILTIN(ArrayReduce, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* initial_value = Parameter(Descriptor::kInitialValue);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
- new_target);
+ new_target, argc);
GenerateIteratingArrayBuiltinBody(
"Array.prototype.reduce",
&ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
&ArrayBuiltinCodeStubAssembler::ReduceProcessor,
&ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
- CodeFactory::ArrayReduceLoopContinuation(isolate()));
+ Builtins::CallableFor(isolate(), Builtins::kArrayReduceLoopContinuation));
}
TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* initial_value = Parameter(Descriptor::kInitialValue);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
- new_target);
+ new_target, argc);
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.reduce",
@@ -999,33 +1431,40 @@ TF_BUILTIN(ArrayReduceRightLoopContinuation, ArrayBuiltinCodeStubAssembler) {
}
TF_BUILTIN(ArrayReduceRight, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* initial_value = Parameter(Descriptor::kInitialValue);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
- new_target);
+ new_target, argc);
GenerateIteratingArrayBuiltinBody(
"Array.prototype.reduceRight",
&ArrayBuiltinCodeStubAssembler::ReduceResultGenerator,
&ArrayBuiltinCodeStubAssembler::ReduceProcessor,
&ArrayBuiltinCodeStubAssembler::ReducePostLoopAction,
- CodeFactory::ArrayReduceRightLoopContinuation(isolate()),
+ Builtins::CallableFor(isolate(),
+ Builtins::kArrayReduceRightLoopContinuation),
ForEachDirection::kReverse);
}
TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* initial_value = Parameter(Descriptor::kInitialValue);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, initial_value,
- new_target);
+ new_target, argc);
GenerateIteratingTypedArrayBuiltinBody(
"%TypedArray%.prototype.reduceRight",
@@ -1056,21 +1495,24 @@ TF_BUILTIN(ArrayFilterLoopContinuation, ArrayBuiltinCodeStubAssembler) {
}
TF_BUILTIN(ArrayFilter, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
- new_target);
+ new_target, argc);
GenerateIteratingArrayBuiltinBody(
"Array.prototype.filter",
&ArrayBuiltinCodeStubAssembler::FilterResultGenerator,
&ArrayBuiltinCodeStubAssembler::FilterProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- CodeFactory::ArrayFilterLoopContinuation(isolate()));
+ Builtins::CallableFor(isolate(), Builtins::kArrayFilterLoopContinuation));
}
TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
@@ -1089,25 +1531,48 @@ TF_BUILTIN(ArrayMapLoopContinuation, ArrayBuiltinCodeStubAssembler) {
len, to);
GenerateIteratingArrayBuiltinLoopContinuation(
- &ArrayBuiltinCodeStubAssembler::MapProcessor,
+ &ArrayBuiltinCodeStubAssembler::SpecCompliantMapProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
TF_BUILTIN(ArrayMap, ArrayBuiltinCodeStubAssembler) {
- Node* context = Parameter(Descriptor::kContext);
- Node* receiver = Parameter(Descriptor::kReceiver);
- Node* callbackfn = Parameter(Descriptor::kCallbackFn);
- Node* this_arg = Parameter(Descriptor::kThisArg);
- Node* new_target = Parameter(Descriptor::kNewTarget);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
- new_target);
+ new_target, argc);
GenerateIteratingArrayBuiltinBody(
"Array.prototype.map", &ArrayBuiltinCodeStubAssembler::MapResultGenerator,
- &ArrayBuiltinCodeStubAssembler::MapProcessor,
+ &ArrayBuiltinCodeStubAssembler::FastMapProcessor,
&ArrayBuiltinCodeStubAssembler::NullPostLoopAction,
- CodeFactory::ArrayMapLoopContinuation(isolate()));
+ Builtins::CallableFor(isolate(), Builtins::kArrayMapLoopContinuation));
+}
+
+TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinCodeStubAssembler) {
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ Node* receiver = args.GetReceiver();
+ Node* callbackfn = args.GetOptionalArgumentValue(0, UndefinedConstant());
+ Node* this_arg = args.GetOptionalArgumentValue(1, UndefinedConstant());
+
+ InitIteratingArrayBuiltinBody(context, receiver, callbackfn, this_arg,
+ new_target, argc);
+
+ GenerateIteratingTypedArrayBuiltinBody(
+ "%TypedArray%.prototype.map",
+ &ArrayBuiltinCodeStubAssembler::TypedArrayMapResultGenerator,
+ &ArrayBuiltinCodeStubAssembler::TypedArrayMapProcessor,
+ &ArrayBuiltinCodeStubAssembler::NullPostLoopAction);
}
TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
@@ -1136,79 +1601,117 @@ TF_BUILTIN(ArrayIsArray, CodeStubAssembler) {
Return(CallRuntime(Runtime::kArrayIsArray, context, object));
}
-TF_BUILTIN(ArrayIncludes, CodeStubAssembler) {
- Node* const array = Parameter(Descriptor::kReceiver);
- Node* const search_element = Parameter(Descriptor::kSearchElement);
- Node* const start_from = Parameter(Descriptor::kFromIndex);
- Node* const context = Parameter(Descriptor::kContext);
+class ArrayIncludesIndexofAssembler : public CodeStubAssembler {
+ public:
+ explicit ArrayIncludesIndexofAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ enum SearchVariant { kIncludes, kIndexOf };
+
+ void Generate(SearchVariant variant);
+};
- VARIABLE(index_var, MachineType::PointerRepresentation());
+void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
+ const int kSearchElementArg = 0;
+ const int kFromIndexArg = 1;
- Label init_k(this), return_true(this), return_false(this), call_runtime(this);
- Label init_len(this), select_loop(this);
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
- index_var.Bind(IntPtrConstant(0));
+ Node* array = args.GetReceiver();
+ Node* search_element =
+ args.GetOptionalArgumentValue(kSearchElementArg, UndefinedConstant());
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+
+ Node* intptr_zero = IntPtrConstant(0);
+
+ Label init_index(this), return_found(this), return_not_found(this),
+ call_runtime(this);
   // Take the slow path if the receiver is not a JSArray, if retrieving
   // elements requires traversing the prototype, or if access checks are
   // required.
BranchIfFastJSArray(array, context, FastJSArrayAccessMode::INBOUNDS_READ,
- &init_len, &call_runtime);
-
- BIND(&init_len);
- // JSArray length is always an Smi for fast arrays.
- CSA_ASSERT(this, TaggedIsSmi(LoadObjectField(array, JSArray::kLengthOffset)));
- Node* const len = LoadAndUntagObjectField(array, JSArray::kLengthOffset);
-
- GotoIf(IsUndefined(start_from), &select_loop);
-
- // Bailout to slow path if startIndex is not an Smi.
- Branch(TaggedIsSmi(start_from), &init_k, &call_runtime);
-
- BIND(&init_k);
- CSA_ASSERT(this, TaggedIsSmi(start_from));
- Node* const untagged_start_from = SmiToWord(start_from);
- index_var.Bind(
- Select(IntPtrGreaterThanOrEqual(untagged_start_from, IntPtrConstant(0)),
- [=]() { return untagged_start_from; },
- [=]() {
- Node* const index = IntPtrAdd(len, untagged_start_from);
- return SelectConstant(IntPtrLessThan(index, IntPtrConstant(0)),
- IntPtrConstant(0), index,
- MachineType::PointerRepresentation());
- },
- MachineType::PointerRepresentation()));
-
- Goto(&select_loop);
- BIND(&select_loop);
- static int32_t kElementsKind[] = {
- FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
- FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
- };
+ &init_index, &call_runtime);
+
+ BIND(&init_index);
+ VARIABLE(index_var, MachineType::PointerRepresentation(), intptr_zero);
+
+ // JSArray length is always a positive Smi for fast arrays.
+ CSA_ASSERT(this, TaggedIsPositiveSmi(LoadJSArrayLength(array)));
+ Node* array_length = SmiUntag(LoadJSArrayLength(array));
+
+ {
+ // Initialize fromIndex.
+ Label is_smi(this), is_nonsmi(this), done(this);
+
+ // If no fromIndex was passed, default to 0.
+ GotoIf(IntPtrLessThanOrEqual(argc, IntPtrConstant(kFromIndexArg)), &done);
+
+ Node* start_from = args.AtIndex(kFromIndexArg);
+ // Handle Smis and undefined here and everything else in runtime.
+ // We must be very careful with side effects from the ToInteger conversion,
+ // as the side effects might render previously checked assumptions about
+ // the receiver being a fast JSArray and its length invalid.
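+ // (E.g. a fromIndex whose valueOf() shrinks the array would invalidate
+ // the length loaded above.)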
+ Branch(TaggedIsSmi(start_from), &is_smi, &is_nonsmi);
+
+ BIND(&is_nonsmi);
+ {
+ GotoIfNot(IsUndefined(start_from), &call_runtime);
+ Goto(&done);
+ }
+ BIND(&is_smi);
+ {
+ Node* intptr_start_from = SmiUntag(start_from);
+ index_var.Bind(intptr_start_from);
+
+ GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
+ // The fromIndex is negative: add it to the array's length.
+ index_var.Bind(IntPtrAdd(array_length, index_var.value()));
+ // Clamp negative results at zero.
+ GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), intptr_zero), &done);
+ index_var.Bind(intptr_zero);
+ Goto(&done);
+ }
+ BIND(&done);
+ }
+
+ // Fail early if startIndex >= array.length.
+ GotoIf(IntPtrGreaterThanOrEqual(index_var.value(), array_length),
+ &return_not_found);
Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);
- Label* element_kind_handlers[] = {&if_smiorobjects, &if_smiorobjects,
- &if_smiorobjects, &if_smiorobjects,
- &if_packed_doubles, &if_holey_doubles};
- Node* map = LoadMap(array);
- Node* elements_kind = LoadMapElementsKind(map);
+ Node* elements_kind = LoadMapElementsKind(LoadMap(array));
Node* elements = LoadElements(array);
- Switch(elements_kind, &return_false, kElementsKind, element_kind_handlers,
- arraysize(kElementsKind));
+ STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+ STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+ STATIC_ASSERT(FAST_ELEMENTS == 2);
+ STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
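+ // The asserts above let a single unsigned comparison cover all four
+ // tagged (Smi/object) element kinds.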
+ GotoIf(
+ Uint32LessThanOrEqual(elements_kind, Int32Constant(FAST_HOLEY_ELEMENTS)),
+ &if_smiorobjects);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(FAST_DOUBLE_ELEMENTS)),
+ &if_packed_doubles);
+ GotoIf(Word32Equal(elements_kind, Int32Constant(FAST_HOLEY_DOUBLE_ELEMENTS)),
+ &if_holey_doubles);
+ Goto(&return_not_found);
BIND(&if_smiorobjects);
{
VARIABLE(search_num, MachineRepresentation::kFloat64);
Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
- string_loop(this, &index_var), undef_loop(this, &index_var),
- not_smi(this), not_heap_num(this);
+ string_loop(this), undef_loop(this, &index_var), not_smi(this),
+ not_heap_num(this);
GotoIfNot(TaggedIsSmi(search_element), &not_smi);
search_num.Bind(SmiToFloat64(search_element));
Goto(&heap_num_loop);
BIND(&not_smi);
- GotoIf(WordEqual(search_element, UndefinedConstant()), &undef_loop);
+ if (variant == kIncludes) {
+ GotoIf(IsUndefined(search_element), &undef_loop);
+ }
Node* map = LoadMap(search_element);
GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
search_num.Bind(LoadHeapNumberValue(search_element));
@@ -1221,83 +1724,105 @@ TF_BUILTIN(ArrayIncludes, CodeStubAssembler) {
BIND(&ident_loop);
{
- GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ &return_not_found);
Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIf(WordEqual(element_k, search_element), &return_true);
+ GotoIf(WordEqual(element_k, search_element), &return_found);
- index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Increment(index_var);
Goto(&ident_loop);
}
- BIND(&undef_loop);
- {
- GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
+ if (variant == kIncludes) {
+ BIND(&undef_loop);
+
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ &return_not_found);
Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIf(WordEqual(element_k, UndefinedConstant()), &return_true);
- GotoIf(WordEqual(element_k, TheHoleConstant()), &return_true);
+ GotoIf(IsUndefined(element_k), &return_found);
+ GotoIf(IsTheHole(element_k), &return_found);
- index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Increment(index_var);
Goto(&undef_loop);
}
BIND(&heap_num_loop);
{
Label nan_loop(this, &index_var), not_nan_loop(this, &index_var);
- BranchIfFloat64IsNaN(search_num.value(), &nan_loop, &not_nan_loop);
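+ // Array.p.indexOf uses StrictEquals, so a NaN search never matches.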
+ Label* nan_handling =
+ variant == kIncludes ? &nan_loop : &return_not_found;
+ BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
BIND(&not_nan_loop);
{
Label continue_loop(this), not_smi(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ &return_not_found);
Node* element_k = LoadFixedArrayElement(elements, index_var.value());
GotoIfNot(TaggedIsSmi(element_k), &not_smi);
Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
- &return_true, &continue_loop);
+ &return_found, &continue_loop);
BIND(&not_smi);
GotoIfNot(IsHeapNumber(element_k), &continue_loop);
Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
- &return_true, &continue_loop);
+ &return_found, &continue_loop);
BIND(&continue_loop);
- index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Increment(index_var);
Goto(&not_nan_loop);
}
- BIND(&nan_loop);
- {
+ // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
+ if (variant == kIncludes) {
+ BIND(&nan_loop);
Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ &return_not_found);
Node* element_k = LoadFixedArrayElement(elements, index_var.value());
GotoIf(TaggedIsSmi(element_k), &continue_loop);
GotoIfNot(IsHeapNumber(element_k), &continue_loop);
- BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_true,
+ BranchIfFloat64IsNaN(LoadHeapNumberValue(element_k), &return_found,
&continue_loop);
BIND(&continue_loop);
- index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Increment(index_var);
Goto(&nan_loop);
}
}
BIND(&string_loop);
{
- Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
+ CSA_ASSERT(this, IsString(search_element));
+ Label continue_loop(this), next_iteration(this, &index_var),
+ slow_compare(this), runtime(this, Label::kDeferred);
+ Node* search_length = LoadStringLength(search_element);
+ Goto(&next_iteration);
+ BIND(&next_iteration);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ &return_not_found);
Node* element_k = LoadFixedArrayElement(elements, index_var.value());
GotoIf(TaggedIsSmi(element_k), &continue_loop);
- GotoIfNot(IsStringInstanceType(LoadInstanceType(element_k)),
- &continue_loop);
-
- // TODO(bmeurer): Consider inlining the StringEqual logic here.
- Node* result = CallStub(CodeFactory::StringEqual(isolate()), context,
- search_element, element_k);
- Branch(WordEqual(BooleanConstant(true), result), &return_true,
+ GotoIf(WordEqual(search_element, element_k), &return_found);
+ Node* element_k_type = LoadInstanceType(element_k);
+ GotoIfNot(IsStringInstanceType(element_k_type), &continue_loop);
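+ // Strings of different lengths can never be equal; only same-length
+ // candidates take the slow comparison path below.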
+ Branch(WordEqual(search_length, LoadStringLength(element_k)),
+ &slow_compare, &continue_loop);
+
+ BIND(&slow_compare);
+ StringBuiltinsAssembler string_asm(state());
+ string_asm.StringEqual_Core(context, search_element, search_type,
+ search_length, element_k, element_k_type,
+ &return_found, &continue_loop, &runtime);
+ BIND(&runtime);
+ Node* result = CallRuntime(Runtime::kStringEqual, context, search_element,
+ element_k);
+ Branch(WordEqual(BooleanConstant(true), result), &return_found,
&continue_loop);
BIND(&continue_loop);
- index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
- Goto(&string_loop);
+ Increment(index_var);
+ Goto(&next_iteration);
}
}
@@ -1312,36 +1837,38 @@ TF_BUILTIN(ArrayIncludes, CodeStubAssembler) {
Goto(&not_nan_loop);
BIND(&search_notnan);
- GotoIfNot(IsHeapNumber(search_element), &return_false);
+ GotoIfNot(IsHeapNumber(search_element), &return_not_found);
search_num.Bind(LoadHeapNumberValue(search_element));
- BranchIfFloat64IsNaN(search_num.value(), &nan_loop, &not_nan_loop);
+ Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
+ BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
- // Search for HeapNumber
BIND(&not_nan_loop);
{
Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ &return_not_found);
Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
MachineType::Float64());
- Branch(Float64Equal(element_k, search_num.value()), &return_true,
+ Branch(Float64Equal(element_k, search_num.value()), &return_found,
&continue_loop);
BIND(&continue_loop);
- index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Increment(index_var);
Goto(&not_nan_loop);
}
- // Search for NaN
- BIND(&nan_loop);
- {
+ // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
+ if (variant == kIncludes) {
+ BIND(&nan_loop);
Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ &return_not_found);
Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
MachineType::Float64());
- BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+ BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
BIND(&continue_loop);
- index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Increment(index_var);
Goto(&nan_loop);
}
}
@@ -1357,339 +1884,94 @@ TF_BUILTIN(ArrayIncludes, CodeStubAssembler) {
Goto(&not_nan_loop);
BIND(&search_notnan);
- GotoIf(WordEqual(search_element, UndefinedConstant()), &hole_loop);
- GotoIfNot(IsHeapNumber(search_element), &return_false);
+ if (variant == kIncludes) {
+ GotoIf(IsUndefined(search_element), &hole_loop);
+ }
+ GotoIfNot(IsHeapNumber(search_element), &return_not_found);
search_num.Bind(LoadHeapNumberValue(search_element));
- BranchIfFloat64IsNaN(search_num.value(), &nan_loop, &not_nan_loop);
+ Label* nan_handling = variant == kIncludes ? &nan_loop : &return_not_found;
+ BranchIfFloat64IsNaN(search_num.value(), nan_handling, &not_nan_loop);
- // Search for HeapNumber
BIND(&not_nan_loop);
{
Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ &return_not_found);
- // Load double value or continue if it contains a double hole.
- Node* element_k = LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64(), 0,
- INTPTR_PARAMETERS, &continue_loop);
+ // No need for hole checking here; the following Float64Equal will
+ // return 'not equal' for holes anyway.
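+ // (The double hole is a NaN bit pattern, and NaN never compares equal.)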
+ Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
+ MachineType::Float64());
- Branch(Float64Equal(element_k, search_num.value()), &return_true,
+ Branch(Float64Equal(element_k, search_num.value()), &return_found,
&continue_loop);
BIND(&continue_loop);
- index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Increment(index_var);
Goto(&not_nan_loop);
}
- // Search for NaN
- BIND(&nan_loop);
- {
+ // Array.p.includes uses SameValueZero comparisons, where NaN == NaN.
+ if (variant == kIncludes) {
+ BIND(&nan_loop);
Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ &return_not_found);
- // Load double value or continue if it contains a double hole.
+ // Load double value or continue if it's the hole NaN.
Node* element_k = LoadFixedDoubleArrayElement(
elements, index_var.value(), MachineType::Float64(), 0,
INTPTR_PARAMETERS, &continue_loop);
- BranchIfFloat64IsNaN(element_k, &return_true, &continue_loop);
+ BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop);
BIND(&continue_loop);
- index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Increment(index_var);
Goto(&nan_loop);
}
- // Search for the Hole
- BIND(&hole_loop);
- {
- GotoIfNot(UintPtrLessThan(index_var.value(), len), &return_false);
+ // Array.p.includes treats the hole as undefined.
+ if (variant == kIncludes) {
+ BIND(&hole_loop);
+ GotoIfNot(UintPtrLessThan(index_var.value(), array_length),
+ &return_not_found);
// Check if the element is a double hole, but don't load it.
LoadFixedDoubleArrayElement(elements, index_var.value(),
MachineType::None(), 0, INTPTR_PARAMETERS,
- &return_true);
+ &return_found);
- index_var.Bind(IntPtrAdd(index_var.value(), IntPtrConstant(1)));
+ Increment(index_var);
Goto(&hole_loop);
}
}
- BIND(&return_true);
- Return(TrueConstant());
+ BIND(&return_found);
+ args.PopAndReturn(variant == kIncludes ? TrueConstant()
+ : SmiTag(index_var.value()));
- BIND(&return_false);
- Return(FalseConstant());
+ BIND(&return_not_found);
+ args.PopAndReturn(variant == kIncludes ? FalseConstant()
+ : NumberConstant(-1));
BIND(&call_runtime);
- Return(CallRuntime(Runtime::kArrayIncludes_Slow, context, array,
- search_element, start_from));
-}
-
-TF_BUILTIN(ArrayIndexOf, CodeStubAssembler) {
- Node* array = Parameter(Descriptor::kReceiver);
- Node* search_element = Parameter(Descriptor::kSearchElement);
- Node* start_from = Parameter(Descriptor::kFromIndex);
- Node* context = Parameter(Descriptor::kContext);
-
- Node* intptr_zero = IntPtrConstant(0);
- Node* intptr_one = IntPtrConstant(1);
-
- VARIABLE(len_var, MachineType::PointerRepresentation());
- VARIABLE(index_var, MachineType::PointerRepresentation());
- VARIABLE(start_from_var, MachineType::PointerRepresentation());
-
- Label init_k(this), return_found(this), return_not_found(this),
- call_runtime(this);
-
- Label init_len(this);
-
- index_var.Bind(intptr_zero);
- len_var.Bind(intptr_zero);
-
- // Take slow path if not a JSArray, if retrieving elements requires
- // traversing prototype, or if access checks are required.
- BranchIfFastJSArray(array, context, FastJSArrayAccessMode::INBOUNDS_READ,
- &init_len, &call_runtime);
-
- BIND(&init_len);
- {
- // JSArray length is always an Smi for fast arrays.
- CSA_ASSERT(this,
- TaggedIsSmi(LoadObjectField(array, JSArray::kLengthOffset)));
- Node* len = LoadAndUntagObjectField(array, JSArray::kLengthOffset);
-
- len_var.Bind(len);
- Branch(WordEqual(len_var.value(), intptr_zero), &return_not_found, &init_k);
- }
-
- BIND(&init_k);
- {
- // For now only deal with undefined and Smis here; we must be really careful
- // with side-effects from the ToInteger conversion as the side-effects might
- // render our assumptions about the receiver being a fast JSArray and the
- // length invalid.
- Label done(this), init_k_smi(this), init_k_other(this), init_k_zero(this),
- init_k_n(this);
- Branch(TaggedIsSmi(start_from), &init_k_smi, &init_k_other);
-
- BIND(&init_k_smi);
- {
- // The fromIndex is a Smi.
- start_from_var.Bind(SmiUntag(start_from));
- Goto(&init_k_n);
- }
-
- BIND(&init_k_other);
- {
- // The fromIndex must be undefined then, otherwise bailout and let the
- // runtime deal with the full ToInteger conversion.
- GotoIfNot(IsUndefined(start_from), &call_runtime);
- start_from_var.Bind(intptr_zero);
- Goto(&init_k_n);
- }
-
- BIND(&init_k_n);
- {
- Label if_positive(this), if_negative(this), done(this);
- Branch(IntPtrLessThan(start_from_var.value(), intptr_zero), &if_negative,
- &if_positive);
-
- BIND(&if_positive);
- {
- index_var.Bind(start_from_var.value());
- Goto(&done);
- }
-
- BIND(&if_negative);
- {
- index_var.Bind(IntPtrAdd(len_var.value(), start_from_var.value()));
- Branch(IntPtrLessThan(index_var.value(), intptr_zero), &init_k_zero,
- &done);
- }
-
- BIND(&init_k_zero);
- {
- index_var.Bind(intptr_zero);
- Goto(&done);
- }
-
- BIND(&done);
- }
- }
-
- static int32_t kElementsKind[] = {
- FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS, FAST_ELEMENTS,
- FAST_HOLEY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS,
- };
-
- Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);
- Label* element_kind_handlers[] = {&if_smiorobjects, &if_smiorobjects,
- &if_smiorobjects, &if_smiorobjects,
- &if_packed_doubles, &if_holey_doubles};
-
- Node* map = LoadMap(array);
- Node* elements_kind = LoadMapElementsKind(map);
- Node* elements = LoadElements(array);
- Switch(elements_kind, &return_not_found, kElementsKind, element_kind_handlers,
- arraysize(kElementsKind));
-
- BIND(&if_smiorobjects);
- {
- VARIABLE(search_num, MachineRepresentation::kFloat64);
- Label ident_loop(this, &index_var), heap_num_loop(this, &search_num),
- string_loop(this, &index_var), not_smi(this), not_heap_num(this);
-
- GotoIfNot(TaggedIsSmi(search_element), &not_smi);
- search_num.Bind(SmiToFloat64(search_element));
- Goto(&heap_num_loop);
-
- BIND(&not_smi);
- Node* map = LoadMap(search_element);
- GotoIfNot(IsHeapNumberMap(map), &not_heap_num);
- search_num.Bind(LoadHeapNumberValue(search_element));
- Goto(&heap_num_loop);
-
- BIND(&not_heap_num);
- Node* search_type = LoadMapInstanceType(map);
- GotoIf(IsStringInstanceType(search_type), &string_loop);
- Goto(&ident_loop);
-
- BIND(&ident_loop);
- {
- GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIf(WordEqual(element_k, search_element), &return_found);
-
- index_var.Bind(IntPtrAdd(index_var.value(), intptr_one));
- Goto(&ident_loop);
- }
-
- BIND(&heap_num_loop);
- {
- Label not_nan_loop(this, &index_var);
- BranchIfFloat64IsNaN(search_num.value(), &return_not_found,
- &not_nan_loop);
-
- BIND(&not_nan_loop);
- {
- Label continue_loop(this), not_smi(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIfNot(TaggedIsSmi(element_k), &not_smi);
- Branch(Float64Equal(search_num.value(), SmiToFloat64(element_k)),
- &return_found, &continue_loop);
-
- BIND(&not_smi);
- GotoIfNot(IsHeapNumber(element_k), &continue_loop);
- Branch(Float64Equal(search_num.value(), LoadHeapNumberValue(element_k)),
- &return_found, &continue_loop);
-
- BIND(&continue_loop);
- index_var.Bind(IntPtrAdd(index_var.value(), intptr_one));
- Goto(&not_nan_loop);
- }
- }
-
- BIND(&string_loop);
- {
- Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
- &return_not_found);
- Node* element_k = LoadFixedArrayElement(elements, index_var.value());
- GotoIf(TaggedIsSmi(element_k), &continue_loop);
- GotoIfNot(IsString(element_k), &continue_loop);
-
- // TODO(bmeurer): Consider inlining the StringEqual logic here.
- Callable callable = CodeFactory::StringEqual(isolate());
- Node* result = CallStub(callable, context, search_element, element_k);
- Branch(WordEqual(BooleanConstant(true), result), &return_found,
- &continue_loop);
-
- BIND(&continue_loop);
- index_var.Bind(IntPtrAdd(index_var.value(), intptr_one));
- Goto(&string_loop);
- }
- }
-
- BIND(&if_packed_doubles);
{
- Label not_nan_loop(this, &index_var), search_notnan(this);
- VARIABLE(search_num, MachineRepresentation::kFloat64);
-
- GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
- search_num.Bind(SmiToFloat64(search_element));
- Goto(&not_nan_loop);
-
- BIND(&search_notnan);
- GotoIfNot(IsHeapNumber(search_element), &return_not_found);
-
- search_num.Bind(LoadHeapNumberValue(search_element));
-
- BranchIfFloat64IsNaN(search_num.value(), &return_not_found, &not_nan_loop);
-
- // Search for HeapNumber
- BIND(&not_nan_loop);
- {
- GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
- &return_not_found);
- Node* element_k = LoadFixedDoubleArrayElement(elements, index_var.value(),
- MachineType::Float64());
- GotoIf(Float64Equal(element_k, search_num.value()), &return_found);
-
- index_var.Bind(IntPtrAdd(index_var.value(), intptr_one));
- Goto(&not_nan_loop);
- }
- }
-
- BIND(&if_holey_doubles);
- {
- Label not_nan_loop(this, &index_var), search_notnan(this);
- VARIABLE(search_num, MachineRepresentation::kFloat64);
-
- GotoIfNot(TaggedIsSmi(search_element), &search_notnan);
- search_num.Bind(SmiToFloat64(search_element));
- Goto(&not_nan_loop);
-
- BIND(&search_notnan);
- GotoIfNot(IsHeapNumber(search_element), &return_not_found);
-
- search_num.Bind(LoadHeapNumberValue(search_element));
-
- BranchIfFloat64IsNaN(search_num.value(), &return_not_found, &not_nan_loop);
-
- // Search for HeapNumber
- BIND(&not_nan_loop);
- {
- Label continue_loop(this);
- GotoIfNot(UintPtrLessThan(index_var.value(), len_var.value()),
- &return_not_found);
-
- // Load double value or continue if it contains a double hole.
- Node* element_k = LoadFixedDoubleArrayElement(
- elements, index_var.value(), MachineType::Float64(), 0,
- INTPTR_PARAMETERS, &continue_loop);
-
- Branch(Float64Equal(element_k, search_num.value()), &return_found,
- &continue_loop);
- BIND(&continue_loop);
- index_var.Bind(IntPtrAdd(index_var.value(), intptr_one));
- Goto(&not_nan_loop);
- }
+ Node* start_from =
+ args.GetOptionalArgumentValue(kFromIndexArg, UndefinedConstant());
+ Runtime::FunctionId function = variant == kIncludes
+ ? Runtime::kArrayIncludes_Slow
+ : Runtime::kArrayIndexOf;
+ args.PopAndReturn(
+ CallRuntime(function, context, array, search_element, start_from));
}
+}
- BIND(&return_found);
- Return(SmiTag(index_var.value()));
-
- BIND(&return_not_found);
- Return(NumberConstant(-1));
-
- BIND(&call_runtime);
- Return(CallRuntime(Runtime::kArrayIndexOf, context, array, search_element,
- start_from));
+TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
+ Generate(kIncludes);
}
+TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) { Generate(kIndexOf); }
+
class ArrayPrototypeIterationAssembler : public CodeStubAssembler {
public:
explicit ArrayPrototypeIterationAssembler(compiler::CodeAssemblerState* state)
@@ -1945,8 +2227,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
{
Node* length =
GetProperty(context, array, factory()->length_string());
- Callable to_length = CodeFactory::ToLength(isolate());
- var_length.Bind(CallStub(to_length, context, length));
+ var_length.Bind(ToLength_Inline(context, length));
Goto(&done);
}
diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc
index c565fff1cd..da1602b963 100644
--- a/deps/v8/src/builtins/builtins-array.cc
+++ b/deps/v8/src/builtins/builtins-array.cc
@@ -727,9 +727,12 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
}
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: {
+ DisallowHeapAllocation no_gc;
+ FixedArrayBase* elements = object->elements();
+ JSObject* raw_object = *object;
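+ // The raw pointers above stay valid only because nothing in the loop
+ // below can allocate.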
ElementsAccessor* accessor = object->GetElementsAccessor();
for (uint32_t i = 0; i < range; i++) {
- if (accessor->HasElement(object, i)) {
+ if (accessor->HasElement(raw_object, i, elements)) {
indices->Add(i);
}
}
@@ -749,7 +752,7 @@ void CollectElementIndices(Handle<JSObject> object, uint32_t range,
}
ElementsAccessor* accessor = object->GetElementsAccessor();
for (; i < range; i++) {
- if (accessor->HasElement(object, i)) {
+ if (accessor->HasElement(*object, i)) {
indices->Add(i);
}
}
@@ -1203,15 +1206,9 @@ BUILTIN(ArrayConcat) {
HandleScope scope(isolate);
Handle<Object> receiver = args.receiver();
- // TODO(bmeurer): Do we really care about the exact exception message here?
- if (receiver->IsNullOrUndefined(isolate)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
- isolate->factory()->NewStringFromAsciiChecked(
- "Array.prototype.concat")));
- }
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, receiver, Object::ToObject(isolate, args.receiver()));
+ isolate, receiver,
+ Object::ToObject(isolate, args.receiver(), "Array.prototype.concat"));
args[0] = *receiver;
Handle<JSArray> result_array;
diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc
index 20ea39dcca..4f9078b4b6 100644
--- a/deps/v8/src/builtins/builtins-arraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-arraybuffer.cc
@@ -201,8 +201,7 @@ static Object* SliceHelper(BuiltinArguments args, Isolate* isolate,
Handle<Object> new_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, new_obj,
- Execution::New(Handle<JSFunction>::cast(ctor), argc, argv.start()));
+ isolate, new_obj, Execution::New(isolate, ctor, argc, argv.start()));
new_ = Handle<JSReceiver>::cast(new_obj);
}
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc
index 82a9d31cd1..b3cb3d8ebd 100644
--- a/deps/v8/src/builtins/builtins-async-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc
@@ -360,68 +360,6 @@ TF_BUILTIN(AsyncGeneratorPrototypeThrow, AsyncGeneratorBuiltinsAssembler) {
"[AsyncGenerator].prototype.throw");
}
-TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) {
- Node* const generator = Parameter(Descriptor::kReceiver);
- Node* const value = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
-
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
- CSA_SLOW_ASSERT(this,
- HasInstanceType(generator, JS_ASYNC_GENERATOR_OBJECT_TYPE));
- CSA_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
-
- CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, value,
- FalseConstant());
-
- // Yield must have been reached via ResumeNext(), so don't recursively call
- // it.
- Return(UndefinedConstant());
-}
-
-TF_BUILTIN(AsyncGeneratorRawYield, AsyncGeneratorBuiltinsAssembler) {
- Node* const generator = Parameter(Descriptor::kReceiver);
- Node* const iter_result = Parameter(Descriptor::kValue);
- Node* const context = Parameter(Descriptor::kContext);
-
- CSA_ASSERT_JS_ARGC_EQ(this, 1);
- CSA_SLOW_ASSERT(this,
- HasInstanceType(generator, JS_ASYNC_GENERATOR_OBJECT_TYPE));
- CSA_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
-
- VARIABLE(var_value, MachineRepresentation::kTagged);
- VARIABLE(var_done, MachineRepresentation::kTagged);
-
- // RawYield is used for yield*, and values sent to yield* are always
- // iterator result objects.
- Label if_slow(this), async_generator_resolve(this);
-
- GotoIfNot(IsFastJSIterResult(context, iter_result), &if_slow);
- var_value.Bind(LoadObjectField(iter_result, JSIteratorResult::kValueOffset));
- var_done.Bind(LoadObjectField(iter_result, JSIteratorResult::kDoneOffset));
- Goto(&async_generator_resolve);
-
- BIND(&if_slow);
- {
- var_value.Bind(
- GetProperty(context, iter_result, factory()->value_string()));
- Node* const done =
- GetProperty(context, iter_result, factory()->done_string());
-
- var_done.Bind(Select(
- IsBoolean(done), [=]() { return done; },
- [=]() { return CallBuiltin(Builtins::kToBoolean, context, done); },
- MachineRepresentation::kTagged));
- Goto(&async_generator_resolve);
- }
-
- BIND(&async_generator_resolve);
- Node* const value = var_value.value();
- Node* const done = var_done.value();
- CallBuiltin(Builtins::kAsyncGeneratorResolve, context, generator, value,
- done);
- Return(UndefinedConstant());
-}
-
TF_BUILTIN(AsyncGeneratorAwaitResolveClosure, AsyncGeneratorBuiltinsAssembler) {
Node* value = Parameter(Descriptor::kValue);
Node* context = Parameter(Descriptor::kContext);
@@ -537,6 +475,10 @@ TF_BUILTIN(AsyncGeneratorResolve, AsyncGeneratorBuiltinsAssembler) {
Node* const done = Parameter(Descriptor::kDone);
Node* const context = Parameter(Descriptor::kContext);
+ CSA_SLOW_ASSERT(this,
+ HasInstanceType(generator, JS_ASYNC_GENERATOR_OBJECT_TYPE));
+ CSA_ASSERT(this, IsGeneratorNotSuspendedForAwait(generator));
+
Node* const next = TakeFirstAsyncGeneratorRequestFromQueue(generator);
Node* const promise = LoadPromiseFromAsyncGeneratorRequest(next);
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 88fa2321c7..bd70865399 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -83,12 +83,11 @@ void Builtins::Generate_TailCall_ReceiverIsAny(MacroAssembler* masm) {
}
void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm) {
- Generate_CallForwardVarargs(masm, masm->isolate()->builtins()->Call());
+ Generate_ForwardVarargs(masm, masm->isolate()->builtins()->Call());
}
void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
- Generate_CallForwardVarargs(masm,
- masm->isolate()->builtins()->CallFunction());
+ Generate_ForwardVarargs(masm, masm->isolate()->builtins()->CallFunction());
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-console-gen.cc b/deps/v8/src/builtins/builtins-console-gen.cc
new file mode 100644
index 0000000000..f8475d1632
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-console-gen.cc
@@ -0,0 +1,38 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/builtins/builtins.h"
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+TF_BUILTIN(FastConsoleAssert, CodeStubAssembler) {
+ Label runtime(this);
+ Label out(this);
+
+ // TODO(ishell): use constants from Descriptor once the JSFunction linkage
+ // arguments are reordered.
+ Node* argc = Parameter(BuiltinDescriptor::kArgumentsCount);
+ Node* context = Parameter(BuiltinDescriptor::kContext);
+ Node* new_target = Parameter(BuiltinDescriptor::kNewTarget);
+ GotoIf(Word32Equal(argc, Int32Constant(0)), &runtime);
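+ // With no arguments there is no condition to test on the fast path;
+ // leave reporting to the C++ builtin.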
+
+ CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
+ BranchIfToBooleanIsTrue(args.AtIndex(0), &out, &runtime);
+ BIND(&out);
+ args.PopAndReturn(UndefinedConstant());
+
+ BIND(&runtime);
+ {
+ Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
+ MachineType::TaggedPointer());
+ TailCallBuiltin(Builtins::kConsoleAssert, context, target, new_target,
+ argc);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-console.cc b/deps/v8/src/builtins/builtins-console.cc
new file mode 100644
index 0000000000..a43fe136d0
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-console.cc
@@ -0,0 +1,59 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/builtins/builtins-utils.h"
+#include "src/builtins/builtins.h"
+
+#include "src/debug/interface-types.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Console
+
+#define CONSOLE_METHOD_LIST(V) \
+ V(Debug) \
+ V(Error) \
+ V(Info) \
+ V(Log) \
+ V(Warn) \
+ V(Dir) \
+ V(DirXml) \
+ V(Table) \
+ V(Trace) \
+ V(Group) \
+ V(GroupCollapsed) \
+ V(GroupEnd) \
+ V(Clear) \
+ V(Count) \
+ V(Assert) \
+ V(MarkTimeline) \
+ V(Profile) \
+ V(ProfileEnd) \
+ V(Timeline) \
+ V(TimelineEnd) \
+ V(Time) \
+ V(TimeEnd) \
+ V(TimeStamp)
+
+#define CONSOLE_BUILTIN_IMPLEMENTATION(name) \
+ BUILTIN(Console##name) { \
+ HandleScope scope(isolate); \
+ if (isolate->console_delegate()) { \
+ debug::ConsoleCallArguments wrapper(args); \
+ isolate->console_delegate()->name(wrapper); \
+ CHECK(!isolate->has_pending_exception()); \
+ CHECK(!isolate->has_scheduled_exception()); \
+ } \
+ return isolate->heap()->undefined_value(); \
+ }
+CONSOLE_METHOD_LIST(CONSOLE_BUILTIN_IMPLEMENTATION)
+#undef CONSOLE_BUILTIN_IMPLEMENTATION
+
+#undef CONSOLE_METHOD_LIST
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 1dc04da802..1769e65e83 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -17,6 +17,15 @@
namespace v8 {
namespace internal {
+void Builtins::Generate_ConstructForwardVarargs(MacroAssembler* masm) {
+ Generate_ForwardVarargs(masm, masm->isolate()->builtins()->Construct());
+}
+
+void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) {
+ Generate_ForwardVarargs(masm,
+ masm->isolate()->builtins()->ConstructFunction());
+}
+
typedef compiler::Node Node;
Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
@@ -153,6 +162,29 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
BIND(&cell_done);
}
+ {
+ // If the feedback vector has optimized code, check whether it is marked
+ // for deopt and, if so, clear it.
+ Label optimized_code_ok(this);
+ Node* literals = LoadObjectField(literals_cell, Cell::kValueOffset);
+ GotoIfNot(IsFeedbackVector(literals), &optimized_code_ok);
+ Node* optimized_code_cell =
+ LoadFixedArrayElement(literals, FeedbackVector::kOptimizedCodeIndex);
+ Node* optimized_code =
+ LoadWeakCellValue(optimized_code_cell, &optimized_code_ok);
+ Node* code_flags = LoadObjectField(
+ optimized_code, Code::kKindSpecificFlags1Offset, MachineType::Uint32());
+ Node* marked_for_deopt =
+ DecodeWord32<Code::MarkedForDeoptimizationField>(code_flags);
+ GotoIf(Word32Equal(marked_for_deopt, Int32Constant(0)), &optimized_code_ok);
+
+ // Code is marked for deopt, clear the optimized code slot.
+ StoreFixedArrayElement(literals, FeedbackVector::kOptimizedCodeIndex,
+ EmptyWeakCellConstant(), SKIP_WRITE_BARRIER);
+ Goto(&optimized_code_ok);
+
+ BIND(&optimized_code_ok);
+ }
StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset,
literals_cell);
StoreObjectFieldNoWriteBarrier(
@@ -259,76 +291,8 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewObject(Node* context,
Node* object = AllocateJSObjectFromMap(initial_map, properties.value());
- Node* instance_size_words = ChangeUint32ToWord(LoadObjectField(
- initial_map, Map::kInstanceSizeOffset, MachineType::Uint8()));
- Node* instance_size =
- WordShl(instance_size_words, IntPtrConstant(kPointerSizeLog2));
-
// Perform in-object slack tracking if requested.
- Node* bit_field3 = LoadMapBitField3(initial_map);
- Label slack_tracking(this), finalize(this, Label::kDeferred), done(this);
- GotoIf(IsSetWord32<Map::ConstructionCounter>(bit_field3), &slack_tracking);
-
- // Initialize remaining fields.
- {
- Comment("no slack tracking");
- InitializeFieldsWithRoot(object, IntPtrConstant(JSObject::kHeaderSize),
- instance_size, Heap::kUndefinedValueRootIndex);
- Goto(&end);
- }
-
- {
- BIND(&slack_tracking);
-
- // Decrease generous allocation count.
- STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
- Comment("update allocation count");
- Node* new_bit_field3 = Int32Sub(
- bit_field3, Int32Constant(1 << Map::ConstructionCounter::kShift));
- StoreObjectFieldNoWriteBarrier(initial_map, Map::kBitField3Offset,
- new_bit_field3,
- MachineRepresentation::kWord32);
- GotoIf(IsClearWord32<Map::ConstructionCounter>(new_bit_field3), &finalize);
-
- Node* unused_fields = LoadObjectField(
- initial_map, Map::kUnusedPropertyFieldsOffset, MachineType::Uint8());
- Node* used_size =
- IntPtrSub(instance_size, WordShl(ChangeUint32ToWord(unused_fields),
- IntPtrConstant(kPointerSizeLog2)));
-
- Comment("initialize filler fields (no finalize)");
- InitializeFieldsWithRoot(object, used_size, instance_size,
- Heap::kOnePointerFillerMapRootIndex);
-
- Comment("initialize undefined fields (no finalize)");
- InitializeFieldsWithRoot(object, IntPtrConstant(JSObject::kHeaderSize),
- used_size, Heap::kUndefinedValueRootIndex);
- Goto(&end);
- }
-
- {
- // Finalize the instance size.
- BIND(&finalize);
-
- Node* unused_fields = LoadObjectField(
- initial_map, Map::kUnusedPropertyFieldsOffset, MachineType::Uint8());
- Node* used_size =
- IntPtrSub(instance_size, WordShl(ChangeUint32ToWord(unused_fields),
- IntPtrConstant(kPointerSizeLog2)));
-
- Comment("initialize filler fields (finalize)");
- InitializeFieldsWithRoot(object, used_size, instance_size,
- Heap::kOnePointerFillerMapRootIndex);
-
- Comment("initialize undefined fields (finalize)");
- InitializeFieldsWithRoot(object, IntPtrConstant(JSObject::kHeaderSize),
- used_size, Heap::kUndefinedValueRootIndex);
-
- CallRuntime(Runtime::kFinalizeInstanceSize, context, initial_map);
- Goto(&end);
- }
-
- BIND(&end);
+ HandleSlackTracking(context, object, initial_map, JSObject::kHeaderSize);
return object;
}
@@ -616,56 +580,81 @@ TF_BUILTIN(FastCloneShallowArrayDontTrack, ConstructorBuiltinsAssembler) {
}
Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
- Label* call_runtime, Node* closure, Node* literals_index,
- Node* properties_count) {
- Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
- Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
- Node* allocation_site = LoadFixedArrayElement(
- feedback_vector, literals_index, 0, CodeStubAssembler::SMI_PARAMETERS);
- GotoIf(IsUndefined(allocation_site), call_runtime);
-
- // Calculate the object and allocation size based on the properties count.
- Node* object_size = IntPtrAdd(WordShl(properties_count, kPointerSizeLog2),
- IntPtrConstant(JSObject::kHeaderSize));
- Node* allocation_size = object_size;
- if (FLAG_allocation_site_pretenuring) {
- allocation_size =
- IntPtrAdd(object_size, IntPtrConstant(AllocationMemento::kSize));
+ Label* call_runtime, Node* closure, Node* literals_index) {
+ Node* allocation_site;
+ {
+ // Load the allocation site.
+ Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
+ Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
+ allocation_site = LoadFixedArrayElement(feedback_vector, literals_index, 0,
+ CodeStubAssembler::SMI_PARAMETERS);
+ GotoIf(IsUndefined(allocation_site), call_runtime);
}
+
Node* boilerplate =
LoadObjectField(allocation_site, AllocationSite::kTransitionInfoOffset);
Node* boilerplate_map = LoadMap(boilerplate);
- Node* instance_size = LoadMapInstanceSize(boilerplate_map);
- Node* size_in_words = WordShr(object_size, kPointerSizeLog2);
- GotoIfNot(WordEqual(instance_size, size_in_words), call_runtime);
-
- Node* copy = AllocateInNewSpace(allocation_size);
- // Copy boilerplate elements.
- VARIABLE(offset, MachineType::PointerRepresentation());
- offset.Bind(IntPtrConstant(-kHeapObjectTag));
- Node* end_offset = IntPtrAdd(object_size, offset.value());
- Label loop_body(this, &offset), loop_check(this, &offset);
- // We should always have an object size greater than zero.
- Goto(&loop_body);
- BIND(&loop_body);
+ VARIABLE(var_properties, MachineRepresentation::kTagged);
{
- // The Allocate above guarantees that the copy lies in new space. This
- // allows us to skip write barriers. This is necessary since we may also be
- // copying unboxed doubles.
- Node* field = Load(MachineType::IntPtr(), boilerplate, offset.value());
- StoreNoWriteBarrier(MachineType::PointerRepresentation(), copy,
- offset.value(), field);
- Goto(&loop_check);
+ // Directly copy over the property store for dict-mode boilerplates.
+ Label if_dictionary(this), if_fast(this), allocate_object(this);
+ Branch(IsDictionaryMap(boilerplate_map), &if_dictionary, &if_fast);
+ BIND(&if_dictionary);
+ {
+ var_properties.Bind(
+ CopyNameDictionary(LoadProperties(boilerplate), call_runtime));
+ // Slow objects have no in-object properties.
+ Goto(&allocate_object);
+ }
+ BIND(&if_fast);
+ {
+ // TODO(cbruni): support copying out-of-object properties.
+ Node* boilerplate_properties = LoadProperties(boilerplate);
+ GotoIfNot(IsEmptyFixedArray(boilerplate_properties), call_runtime);
+ var_properties.Bind(EmptyFixedArrayConstant());
+ Goto(&allocate_object);
+ }
+ BIND(&allocate_object);
}
- BIND(&loop_check);
+
+ Node* instance_size = TimesPointerSize(LoadMapInstanceSize(boilerplate_map));
+ Node* allocation_size = instance_size;
+ if (FLAG_allocation_site_pretenuring) {
+ // Prepare for inner-allocating the AllocationMemento.
+ allocation_size =
+ IntPtrAdd(instance_size, IntPtrConstant(AllocationMemento::kSize));
+ }
+
+ Node* copy = AllocateInNewSpace(allocation_size);
{
- offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
- GotoIfNot(IntPtrGreaterThanOrEqual(offset.value(), end_offset), &loop_body);
+ // Initialize Object fields.
+ StoreMapNoWriteBarrier(copy, boilerplate_map);
+ StoreObjectFieldNoWriteBarrier(copy, JSObject::kPropertiesOffset,
+ var_properties.value());
+ // TODO(cbruni): support elements cloning for object literals.
+ CSA_ASSERT(this, IsEmptyFixedArray(LoadElements(boilerplate)));
+ StoreObjectFieldNoWriteBarrier(copy, JSObject::kElementsOffset,
+ EmptyFixedArrayConstant());
}
+ // Copy over in-object properties.
+ Node* start_offset = IntPtrConstant(JSObject::kHeaderSize);
+ BuildFastLoop(start_offset, instance_size,
+ [=](Node* offset) {
+ // The Allocate above guarantees that the copy lies in new
+ // space. This allows us to skip write barriers. This is
+ // necessary since we may also be copying unboxed doubles.
+ // TODO(verwaest): Allocate and fill in double boxes.
+ // TODO(cbruni): decode map information and support mutable
+ // heap numbers.
+ Node* field = LoadObjectField(boilerplate, offset);
+ StoreObjectFieldNoWriteBarrier(copy, offset, field);
+ },
+ kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+
if (FLAG_allocation_site_pretenuring) {
- Node* memento = InnerAllocate(copy, object_size);
+ Node* memento = InnerAllocate(copy, instance_size);
StoreMapNoWriteBarrier(memento, Heap::kAllocationMementoMapRootIndex);
StoreObjectFieldNoWriteBarrier(
memento, AllocationMemento::kAllocationSiteOffset, allocation_site);
@@ -677,48 +666,25 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
AllocationSite::kPretenureCreateCountOffset,
memento_create_count);
}
-
- // TODO(verwaest): Allocate and fill in double boxes.
return copy;
}
-template <typename Descriptor>
-void ConstructorBuiltinsAssembler::CreateFastCloneShallowObjectBuiltin(
- int properties_count) {
- DCHECK_GE(properties_count, 0);
- DCHECK_LE(properties_count,
- ConstructorBuiltins::kMaximumClonedShallowObjectProperties);
+TF_BUILTIN(FastCloneShallowObject, ConstructorBuiltinsAssembler) {
Label call_runtime(this);
Node* closure = Parameter(Descriptor::kClosure);
Node* literals_index = Parameter(Descriptor::kLiteralIndex);
-
- Node* properties_count_node =
- IntPtrConstant(ConstructorBuiltins::FastCloneShallowObjectPropertiesCount(
- properties_count));
- Node* copy = EmitFastCloneShallowObject(
- &call_runtime, closure, literals_index, properties_count_node);
+ Node* copy =
+ EmitFastCloneShallowObject(&call_runtime, closure, literals_index);
Return(copy);
BIND(&call_runtime);
- Node* constant_properties = Parameter(Descriptor::kConstantProperties);
+ Node* boilerplate_description =
+ Parameter(Descriptor::kBoilerplateDescription);
Node* flags = Parameter(Descriptor::kFlags);
Node* context = Parameter(Descriptor::kContext);
TailCallRuntime(Runtime::kCreateObjectLiteral, context, closure,
- literals_index, constant_properties, flags);
+ literals_index, boilerplate_description, flags);
}
-#define SHALLOW_OBJECT_BUILTIN(props) \
- TF_BUILTIN(FastCloneShallowObject##props, ConstructorBuiltinsAssembler) { \
- CreateFastCloneShallowObjectBuiltin<Descriptor>(props); \
- }
-
-SHALLOW_OBJECT_BUILTIN(0);
-SHALLOW_OBJECT_BUILTIN(1);
-SHALLOW_OBJECT_BUILTIN(2);
-SHALLOW_OBJECT_BUILTIN(3);
-SHALLOW_OBJECT_BUILTIN(4);
-SHALLOW_OBJECT_BUILTIN(5);
-SHALLOW_OBJECT_BUILTIN(6);
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h
index 5e82f18556..9b04eb378e 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.h
+++ b/deps/v8/src/builtins/builtins-constructor-gen.h
@@ -30,11 +30,7 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler {
AllocationSiteMode allocation_site_mode);
Node* EmitFastCloneShallowObject(Label* call_runtime, Node* closure,
- Node* literals_index,
- Node* properties_count);
-
- template <typename Descriptor>
- void CreateFastCloneShallowObjectBuiltin(int properties_count);
+ Node* literals_index);
Node* EmitFastNewObject(Node* context, Node* target, Node* new_target);
diff --git a/deps/v8/src/builtins/builtins-constructor.h b/deps/v8/src/builtins/builtins-constructor.h
index 59afc73853..e783e11f77 100644
--- a/deps/v8/src/builtins/builtins-constructor.h
+++ b/deps/v8/src/builtins/builtins-constructor.h
@@ -7,6 +7,7 @@
#include "src/contexts.h"
#include "src/objects.h"
+#include "src/objects/dictionary.h"
namespace v8 {
namespace internal {
@@ -22,18 +23,11 @@ class ConstructorBuiltins {
// backed by a double backing store will fit into new-space).
static const int kMaximumClonedShallowArrayElements =
JSArray::kInitialMaxFastElementArray * kPointerSize / kDoubleSize;
-
- // Maximum number of properties in copied objects.
- static const int kMaximumClonedShallowObjectProperties = 6;
- static int FastCloneShallowObjectPropertiesCount(int literal_length) {
- // This heuristic of setting empty literals to have
- // kInitialGlobalObjectUnusedPropertiesCount must remain in-sync with the
- // runtime.
- // TODO(verwaest): Unify this with the heuristic in the runtime.
- return literal_length == 0
- ? JSObject::kInitialGlobalObjectUnusedPropertiesCount
- : literal_length;
- }
+ // Maximum number of properties in a copied object so that the properties store
+ // will fit into new-space. This constant is based on the assumption that
+ // NameDictionaries are 50% over-allocated.
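+ // (Capacity ~= 3/2 * property count, hence the 2/3 bound below.)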
+ static const int kMaximumClonedShallowObjectProperties =
+ NameDictionary::kMaxRegularCapacity / 3 * 2;
private:
static const int kMaximumSlots = 0x8000;
diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc
index 21d59346b5..5fe2cb03bd 100644
--- a/deps/v8/src/builtins/builtins-conversion-gen.cc
+++ b/deps/v8/src/builtins/builtins-conversion-gen.cc
@@ -133,35 +133,7 @@ TF_BUILTIN(ToString, CodeStubAssembler) {
Node* context = Parameter(Descriptor::kContext);
Node* input = Parameter(Descriptor::kArgument);
- Label is_number(this);
- Label runtime(this);
-
- GotoIf(TaggedIsSmi(input), &is_number);
-
- Node* input_map = LoadMap(input);
- Node* input_instance_type = LoadMapInstanceType(input_map);
-
- Label not_string(this);
- GotoIfNot(IsStringInstanceType(input_instance_type), &not_string);
- Return(input);
-
- Label not_heap_number(this);
-
- BIND(&not_string);
- { Branch(IsHeapNumberMap(input_map), &is_number, &not_heap_number); }
-
- BIND(&is_number);
- { Return(NumberToString(context, input)); }
-
- BIND(&not_heap_number);
- {
- GotoIf(Word32NotEqual(input_instance_type, Int32Constant(ODDBALL_TYPE)),
- &runtime);
- Return(LoadObjectField(input, Oddball::kToStringOffset));
- }
-
- BIND(&runtime);
- { Return(CallRuntime(Runtime::kToString, context, input)); }
+ Return(ToString(context, input));
}
// 7.1.1.1 OrdinaryToPrimitive ( O, hint )
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index c1c84cc243..bce8eebb0f 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -90,10 +90,12 @@ namespace internal {
/* ES6 section 7.3.13 Construct (F, [argumentsList], [newTarget]) */ \
ASM(Construct) \
ASM(ConstructWithSpread) \
+ ASM(ConstructForwardVarargs) \
+ ASM(ConstructFunctionForwardVarargs) \
ASM(JSConstructStubApi) \
- ASM(JSConstructStubGeneric) \
+ ASM(JSConstructStubGenericRestrictedReturn) \
+ ASM(JSConstructStubGenericUnrestrictedReturn) \
ASM(JSBuiltinsConstructStub) \
- ASM(JSBuiltinsConstructStubForDerived) \
TFC(FastNewClosure, FastNewClosure, 1) \
TFC(FastNewFunctionContextEval, FastNewFunctionContext, 1) \
TFC(FastNewFunctionContextFunction, FastNewFunctionContext, 1) \
@@ -103,13 +105,7 @@ namespace internal {
TFC(FastCloneRegExp, FastCloneRegExp, 1) \
TFC(FastCloneShallowArrayTrack, FastCloneShallowArray, 1) \
TFC(FastCloneShallowArrayDontTrack, FastCloneShallowArray, 1) \
- TFC(FastCloneShallowObject0, FastCloneShallowObject, 1) \
- TFC(FastCloneShallowObject1, FastCloneShallowObject, 1) \
- TFC(FastCloneShallowObject2, FastCloneShallowObject, 1) \
- TFC(FastCloneShallowObject3, FastCloneShallowObject, 1) \
- TFC(FastCloneShallowObject4, FastCloneShallowObject, 1) \
- TFC(FastCloneShallowObject5, FastCloneShallowObject, 1) \
- TFC(FastCloneShallowObject6, FastCloneShallowObject, 1) \
+ TFC(FastCloneShallowObject, FastCloneShallowObject, 1) \
\
/* Apply and entries */ \
ASM(Apply) \
@@ -242,6 +238,8 @@ namespace internal {
CPP(UnsupportedThrower) \
TFJ(ReturnReceiver, 0) \
\
+ TFS(DeleteProperty, kObject, kKey, kLanguageMode) \
+ \
/* Array */ \
ASM(ArrayCode) \
ASM(InternalArrayCode) \
@@ -249,16 +247,18 @@ namespace internal {
/* ES6 #sec-array.isarray */ \
TFJ(ArrayIsArray, 1, kArg) \
/* ES7 #sec-array.prototype.includes */ \
- TFJ(ArrayIncludes, 2, kSearchElement, kFromIndex) \
+ TFJ(ArrayIncludes, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.indexof */ \
- TFJ(ArrayIndexOf, 2, kSearchElement, kFromIndex) \
+ TFJ(ArrayIndexOf, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.pop */ \
CPP(ArrayPop) \
+ TFJ(FastArrayPop, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.push */ \
CPP(ArrayPush) \
TFJ(FastArrayPush, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.shift */ \
CPP(ArrayShift) \
+ TFJ(FastArrayShift, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.slice */ \
CPP(ArraySlice) \
/* ES6 #sec-array.prototype.splice */ \
@@ -266,33 +266,33 @@ namespace internal {
/* ES6 #sec-array.prototype.unshift */ \
CPP(ArrayUnshift) \
/* ES6 #sec-array.prototype.foreach */ \
- TFJ(ArrayForEachLoopContinuation, 7, kCallbackFn, kThisArg, kArray, kObject, \
- kInitialK, kLength, kTo) \
- TFJ(ArrayForEach, 2, kCallbackFn, kThisArg) \
+ TFS(ArrayForEachLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
+ kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayForEach, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.every */ \
- TFJ(ArrayEveryLoopContinuation, 7, kCallbackFn, kThisArg, kArray, kObject, \
- kInitialK, kLength, kTo) \
- TFJ(ArrayEvery, 2, kCallbackFn, kThisArg) \
+ TFS(ArrayEveryLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
+ kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayEvery, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.some */ \
- TFJ(ArraySomeLoopContinuation, 7, kCallbackFn, kThisArg, kArray, kObject, \
- kInitialK, kLength, kTo) \
- TFJ(ArraySome, 2, kCallbackFn, kThisArg) \
+ TFS(ArraySomeLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
+ kObject, kInitialK, kLength, kTo) \
+ TFJ(ArraySome, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.filter */ \
- TFJ(ArrayFilterLoopContinuation, 7, kCallbackFn, kThisArg, kArray, kObject, \
- kInitialK, kLength, kTo) \
- TFJ(ArrayFilter, 2, kCallbackFn, kThisArg) \
+ TFS(ArrayFilterLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
+ kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayFilter, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.foreach */ \
- TFJ(ArrayMapLoopContinuation, 7, kCallbackFn, kThisArg, kArray, kObject, \
- kInitialK, kLength, kTo) \
- TFJ(ArrayMap, 2, kCallbackFn, kThisArg) \
- /* ES6 #sec-array.prototype.reduce */ \
- TFJ(ArrayReduceLoopContinuation, 7, kCallbackFn, kThisArg, kAccumulator, \
+ TFS(ArrayMapLoopContinuation, kReceiver, kCallbackFn, kThisArg, kArray, \
kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayReduce, 2, kCallbackFn, kInitialValue) \
+ TFJ(ArrayMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 #sec-array.prototype.reduce */ \
+ TFS(ArrayReduceLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
+ kAccumulator, kObject, kInitialK, kLength, kTo) \
+ TFJ(ArrayReduce, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.reduceRight */ \
- TFJ(ArrayReduceRightLoopContinuation, 7, kCallbackFn, kThisArg, \
+ TFS(ArrayReduceRightLoopContinuation, kReceiver, kCallbackFn, kThisArg, \
kAccumulator, kObject, kInitialK, kLength, kTo) \
- TFJ(ArrayReduceRight, 2, kCallbackFn, kInitialValue) \
+ TFJ(ArrayReduceRight, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-array.prototype.entries */ \
TFJ(ArrayPrototypeEntries, 0) \
/* ES6 #sec-array.prototype.keys */ \
@@ -344,6 +344,32 @@ namespace internal {
CPP(CallSitePrototypeIsToplevel) \
CPP(CallSitePrototypeToString) \
\
+ /* Console */ \
+ CPP(ConsoleDebug) \
+ CPP(ConsoleError) \
+ CPP(ConsoleInfo) \
+ CPP(ConsoleLog) \
+ CPP(ConsoleWarn) \
+ CPP(ConsoleDir) \
+ CPP(ConsoleDirXml) \
+ CPP(ConsoleTable) \
+ CPP(ConsoleTrace) \
+ CPP(ConsoleGroup) \
+ CPP(ConsoleGroupCollapsed) \
+ CPP(ConsoleGroupEnd) \
+ CPP(ConsoleClear) \
+ CPP(ConsoleCount) \
+ CPP(ConsoleAssert) \
+ TFJ(FastConsoleAssert, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ CPP(ConsoleMarkTimeline) \
+ CPP(ConsoleProfile) \
+ CPP(ConsoleProfileEnd) \
+ CPP(ConsoleTimeline) \
+ CPP(ConsoleTimelineEnd) \
+ CPP(ConsoleTime) \
+ CPP(ConsoleTimeEnd) \
+ CPP(ConsoleTimeStamp) \
+ \
/* DataView */ \
CPP(DataViewConstructor) \
CPP(DataViewConstructor_ConstructStub) \
@@ -464,6 +490,7 @@ namespace internal {
TFS(CreateIterResultObject, kValue, kDone) \
\
/* Generator and Async */ \
+ TFS(CreateGeneratorObject, kClosure, kReceiver) \
CPP(GeneratorFunctionConstructor) \
/* ES6 #sec-generator.prototype.next */ \
TFJ(GeneratorPrototypeNext, 1, kValue) \
@@ -652,7 +679,7 @@ namespace internal {
CPP(ObjectIsExtensible) \
CPP(ObjectIsFrozen) \
CPP(ObjectIsSealed) \
- CPP(ObjectKeys) \
+ TFJ(ObjectKeys, 1, kObject) \
CPP(ObjectLookupGetter) \
CPP(ObjectLookupSetter) \
CPP(ObjectPreventExtensions) \
@@ -777,11 +804,11 @@ namespace internal {
\
TFS(RegExpReplace, kRegExp, kString, kReplaceValue) \
/* ES #sec-regexp.prototype-@@replace */ \
- TFJ(RegExpPrototypeReplace, 2, kString, kReplaceValue) \
+ TFJ(RegExpPrototypeReplace, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
TFS(RegExpSplit, kRegExp, kString, kLimit) \
/* ES #sec-regexp.prototype-@@split */ \
- TFJ(RegExpPrototypeSplit, 2, kString, kLimit) \
+ TFJ(RegExpPrototypeSplit, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* SharedArrayBuffer */ \
CPP(SharedArrayBufferPrototypeGetByteLength) \
@@ -823,6 +850,8 @@ namespace internal {
CPP(StringPrototypeLocaleCompare) \
/* ES6 #sec-string.prototype.replace */ \
TFJ(StringPrototypeReplace, 2, kSearch, kReplace) \
+ /* ES6 #sec-string.prototype.slice */ \
+ TFJ(StringPrototypeSlice, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.prototype.split */ \
TFJ(StringPrototypeSplit, 2, kSeparator, kLimit) \
/* ES6 #sec-string.prototype.substr */ \
@@ -868,6 +897,9 @@ namespace internal {
TFJ(SymbolPrototypeValueOf, 0) \
\
/* TypedArray */ \
+ TFS(TypedArrayInitialize, kHolder, kLength, kElementSize, kInitialize) \
+ TFS(TypedArrayInitializeWithBuffer, kHolder, kLength, kBuffer, kElementSize, \
+ kByteOffset) \
/* ES6 #sec-typedarray-buffer-byteoffset-length */ \
TFJ(TypedArrayConstructByArrayBuffer, 5, kHolder, kBuffer, kByteOffset, \
kLength, kElementSize) \
@@ -875,8 +907,6 @@ namespace internal {
kElementSize) \
/* ES6 #sec-typedarray-length */ \
TFJ(TypedArrayConstructByLength, 3, kHolder, kLength, kElementSize) \
- TFJ(TypedArrayInitialize, 6, kHolder, kLength, kBuffer, kByteOffset, \
- kByteLength, kInitialize) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
TFJ(TypedArrayPrototypeByteLength, 0) \
@@ -905,13 +935,19 @@ namespace internal {
/* ES6 #sec-%typedarray%.prototype.slice */ \
CPP(TypedArrayPrototypeSlice) \
/* ES6 %TypedArray%.prototype.every */ \
- TFJ(TypedArrayPrototypeEvery, 2, kCallbackFn, kThisArg) \
+ TFJ(TypedArrayPrototypeEvery, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.prototype.some */ \
- TFJ(TypedArrayPrototypeSome, 2, kCallbackFn, kThisArg) \
+ TFJ(TypedArrayPrototypeSome, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.prototype.reduce */ \
- TFJ(TypedArrayPrototypeReduce, 2, kCallbackFn, kInitialValue) \
+ TFJ(TypedArrayPrototypeReduce, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
/* ES6 %TypedArray%.prototype.reduceRight */ \
- TFJ(TypedArrayPrototypeReduceRight, 2, kCallbackFn, kInitialValue) \
+ TFJ(TypedArrayPrototypeReduceRight, \
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
+ /* ES6 %TypedArray%.prototype.map */ \
+ TFJ(TypedArrayPrototypeMap, SharedFunctionInfo::kDontAdaptArgumentsSentinel) \
\
/* Wasm */ \
ASM(WasmCompileLazy) \
@@ -951,12 +987,6 @@ namespace internal {
TFJ(AsyncGeneratorAwaitResolveClosure, 1, kValue) \
TFJ(AsyncGeneratorAwaitRejectClosure, 1, kValue) \
\
- /* GeneratorYield (proposal-async-iteration/#sec-generatoryield) with */ \
- /* resume behaviour specific to Async Generators. Internal / not exposed */ \
- /* to JS code. */ \
- TFJ(AsyncGeneratorYield, 1, kValue) \
- TFJ(AsyncGeneratorRawYield, 1, kValue) \
- \
/* Async-from-Sync Iterator */ \
\
/* %AsyncFromSyncIteratorPrototype% */ \
@@ -970,23 +1000,23 @@ namespace internal {
/* #sec-async-iterator-value-unwrap-functions */ \
TFJ(AsyncIteratorValueUnwrap, 1, kValue)
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
\
/* ES #sec-string.prototype.tolowercase */ \
- CPP(StringPrototypeToLowerCaseI18N) \
+ TFJ(StringPrototypeToLowerCaseIntl, 0) \
/* ES #sec-string.prototype.touppercase */ \
- CPP(StringPrototypeToUpperCaseI18N) \
+ CPP(StringPrototypeToUpperCaseIntl) \
/* ES #sec-string.prototype.normalize */ \
- CPP(StringPrototypeNormalizeI18N)
+ CPP(StringPrototypeNormalizeIntl)
#else
#define BUILTIN_LIST(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
BUILTIN_LIST_BASE(CPP, API, TFJ, TFC, TFS, TFH, ASM, DBG) \
\
- /* (obsolete) Unibrow version */ \
+ /* no-op fallback version */ \
CPP(StringPrototypeNormalize)
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
#define BUILTIN_PROMISE_REJECTION_PREDICTION_LIST(V) \
V(AsyncFromSyncIteratorPrototypeNext) \
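
The builtin tables above are X-macro lists: BUILTIN_LIST_BASE is a single table of entries, and each consumer supplies the CPP/TFJ/TFC/TFS/TFH/ASM/DBG macros to expand only the rows it needs. TFJ entries that switch from a fixed argument count to SharedFunctionInfo::kDontAdaptArgumentsSentinel opt out of argument adaptation and read their arguments off the stack instead (see the CodeStubArguments usage in the RegExp builtins further down). A minimal, self-contained sketch of the X-macro technique itself, with illustrative names rather than V8's:

#include <cstdio>

// One table, many expansions: each entry names its kind and identifier.
#define MY_BUILTIN_LIST(CPP, TFJ) \
  CPP(ObjectIsFrozen)             \
  TFJ(ObjectKeys, 1)

// Expansion 1: one enum value per entry.
#define DECL_ENUM_CPP(Name) k##Name,
#define DECL_ENUM_TFJ(Name, Argc) k##Name,
enum Builtin { MY_BUILTIN_LIST(DECL_ENUM_CPP, DECL_ENUM_TFJ) kCount };

// Expansion 2: a printable table of names and argument counts.
#define PRINT_CPP(Name) std::printf("CPP %s\n", #Name);
#define PRINT_TFJ(Name, Argc) std::printf("TFJ %s argc=%d\n", #Name, Argc);

int main() {
  MY_BUILTIN_LIST(PRINT_CPP, PRINT_TFJ)
  std::printf("total builtins: %d\n", static_cast<int>(kCount));
}
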
diff --git a/deps/v8/src/builtins/builtins-forin-gen.cc b/deps/v8/src/builtins/builtins-forin-gen.cc
index 289a5363a1..476d3766dc 100644
--- a/deps/v8/src/builtins/builtins-forin-gen.cc
+++ b/deps/v8/src/builtins/builtins-forin-gen.cc
@@ -52,7 +52,7 @@ std::tuple<Node*, Node*, Node*> ForInBuiltinsAssembler::EmitForInPrepare(
GotoIf(WordEqual(enum_length, SmiConstant(0)), nothing_to_iterate);
Node* descriptors = LoadMapDescriptors(map);
Node* cache_offset =
- LoadObjectField(descriptors, DescriptorArray::kEnumCacheOffset);
+ LoadObjectField(descriptors, DescriptorArray::kEnumCacheBridgeOffset);
Node* enum_cache = LoadObjectField(
cache_offset, DescriptorArray::kEnumCacheBridgeCacheOffset);
diff --git a/deps/v8/src/builtins/builtins-function.cc b/deps/v8/src/builtins/builtins-function.cc
index d93c2ee80b..7db1899b64 100644
--- a/deps/v8/src/builtins/builtins-function.cc
+++ b/deps/v8/src/builtins/builtins-function.cc
@@ -312,7 +312,8 @@ BUILTIN(FunctionPrototypeToString) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotGeneric,
isolate->factory()->NewStringFromAsciiChecked(
- "Function.prototype.toString")));
+ "Function.prototype.toString"),
+ isolate->factory()->Function_string()));
}
} // namespace internal
diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc
index e6b377f35f..8f5ab699ac 100644
--- a/deps/v8/src/builtins/builtins-handler-gen.cc
+++ b/deps/v8/src/builtins/builtins-handler-gen.cc
@@ -129,21 +129,8 @@ TF_BUILTIN(LoadIC_FunctionPrototype, CodeStubAssembler) {
Node* vector = Parameter(Descriptor::kVector);
Node* context = Parameter(Descriptor::kContext);
- Label miss(this);
-
- Node* proto_or_map =
- LoadObjectField(receiver, JSFunction::kPrototypeOrInitialMapOffset);
- GotoIf(IsTheHole(proto_or_map), &miss);
-
- VARIABLE(var_result, MachineRepresentation::kTagged, proto_or_map);
- Label done(this, &var_result);
- GotoIfNot(IsMap(proto_or_map), &done);
-
- var_result.Bind(LoadMapPrototype(proto_or_map));
- Goto(&done);
-
- BIND(&done);
- Return(var_result.value());
+ Label miss(this, Label::kDeferred);
+ Return(LoadJSFunctionPrototype(receiver, &miss));
BIND(&miss);
TailCallRuntime(Runtime::kLoadIC_Miss, context, receiver, name, slot, vector);
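
The block deleted above hand-rolled the prototype lookup that the new LoadJSFunctionPrototype helper presumably centralizes: a function's prototype-or-initial-map slot holds either the hole (no prototype; IC miss), an initial map (whose prototype field is the answer), or the prototype object itself. A plain C++ sketch of that three-way decision, with hypothetical stand-in types:

#include <variant>

struct Map { const void* prototype; };  // stand-in for v8::internal::Map
struct TheHole {};                      // stand-in for the hole sentinel

using ProtoOrMap = std::variant<TheHole, Map*, const void*>;

// Returns the function's prototype, or nullptr to signal an IC miss.
const void* LoadFunctionPrototype(const ProtoOrMap& slot) {
  if (std::holds_alternative<TheHole>(slot)) return nullptr;  // miss
  if (auto* map = std::get_if<Map*>(&slot))
    return (*map)->prototype;  // initial map: prototype lives on the map
  return std::get<const void*>(slot);  // slot already holds the prototype
}

int main() {
  Map initial_map{&initial_map};
  ProtoOrMap slot = &initial_map;
  return LoadFunctionPrototype(slot) != nullptr ? 0 : 1;
}
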
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index d1d000ce9e..abd961998c 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -136,8 +136,7 @@ TF_BUILTIN(NewUnmappedArgumentsElements, CodeStubAssembler) {
// Load the parameter at the given {index}.
Node* value = Load(MachineType::AnyTagged(), frame,
- WordShl(IntPtrSub(offset, index),
- IntPtrConstant(kPointerSizeLog2)));
+ TimesPointerSize(IntPtrSub(offset, index)));
// Store the {value} into the {result}.
StoreFixedArrayElement(result, index, value, SKIP_WRITE_BARRIER);
@@ -164,5 +163,127 @@ TF_BUILTIN(ReturnReceiver, CodeStubAssembler) {
Return(Parameter(Descriptor::kReceiver));
}
+class DeletePropertyBaseAssembler : public CodeStubAssembler {
+ public:
+ explicit DeletePropertyBaseAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ void DeleteDictionaryProperty(Node* receiver, Node* properties, Node* name,
+ Node* context, Label* dont_delete,
+ Label* notfound) {
+ VARIABLE(var_name_index, MachineType::PointerRepresentation());
+ Label dictionary_found(this, &var_name_index);
+ NameDictionaryLookup<NameDictionary>(properties, name, &dictionary_found,
+ &var_name_index, notfound);
+
+ BIND(&dictionary_found);
+ Node* key_index = var_name_index.value();
+ Node* details =
+ LoadDetailsByKeyIndex<NameDictionary>(properties, key_index);
+ GotoIf(IsSetWord32(details, PropertyDetails::kAttributesDontDeleteMask),
+ dont_delete);
+ // Overwrite the entry itself (see NameDictionary::SetEntry).
+ Node* filler = TheHoleConstant();
+ DCHECK(Heap::RootIsImmortalImmovable(Heap::kTheHoleValueRootIndex));
+ StoreFixedArrayElement(properties, key_index, filler, SKIP_WRITE_BARRIER);
+ StoreValueByKeyIndex<NameDictionary>(properties, key_index, filler,
+ SKIP_WRITE_BARRIER);
+ StoreDetailsByKeyIndex<NameDictionary>(properties, key_index,
+ SmiConstant(Smi::kZero));
+
+ // Update bookkeeping information (see NameDictionary::ElementRemoved).
+ Node* nof = GetNumberOfElements<NameDictionary>(properties);
+ Node* new_nof = SmiSub(nof, SmiConstant(1));
+ SetNumberOfElements<NameDictionary>(properties, new_nof);
+ Node* num_deleted = GetNumberOfDeletedElements<NameDictionary>(properties);
+ Node* new_deleted = SmiAdd(num_deleted, SmiConstant(1));
+ SetNumberOfDeletedElements<NameDictionary>(properties, new_deleted);
+
+ // Shrink the dictionary if necessary (see NameDictionary::Shrink).
+ Label shrinking_done(this);
+ Node* capacity = GetCapacity<NameDictionary>(properties);
+ GotoIf(SmiGreaterThan(new_nof, SmiShr(capacity, 2)), &shrinking_done);
+ GotoIf(SmiLessThan(new_nof, SmiConstant(16)), &shrinking_done);
+ CallRuntime(Runtime::kShrinkPropertyDictionary, context, receiver, name);
+ Goto(&shrinking_done);
+ BIND(&shrinking_done);
+
+ Return(TrueConstant());
+ }
+};
+
+TF_BUILTIN(DeleteProperty, DeletePropertyBaseAssembler) {
+ Node* receiver = Parameter(Descriptor::kObject);
+ Node* key = Parameter(Descriptor::kKey);
+ Node* language_mode = Parameter(Descriptor::kLanguageMode);
+ Node* context = Parameter(Descriptor::kContext);
+
+ VARIABLE(var_index, MachineType::PointerRepresentation());
+ VARIABLE(var_unique, MachineRepresentation::kTagged, key);
+ Label if_index(this), if_unique_name(this), if_notunique(this),
+ if_notfound(this), slow(this);
+
+ GotoIf(TaggedIsSmi(receiver), &slow);
+ Node* receiver_map = LoadMap(receiver);
+ Node* instance_type = LoadMapInstanceType(receiver_map);
+ GotoIf(Int32LessThanOrEqual(instance_type,
+ Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+ &slow);
+ TryToName(key, &if_index, &var_index, &if_unique_name, &var_unique, &slow,
+ &if_notunique);
+
+ BIND(&if_index);
+ {
+ Comment("integer index");
+ Goto(&slow); // TODO(jkummerow): Implement more smarts here.
+ }
+
+ BIND(&if_unique_name);
+ {
+ Comment("key is unique name");
+ Node* unique = var_unique.value();
+ CheckForAssociatedProtector(unique, &slow);
+
+ Label dictionary(this), dont_delete(this);
+ Node* properties = LoadProperties(receiver);
+ Node* properties_map = LoadMap(properties);
+ GotoIf(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
+ &dictionary);
+ // Fast properties need to clear recorded slots, which can only be done
+ // in C++.
+ Goto(&slow);
+
+ BIND(&dictionary);
+ {
+ DeleteDictionaryProperty(receiver, properties, unique, context,
+ &dont_delete, &if_notfound);
+ }
+
+ BIND(&dont_delete);
+ {
+ STATIC_ASSERT(LANGUAGE_END == 2);
+ GotoIf(SmiNotEqual(language_mode, SmiConstant(SLOPPY)), &slow);
+ Return(FalseConstant());
+ }
+ }
+
+ BIND(&if_notunique);
+ {
+ // If the string was not found in the string table, then no object can
+ // have a property with that name.
+ TryInternalizeString(key, &if_index, &var_index, &if_unique_name,
+ &var_unique, &if_notfound, &slow);
+ }
+
+ BIND(&if_notfound);
+ Return(TrueConstant());
+
+ BIND(&slow);
+ {
+ TailCallRuntime(Runtime::kDeleteProperty, context, receiver, key,
+ language_mode);
+ }
+}
+
} // namespace internal
} // namespace v8
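
DeleteDictionaryProperty mirrors NameDictionary::SetEntry, ::ElementRemoved and ::Shrink: overwrite the key, value and details with the hole, decrement the live-element count, bump the deleted count, and shrink only when the table is both sparse and large enough for a rehash to pay off. A compact sketch of the bookkeeping and the shrink predicate used above (shrink iff new_nof <= capacity/4 and new_nof >= 16):

#include <cstdio>

struct DictionaryStats {
  int nof;          // number of live elements
  int num_deleted;  // tombstones left behind by deletions
  int capacity;
};

// Mirrors the bookkeeping after one deletion, then decides whether to shrink.
bool RemoveOneAndMaybeShrink(DictionaryStats& d) {
  d.nof -= 1;          // NameDictionary::ElementRemoved
  d.num_deleted += 1;
  // Shrink only if the table is at most 25% full and still reasonably big;
  // tiny tables are not worth rebuilding.
  return d.nof <= d.capacity / 4 && d.nof >= 16;
}

int main() {
  DictionaryStats d{17, 0, 128};
  std::printf("shrink: %s\n", RemoveOneAndMaybeShrink(d) ? "yes" : "no");
}
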
diff --git a/deps/v8/src/builtins/builtins-intl-gen.cc b/deps/v8/src/builtins/builtins-intl-gen.cc
new file mode 100644
index 0000000000..3782d43a9a
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-intl-gen.cc
@@ -0,0 +1,124 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#include "src/builtins/builtins-utils-gen.h"
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class IntlBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit IntlBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+};
+
+TF_BUILTIN(StringPrototypeToLowerCaseIntl, IntlBuiltinsAssembler) {
+ Node* const maybe_string = Parameter(Descriptor::kReceiver);
+ Node* const context = Parameter(Descriptor::kContext);
+
+ Node* const string =
+ ToThisString(context, maybe_string, "String.prototype.toLowerCase");
+
+ Label call_c(this), return_string(this), runtime(this, Label::kDeferred);
+
+ // Early exit on empty strings.
+ Node* const length = SmiUntag(LoadStringLength(string));
+ GotoIf(IntPtrEqual(length, IntPtrConstant(0)), &return_string);
+
+ // Unpack strings if possible, and bail to runtime unless we get a one-byte
+ // flat string.
+ ToDirectStringAssembler to_direct(
+ state(), string, ToDirectStringAssembler::kDontUnpackSlicedStrings);
+ to_direct.TryToDirect(&runtime);
+
+ Node* const instance_type = to_direct.instance_type();
+ CSA_ASSERT(this,
+ Word32BinaryNot(IsIndirectStringInstanceType(instance_type)));
+ GotoIfNot(IsOneByteStringInstanceType(instance_type), &runtime);
+
+ // For short strings, do the conversion in CSA through the lookup table.
+
+ Node* const dst = AllocateSeqOneByteString(context, length);
+
+ const int kMaxShortStringLength = 24; // Determined empirically.
+ GotoIf(IntPtrGreaterThan(length, IntPtrConstant(kMaxShortStringLength)),
+ &call_c);
+
+ {
+ Node* const dst_ptr = PointerToSeqStringData(dst);
+ VARIABLE(var_cursor, MachineType::PointerRepresentation(),
+ IntPtrConstant(0));
+
+ Node* const start_address = to_direct.PointerToData(&call_c);
+ Node* const end_address = IntPtrAdd(start_address, length);
+
+ Node* const to_lower_table_addr = ExternalConstant(
+ ExternalReference::intl_to_latin1_lower_table(isolate()));
+
+ VARIABLE(var_did_change, MachineRepresentation::kWord32, Int32Constant(0));
+
+ VariableList push_vars({&var_cursor, &var_did_change}, zone());
+ BuildFastLoop(
+ push_vars, start_address, end_address,
+ [=, &var_cursor, &var_did_change](Node* current) {
+ Node* c = Load(MachineType::Uint8(), current);
+ Node* lower = Load(MachineType::Uint8(), to_lower_table_addr,
+ ChangeInt32ToIntPtr(c));
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, dst_ptr,
+ var_cursor.value(), lower);
+
+ var_did_change.Bind(
+ Word32Or(Word32NotEqual(c, lower), var_did_change.value()));
+
+ Increment(var_cursor);
+ },
+ kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+
+ // Return the original string if it remained unchanged in order to preserve
+ // e.g. internalization and private symbols (such as the preserved object
+ // hash) on the source string.
+ GotoIfNot(var_did_change.value(), &return_string);
+
+ Return(dst);
+ }
+
+ // Call into C for case conversion. The signature is:
+ // Object* ConvertOneByteToLower(String* src, String* dst, Isolate* isolate);
+ BIND(&call_c);
+ {
+ Node* const src = to_direct.string();
+
+ Node* const function_addr = ExternalConstant(
+ ExternalReference::intl_convert_one_byte_to_lower(isolate()));
+ Node* const isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+
+ MachineType type_ptr = MachineType::Pointer();
+ MachineType type_tagged = MachineType::AnyTagged();
+
+ Node* const result =
+ CallCFunction3(type_tagged, type_tagged, type_tagged, type_ptr,
+ function_addr, src, dst, isolate_ptr);
+
+ Return(result);
+ }
+
+ BIND(&return_string);
+ Return(string);
+
+ BIND(&runtime);
+ {
+ Node* const result =
+ CallRuntime(Runtime::kStringToLowerCaseIntl, context, string);
+ Return(result);
+ }
+}
+
+} // namespace internal
+} // namespace v8
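
For one-byte strings of up to 24 characters the new builtin lowercases inline through a lookup table, tracks whether any character actually changed, and hands back the original string when nothing did (preserving internalization and the cached object hash). A standalone sketch of the same loop over Latin-1 data, using a hypothetical ASCII-only table for brevity:

#include <array>
#include <cstdint>
#include <cstdio>
#include <string>

// Hypothetical stand-in for the intl_to_latin1_lower_table (ASCII rows only).
std::array<uint8_t, 256> MakeLowerTable() {
  std::array<uint8_t, 256> t{};
  for (int c = 0; c < 256; ++c)
    t[c] = (c >= 'A' && c <= 'Z') ? static_cast<uint8_t>(c + 32)
                                  : static_cast<uint8_t>(c);
  return t;
}

// Returns the source unchanged when no character differs, like the builtin.
std::string ToLowerOneByte(const std::string& src) {
  static const std::array<uint8_t, 256> table = MakeLowerTable();
  std::string dst(src.size(), '\0');
  bool did_change = false;
  for (size_t i = 0; i < src.size(); ++i) {
    uint8_t c = static_cast<uint8_t>(src[i]);
    uint8_t lower = table[c];
    dst[i] = static_cast<char>(lower);
    did_change |= (c != lower);
  }
  return did_change ? dst : src;
}

int main() { std::printf("%s\n", ToLowerOneByte("HeLLo").c_str()); }
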
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index 157c42bcff..c14d73b3b6 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -2,11 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifdef V8_I18N_SUPPORT
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
-#include "src/i18n.h"
+#include "src/intl.h"
#include "src/objects-inl.h"
#include "unicode/normalizer2.h"
@@ -14,21 +16,14 @@
namespace v8 {
namespace internal {
-BUILTIN(StringPrototypeToLowerCaseI18N) {
- HandleScope scope(isolate);
- TO_THIS_STRING(string, "String.prototype.toLowerCase");
- string = String::Flatten(string);
- return ConvertCase(string, false, isolate);
-}
-
-BUILTIN(StringPrototypeToUpperCaseI18N) {
+BUILTIN(StringPrototypeToUpperCaseIntl) {
HandleScope scope(isolate);
TO_THIS_STRING(string, "String.prototype.toUpperCase");
string = String::Flatten(string);
return ConvertCase(string, true, isolate);
}
-BUILTIN(StringPrototypeNormalizeI18N) {
+BUILTIN(StringPrototypeNormalizeIntl) {
HandleScope handle_scope(isolate);
TO_THIS_STRING(string, "String.prototype.normalize");
@@ -104,5 +99,3 @@ BUILTIN(StringPrototypeNormalizeI18N) {
} // namespace internal
} // namespace v8
-
-#endif // V8_I18N_SUPPORT
diff --git a/deps/v8/src/builtins/builtins-number.cc b/deps/v8/src/builtins/builtins-number.cc
index 25a1bad240..346bafa1ae 100644
--- a/deps/v8/src/builtins/builtins-number.cc
+++ b/deps/v8/src/builtins/builtins-number.cc
@@ -29,7 +29,8 @@ BUILTIN(NumberPrototypeToExponential) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotGeneric,
isolate->factory()->NewStringFromAsciiChecked(
- "Number.prototype.toExponential")));
+ "Number.prototype.toExponential"),
+ isolate->factory()->Number_string()));
}
double const value_number = value->Number();
@@ -72,7 +73,8 @@ BUILTIN(NumberPrototypeToFixed) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotGeneric,
isolate->factory()->NewStringFromAsciiChecked(
- "Number.prototype.toFixed")));
+ "Number.prototype.toFixed"),
+ isolate->factory()->Number_string()));
}
double const value_number = value->Number();
@@ -114,7 +116,8 @@ BUILTIN(NumberPrototypeToLocaleString) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotGeneric,
isolate->factory()->NewStringFromAsciiChecked(
- "Number.prototype.toLocaleString")));
+ "Number.prototype.toLocaleString"),
+ isolate->factory()->Number_string()));
}
// Turn the {value} into a String.
@@ -135,7 +138,8 @@ BUILTIN(NumberPrototypeToPrecision) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotGeneric,
isolate->factory()->NewStringFromAsciiChecked(
- "Number.prototype.toPrecision")));
+ "Number.prototype.toPrecision"),
+ isolate->factory()->Number_string()));
}
double const value_number = value->Number();
@@ -179,7 +183,8 @@ BUILTIN(NumberPrototypeToString) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotGeneric,
isolate->factory()->NewStringFromAsciiChecked(
- "Number.prototype.toString")));
+ "Number.prototype.toString"),
+ isolate->factory()->Number_string()));
}
double const value_number = value->Number();
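
Every kNotGeneric site in this file now passes a second argument naming the expected receiver type, so the resulting TypeError can say what the method required rather than only which method failed. A minimal sketch of such a two-slot message template (the wording is illustrative, not V8's exact message text):

#include <cstdio>
#include <string>

// Hypothetical two-slot template in the spirit of kNotGeneric's new arity.
std::string NotGenericMessage(const std::string& method,
                              const std::string& expected_type) {
  return method + " requires that 'this' be a " + expected_type;
}

int main() {
  std::printf("%s\n",
              NotGenericMessage("Number.prototype.toFixed", "Number").c_str());
}
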
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index be83f5d1ef..6173bb79ab 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -54,11 +54,13 @@ TF_BUILTIN(ObjectHasOwnProperty, ObjectBuiltinsAssembler) {
Node* key = Parameter(Descriptor::kKey);
Node* context = Parameter(Descriptor::kContext);
- Label call_runtime(this), return_true(this), return_false(this);
+ Label call_runtime(this), return_true(this), return_false(this),
+ to_primitive(this);
- // Smi receivers do not have own properties.
+ // Smi receivers do not have own properties; just perform ToPrimitive on the
+ // key.
Label if_objectisnotsmi(this);
- Branch(TaggedIsSmi(object), &return_false, &if_objectisnotsmi);
+ Branch(TaggedIsSmi(object), &to_primitive, &if_objectisnotsmi);
BIND(&if_objectisnotsmi);
Node* map = LoadMap(object);
@@ -68,20 +70,44 @@ TF_BUILTIN(ObjectHasOwnProperty, ObjectBuiltinsAssembler) {
VARIABLE(var_index, MachineType::PointerRepresentation());
VARIABLE(var_unique, MachineRepresentation::kTagged);
- Label keyisindex(this), if_iskeyunique(this);
- TryToName(key, &keyisindex, &var_index, &if_iskeyunique, &var_unique,
- &call_runtime);
+ Label if_index(this), if_unique_name(this), if_notunique_name(this);
+ TryToName(key, &if_index, &var_index, &if_unique_name, &var_unique,
+ &call_runtime, &if_notunique_name);
- BIND(&if_iskeyunique);
+ BIND(&if_unique_name);
TryHasOwnProperty(object, map, instance_type, var_unique.value(),
&return_true, &return_false, &call_runtime);
- BIND(&keyisindex);
- // Handle negative keys in the runtime.
- GotoIf(IntPtrLessThan(var_index.value(), IntPtrConstant(0)), &call_runtime);
- TryLookupElement(object, map, instance_type, var_index.value(),
- &return_true, &return_false, &return_false, &call_runtime);
+ BIND(&if_index);
+ {
+ // Handle negative keys in the runtime.
+ GotoIf(IntPtrLessThan(var_index.value(), IntPtrConstant(0)),
+ &call_runtime);
+ TryLookupElement(object, map, instance_type, var_index.value(),
+ &return_true, &return_false, &return_false,
+ &call_runtime);
+ }
+
+ BIND(&if_notunique_name);
+ {
+ Label not_in_string_table(this);
+ TryInternalizeString(key, &if_index, &var_index, &if_unique_name,
+ &var_unique, &not_in_string_table, &call_runtime);
+
+ BIND(&not_in_string_table);
+ {
+ // If the string was not found in the string table, then no regular
+ // object can have a property with that name, so return |false|.
+ // "Special API objects" with interceptors must take the slow path.
+ Branch(IsSpecialReceiverInstanceType(instance_type), &call_runtime,
+ &return_false);
+ }
+ }
}
+ BIND(&to_primitive);
+ GotoIf(IsNumber(key), &return_false);
+ Branch(IsName(key), &return_false, &call_runtime);
+
BIND(&return_true);
Return(BooleanConstant(true));
@@ -92,6 +118,88 @@ TF_BUILTIN(ObjectHasOwnProperty, ObjectBuiltinsAssembler) {
Return(CallRuntime(Runtime::kObjectHasOwnProperty, context, object, key));
}
+// ES #sec-object.keys
+TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) {
+ Node* object = Parameter(Descriptor::kObject);
+ Node* context = Parameter(Descriptor::kContext);
+
+ VARIABLE(var_length, MachineRepresentation::kTagged);
+ VARIABLE(var_elements, MachineRepresentation::kTagged);
+ Label if_empty(this, Label::kDeferred), if_fast(this),
+ if_slow(this, Label::kDeferred), if_join(this);
+
+ // Check if the {object} has a usable enum cache.
+ GotoIf(TaggedIsSmi(object), &if_slow);
+ Node* object_map = LoadMap(object);
+ Node* object_bit_field3 = LoadMapBitField3(object_map);
+ Node* object_enum_length =
+ DecodeWordFromWord32<Map::EnumLengthBits>(object_bit_field3);
+ GotoIf(
+ WordEqual(object_enum_length, IntPtrConstant(kInvalidEnumCacheSentinel)),
+ &if_slow);
+
+ // Ensure that the {object} doesn't have any elements.
+ CSA_ASSERT(this, IsJSObjectMap(object_map));
+ Node* object_elements = LoadObjectField(object, JSObject::kElementsOffset);
+ GotoIfNot(IsEmptyFixedArray(object_elements), &if_slow);
+ Branch(WordEqual(object_enum_length, IntPtrConstant(0)), &if_empty, &if_fast);
+
+ BIND(&if_fast);
+ {
+ // The {object} has a usable enum cache, use that.
+ Node* object_descriptors = LoadMapDescriptors(object_map);
+ Node* object_enum_cache_bridge = LoadObjectField(
+ object_descriptors, DescriptorArray::kEnumCacheBridgeOffset);
+ Node* object_enum_cache = LoadObjectField(
+ object_enum_cache_bridge, DescriptorArray::kEnumCacheBridgeCacheOffset);
+
+ // Allocate a JSArray and copy the elements from the {object_enum_cache}.
+ Node* array = nullptr;
+ Node* elements = nullptr;
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(FAST_ELEMENTS, native_context);
+ Node* array_length = SmiTag(object_enum_length);
+ std::tie(array, elements) = AllocateUninitializedJSArrayWithElements(
+ FAST_ELEMENTS, array_map, array_length, nullptr, object_enum_length,
+ INTPTR_PARAMETERS);
+ StoreMapNoWriteBarrier(elements, Heap::kFixedArrayMapRootIndex);
+ StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
+ array_length);
+ CopyFixedArrayElements(FAST_ELEMENTS, object_enum_cache, elements,
+ object_enum_length, SKIP_WRITE_BARRIER);
+ Return(array);
+ }
+
+ BIND(&if_empty);
+ {
+ // The {object} doesn't have any enumerable keys.
+ var_length.Bind(SmiConstant(0));
+ var_elements.Bind(EmptyFixedArrayConstant());
+ Goto(&if_join);
+ }
+
+ BIND(&if_slow);
+ {
+ // Let the runtime compute the elements.
+ Node* elements = CallRuntime(Runtime::kObjectKeys, context, object);
+ var_length.Bind(LoadObjectField(elements, FixedArray::kLengthOffset));
+ var_elements.Bind(elements);
+ Goto(&if_join);
+ }
+
+ BIND(&if_join);
+ {
+ // Wrap the elements into a proper JSArray and return that.
+ Node* native_context = LoadNativeContext(context);
+ Node* array_map = LoadJSArrayElementsMap(FAST_ELEMENTS, native_context);
+ Node* array = AllocateUninitializedJSArrayWithoutElements(
+ FAST_ELEMENTS, array_map, var_length.value(), nullptr);
+ StoreObjectFieldNoWriteBarrier(array, JSArray::kElementsOffset,
+ var_elements.value());
+ Return(array);
+ }
+}
+
// ES6 #sec-object.prototype.tostring
TF_BUILTIN(ObjectProtoToString, ObjectBuiltinsAssembler) {
Label return_undefined(this, Label::kDeferred),
@@ -400,5 +508,51 @@ TF_BUILTIN(GetSuperConstructor, ObjectBuiltinsAssembler) {
Return(GetSuperConstructor(object, context));
}
+TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
+ Node* closure = Parameter(Descriptor::kClosure);
+ Node* receiver = Parameter(Descriptor::kReceiver);
+ Node* context = Parameter(Descriptor::kContext);
+
+ // Get the initial map from the function, jumping to the runtime if we don't
+ // have one.
+ Node* maybe_map =
+ LoadObjectField(closure, JSFunction::kPrototypeOrInitialMapOffset);
+ Label runtime(this);
+ GotoIf(DoesntHaveInstanceType(maybe_map, MAP_TYPE), &runtime);
+
+ Node* shared =
+ LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset);
+ Node* bytecode_array =
+ LoadObjectField(shared, SharedFunctionInfo::kFunctionDataOffset);
+ Node* frame_size = ChangeInt32ToIntPtr(LoadObjectField(
+ bytecode_array, BytecodeArray::kFrameSizeOffset, MachineType::Int32()));
+ Node* size = WordSar(frame_size, IntPtrConstant(kPointerSizeLog2));
+ Node* register_file = AllocateFixedArray(FAST_HOLEY_ELEMENTS, size);
+ FillFixedArrayWithValue(FAST_HOLEY_ELEMENTS, register_file, IntPtrConstant(0),
+ size, Heap::kUndefinedValueRootIndex);
+
+ Node* const result = AllocateJSObjectFromMap(maybe_map);
+
+ StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kFunctionOffset,
+ closure);
+ StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContextOffset,
+ context);
+ StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kReceiverOffset,
+ receiver);
+ StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kRegisterFileOffset,
+ register_file);
+ Node* executing = SmiConstant(JSGeneratorObject::kGeneratorExecuting);
+ StoreObjectFieldNoWriteBarrier(result, JSGeneratorObject::kContinuationOffset,
+ executing);
+ HandleSlackTracking(context, result, maybe_map, JSGeneratorObject::kSize);
+ Return(result);
+
+ BIND(&runtime);
+ {
+ Return(CallRuntime(Runtime::kCreateJSGeneratorObject, context, closure,
+ receiver));
+ }
+}
+
} // namespace internal
} // namespace v8
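
The new ObjectKeys builtin takes its fast path only when the map's cached enum length is valid and the object has no elements (integer-indexed keys would have to come first); in that case the result is a straight copy of the enum cache. A sketch of the decision and copy in plain C++, with a stand-in sentinel and types:

#include <cstdio>
#include <vector>

constexpr int kInvalidEnumCacheSentinelStandIn = -1;  // illustrative value

struct ObjectShape {
  int enum_length;                      // from the map's bit field 3
  bool has_elements;                    // indexed properties defeat the cache
  std::vector<const char*> enum_cache;  // cached enumerable key names
};

// Returns true and fills |keys| on the fast path; false means "ask runtime".
bool FastObjectKeys(const ObjectShape& o, std::vector<const char*>* keys) {
  if (o.enum_length == kInvalidEnumCacheSentinelStandIn) return false;
  if (o.has_elements) return false;
  keys->assign(o.enum_cache.begin(), o.enum_cache.begin() + o.enum_length);
  return true;
}

int main() {
  ObjectShape o{2, false, {"a", "b", "c"}};
  std::vector<const char*> keys;
  if (FastObjectKeys(o, &keys))
    for (const char* k : keys) std::printf("%s\n", k);
}
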
diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc
index 8be615013c..95d2149f31 100644
--- a/deps/v8/src/builtins/builtins-object.cc
+++ b/deps/v8/src/builtins/builtins-object.cc
@@ -86,8 +86,11 @@ Object* ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
Handle<Object> name, Handle<Object> accessor) {
// 1. Let O be ? ToObject(this value).
Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ConvertReceiver(isolate, object));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, receiver,
+ FLAG_harmony_strict_legacy_accessor_builtins
+ ? Object::ToObject(isolate, object)
+ : Object::ConvertReceiver(isolate, object));
// 2. If IsCallable(getter) is false, throw a TypeError exception.
if (!accessor->IsCallable()) {
MessageTemplate::Template message =
@@ -114,7 +117,9 @@ Object* ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
// To preserve legacy behavior, we ignore errors silently rather than
// throwing an exception.
Maybe<bool> success = JSReceiver::DefineOwnProperty(
- isolate, receiver, name, &desc, Object::DONT_THROW);
+ isolate, receiver, name, &desc,
+ FLAG_harmony_strict_legacy_accessor_builtins ? Object::THROW_ON_ERROR
+ : Object::DONT_THROW);
MAYBE_RETURN(success, isolate->heap()->exception());
if (!success.FromJust()) {
isolate->CountUsage(v8::Isolate::kDefineGetterOrSetterWouldThrow);
@@ -125,8 +130,11 @@ Object* ObjectDefineAccessor(Isolate* isolate, Handle<Object> object,
Object* ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
Handle<Object> key, AccessorComponent component) {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
- Object::ConvertReceiver(isolate, object));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, object,
+ FLAG_harmony_strict_legacy_accessor_builtins
+ ? Object::ToObject(isolate, object)
+ : Object::ConvertReceiver(isolate, object));
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, key,
Object::ToPropertyKey(isolate, key));
bool success = false;
@@ -429,41 +437,6 @@ BUILTIN(ObjectIsSealed) {
return isolate->heap()->ToBoolean(result.FromJust());
}
-// ES6 section 19.1.2.14 Object.keys ( O )
-BUILTIN(ObjectKeys) {
- HandleScope scope(isolate);
- Handle<Object> object = args.atOrUndefined(isolate, 1);
- Handle<JSReceiver> receiver;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
- Object::ToObject(isolate, object));
-
- Handle<FixedArray> keys;
- int enum_length = receiver->map()->EnumLength();
- if (enum_length != kInvalidEnumCacheSentinel &&
- JSObject::cast(*receiver)->elements() ==
- isolate->heap()->empty_fixed_array()) {
- DCHECK(receiver->IsJSObject());
- DCHECK(!JSObject::cast(*receiver)->HasNamedInterceptor());
- DCHECK(!JSObject::cast(*receiver)->IsAccessCheckNeeded());
- DCHECK(!receiver->map()->has_hidden_prototype());
- DCHECK(JSObject::cast(*receiver)->HasFastProperties());
- if (enum_length == 0) {
- keys = isolate->factory()->empty_fixed_array();
- } else {
- Handle<FixedArray> cache(
- receiver->map()->instance_descriptors()->GetEnumCache());
- keys = isolate->factory()->CopyFixedArrayUpTo(cache, enum_length);
- }
- } else {
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, keys,
- KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly,
- ENUMERABLE_STRINGS,
- GetKeysConversion::kConvertToString));
- }
- return *isolate->factory()->NewJSArrayWithElements(keys, FAST_ELEMENTS);
-}
-
BUILTIN(ObjectValues) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
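
The accessor builtins above are now gated on FLAG_harmony_strict_legacy_accessor_builtins: with the flag on they coerce the receiver via ToObject (throwing on null/undefined instead of substituting the global object) and pass THROW_ON_ERROR so definition failures propagate rather than being swallowed. A sketch of the flag-controlled choice, with stand-in types and wording:

#include <cstdio>
#include <stdexcept>
#include <string>

enum class ShouldThrow { kThrowOnError, kDontThrow };  // stand-in enum

bool flag_strict_legacy_accessors = true;  // mirrors the harmony flag

std::string CoerceReceiver(const std::string* maybe_receiver) {
  if (flag_strict_legacy_accessors) {
    // ToObject semantics: a null/undefined receiver is a TypeError.
    if (maybe_receiver == nullptr)
      throw std::runtime_error("TypeError: cannot convert to object");
    return *maybe_receiver;
  }
  // Legacy ConvertReceiver semantics: fall back to the global object.
  return maybe_receiver ? *maybe_receiver : "<global>";
}

ShouldThrow DefinePropertyMode() {
  return flag_strict_legacy_accessors ? ShouldThrow::kThrowOnError
                                      : ShouldThrow::kDontThrow;
}

int main() {
  try {
    CoerceReceiver(nullptr);
  } catch (const std::exception& e) {
    std::printf("%s\n", e.what());
  }
  return DefinePropertyMode() == ShouldThrow::kThrowOnError ? 0 : 1;
}
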
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index f1169c0b94..04a35bd000 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -231,11 +231,11 @@ void RegExpBuiltinsAssembler::GetStringPointers(
var_string_end->Bind(IntPtrAdd(string_data, to_offset));
}
-Node* RegExpBuiltinsAssembler::IrregexpExec(Node* const context,
- Node* const regexp,
- Node* const string,
- Node* const last_index,
- Node* const match_info) {
+Node* RegExpBuiltinsAssembler::RegExpExecInternal(Node* const context,
+ Node* const regexp,
+ Node* const string,
+ Node* const last_index,
+ Node* const match_info) {
// Jump straight to the runtime if native RegExp support was not selected
// at compile time, or if entry into generated regexp code is disabled by a
// runtime flag.
@@ -260,12 +260,14 @@ Node* RegExpBuiltinsAssembler::IrregexpExec(Node* const context,
Label out(this), runtime(this, Label::kDeferred);
// External constants.
+ Node* const isolate_address =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+ Node* const regexp_stack_memory_address_address = ExternalConstant(
+ ExternalReference::address_of_regexp_stack_memory_address(isolate()));
Node* const regexp_stack_memory_size_address = ExternalConstant(
ExternalReference::address_of_regexp_stack_memory_size(isolate()));
Node* const static_offsets_vector_address = ExternalConstant(
ExternalReference::address_of_static_offsets_vector(isolate()));
- Node* const pending_exception_address = ExternalConstant(
- ExternalReference(Isolate::kPendingExceptionAddress, isolate()));
// Ensure that a RegExp stack is allocated.
{
@@ -304,9 +306,16 @@ Node* RegExpBuiltinsAssembler::IrregexpExec(Node* const context,
Node* const smi_string_length = LoadStringLength(string);
- // Bail out to runtime for invalid {last_index} values.
- GotoIfNot(TaggedIsSmi(last_index), &runtime);
- GotoIf(SmiAboveOrEqual(last_index, smi_string_length), &runtime);
+ // At this point, last_index is definitely a canonicalized non-negative
+ // number, which implies that any non-Smi last_index is greater than
+ // the maximal string length. If lastIndex > string.length then the matcher
+ // must fail.
+
+ Label if_failure(this);
+ CSA_ASSERT(this, IsNumberNormalized(last_index));
+ CSA_ASSERT(this, IsNumberPositive(last_index));
+ GotoIfNot(TaggedIsSmi(last_index), &if_failure); // Outside Smi range.
+ GotoIf(SmiGreaterThan(last_index, smi_string_length), &if_failure);
// Load the irregexp code object and offsets into the subject string. Both
// depend on whether the string is one- or two-byte.
@@ -356,27 +365,89 @@ Node* RegExpBuiltinsAssembler::IrregexpExec(Node* const context,
GotoIf(TaggedIsSmi(code), &runtime);
CSA_ASSERT(this, HasInstanceType(code, CODE_TYPE));
- Label if_success(this), if_failure(this),
- if_exception(this, Label::kDeferred);
+ Label if_success(this), if_exception(this, Label::kDeferred);
{
IncrementCounter(isolate()->counters()->regexp_entry_native(), 1);
- Callable exec_callable = CodeFactory::RegExpExec(isolate());
- Node* const result = CallStub(
- exec_callable, context, string, TruncateWordToWord32(int_last_index),
- var_string_start.value(), var_string_end.value(), code);
+ // Set up args for the final call into generated Irregexp code.
+
+ MachineType type_int32 = MachineType::Int32();
+ MachineType type_tagged = MachineType::AnyTagged();
+ MachineType type_ptr = MachineType::Pointer();
+
+ // Result: A NativeRegExpMacroAssembler::Result return code.
+ MachineType retval_type = type_int32;
+
+ // Argument 0: Original subject string.
+ MachineType arg0_type = type_tagged;
+ Node* const arg0 = string;
+
+ // Argument 1: Previous index.
+ MachineType arg1_type = type_int32;
+ Node* const arg1 = TruncateWordToWord32(int_last_index);
+
+ // Argument 2: Start of string data.
+ MachineType arg2_type = type_ptr;
+ Node* const arg2 = var_string_start.value();
+
+ // Argument 3: End of string data.
+ MachineType arg3_type = type_ptr;
+ Node* const arg3 = var_string_end.value();
+
+ // Argument 4: static offsets vector buffer.
+ MachineType arg4_type = type_ptr;
+ Node* const arg4 = static_offsets_vector_address;
+
+ // Argument 5: Set the number of capture registers to zero to force global
+ // regexps to behave as non-global. This does not affect non-global
+ // regexps.
+ MachineType arg5_type = type_int32;
+ Node* const arg5 = Int32Constant(0);
+
+ // Argument 6: Start (high end) of backtracking stack memory area.
+ Node* const stack_start =
+ Load(MachineType::Pointer(), regexp_stack_memory_address_address);
+ Node* const stack_size =
+ Load(MachineType::IntPtr(), regexp_stack_memory_size_address);
+ Node* const stack_end = IntPtrAdd(stack_start, stack_size);
+
+ MachineType arg6_type = type_ptr;
+ Node* const arg6 = stack_end;
+
+ // Argument 7: Indicate that this is a direct call from JavaScript.
+ MachineType arg7_type = type_int32;
+ Node* const arg7 = Int32Constant(1);
+
+ // Argument 8: Pass current isolate address.
+ MachineType arg8_type = type_ptr;
+ Node* const arg8 = isolate_address;
+
+ Node* const code_entry =
+ IntPtrAdd(BitcastTaggedToWord(code),
+ IntPtrConstant(Code::kHeaderSize - kHeapObjectTag));
+
+ Node* const result = CallCFunction9(
+ retval_type, arg0_type, arg1_type, arg2_type, arg3_type, arg4_type,
+ arg5_type, arg6_type, arg7_type, arg8_type, code_entry, arg0, arg1,
+ arg2, arg3, arg4, arg5, arg6, arg7, arg8);
// Check the result.
- // We expect exactly one result since the stub forces the called regexp to
- // behave as non-global.
- GotoIf(SmiEqual(result, SmiConstant(1)), &if_success);
- GotoIf(SmiEqual(result, SmiConstant(NativeRegExpMacroAssembler::FAILURE)),
+ // We expect exactly one result since we force the called regexp to behave
+ // as non-global.
+ Node* const int_result = ChangeInt32ToIntPtr(result);
+ GotoIf(IntPtrEqual(int_result,
+ IntPtrConstant(NativeRegExpMacroAssembler::SUCCESS)),
+ &if_success);
+ GotoIf(IntPtrEqual(int_result,
+ IntPtrConstant(NativeRegExpMacroAssembler::FAILURE)),
&if_failure);
- GotoIf(SmiEqual(result, SmiConstant(NativeRegExpMacroAssembler::EXCEPTION)),
+ GotoIf(IntPtrEqual(int_result,
+ IntPtrConstant(NativeRegExpMacroAssembler::EXCEPTION)),
&if_exception);
- CSA_ASSERT(
- this, SmiEqual(result, SmiConstant(NativeRegExpMacroAssembler::RETRY)));
+ CSA_ASSERT(this,
+ IntPtrEqual(int_result,
+ IntPtrConstant(NativeRegExpMacroAssembler::RETRY)));
Goto(&runtime);
}
@@ -440,22 +511,15 @@ Node* RegExpBuiltinsAssembler::IrregexpExec(Node* const context,
BIND(&if_exception);
{
- Node* const pending_exception =
- Load(MachineType::AnyTagged(), pending_exception_address);
-
- // If there is no pending exception, a
- // stack overflow (on the backtrack stack) was detected in RegExp code.
-
- Label stack_overflow(this), rethrow(this);
- Branch(IsTheHole(pending_exception), &stack_overflow, &rethrow);
-
- BIND(&stack_overflow);
+// A stack overflow was detected in RegExp code.
+#ifdef DEBUG
+ Node* const pending_exception_address = ExternalConstant(
+ ExternalReference(Isolate::kPendingExceptionAddress, isolate()));
+ CSA_ASSERT(this, IsTheHole(Load(MachineType::AnyTagged(),
+ pending_exception_address)));
+#endif // DEBUG
CallRuntime(Runtime::kThrowStackOverflow, context);
Unreachable();
-
- BIND(&rethrow);
- CallRuntime(Runtime::kRegExpExecReThrow, context);
- Unreachable();
}
BIND(&runtime);
@@ -513,8 +577,7 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
BIND(&call_tolength);
{
- var_lastindex.Bind(
- CallBuiltin(Builtins::kToLength, context, regexp_lastindex));
+ var_lastindex.Bind(ToLength_Inline(context, regexp_lastindex));
Goto(&next);
}
@@ -571,8 +634,8 @@ Node* RegExpBuiltinsAssembler::RegExpPrototypeExecBodyWithoutResult(
native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
// Call the exec stub.
- match_indices = IrregexpExec(context, regexp, string, var_lastindex.value(),
- last_match_info);
+ match_indices = RegExpExecInternal(context, regexp, string,
+ var_lastindex.value(), last_match_info);
var_result.Bind(match_indices);
// {match_indices} is either null or the RegExpMatchInfo array.
@@ -1877,7 +1940,7 @@ void RegExpBuiltinsAssembler::RegExpPrototypeMatchBody(Node* const context,
if (is_fastpath) {
CSA_ASSERT(this, TaggedIsPositiveSmi(last_index));
} else {
- last_index = CallBuiltin(Builtins::kToLength, context, last_index);
+ last_index = ToLength_Inline(context, last_index);
}
Node* const new_last_index =
@@ -2104,8 +2167,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const last_match_info = LoadContextElement(
native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
- Node* const match_indices =
- IrregexpExec(context, regexp, string, smi_zero, last_match_info);
+ Node* const match_indices = RegExpExecInternal(context, regexp, string,
+ smi_zero, last_match_info);
Label return_singleton_array(this);
Branch(WordEqual(match_indices, null), &return_singleton_array,
@@ -2166,8 +2229,8 @@ void RegExpBuiltinsAssembler::RegExpPrototypeSplitBody(Node* const context,
Node* const last_match_info = LoadContextElement(
native_context, Context::REGEXP_LAST_MATCH_INFO_INDEX);
- Node* const match_indices = IrregexpExec(context, regexp, string,
- next_search_from, last_match_info);
+ Node* const match_indices = RegExpExecInternal(
+ context, regexp, string, next_search_from, last_match_info);
// We're done if no match was found.
{
@@ -2373,10 +2436,19 @@ TF_BUILTIN(RegExpSplit, RegExpBuiltinsAssembler) {
// ES#sec-regexp.prototype-@@split
// RegExp.prototype [ @@split ] ( string, limit )
TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
- Node* const maybe_receiver = Parameter(Descriptor::kReceiver);
- Node* const maybe_string = Parameter(Descriptor::kString);
- Node* const maybe_limit = Parameter(Descriptor::kLimit);
- Node* const context = Parameter(Descriptor::kContext);
+ const int kStringArg = 0;
+ const int kLimitArg = 1;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* const maybe_receiver = args.GetReceiver();
+ Node* const maybe_string =
+ args.GetOptionalArgumentValue(kStringArg, UndefinedConstant());
+ Node* const maybe_limit =
+ args.GetOptionalArgumentValue(kLimitArg, UndefinedConstant());
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
// Ensure {maybe_receiver} is a JSReceiver.
ThrowIfNotJSReceiver(context, maybe_receiver,
@@ -2391,12 +2463,12 @@ TF_BUILTIN(RegExpPrototypeSplit, RegExpBuiltinsAssembler) {
BranchIfFastRegExp(context, receiver, &stub, &runtime);
BIND(&stub);
- Return(CallBuiltin(Builtins::kRegExpSplit, context, receiver, string,
- maybe_limit));
+ args.PopAndReturn(CallBuiltin(Builtins::kRegExpSplit, context, receiver,
+ string, maybe_limit));
BIND(&runtime);
- Return(CallRuntime(Runtime::kRegExpSplit, context, receiver, string,
- maybe_limit));
+ args.PopAndReturn(CallRuntime(Runtime::kRegExpSplit, context, receiver,
+ string, maybe_limit));
}
Node* RegExpBuiltinsAssembler::ReplaceGlobalCallableFastPath(
@@ -2736,8 +2808,7 @@ TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
// 3. Does ToString({replace_value}) contain '$'?
BIND(&checkreplacestring);
{
- Node* const replace_string =
- CallBuiltin(Builtins::kToString, context, replace_value);
+ Node* const replace_string = ToString_Inline(context, replace_value);
// ToString(replaceValue) could potentially change the shape of the RegExp
// object. Recheck that we are still on the fast path and bail to runtime
@@ -2786,10 +2857,19 @@ TF_BUILTIN(RegExpReplace, RegExpBuiltinsAssembler) {
// ES#sec-regexp.prototype-@@replace
// RegExp.prototype [ @@replace ] ( string, replaceValue )
TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
- Node* const maybe_receiver = Parameter(Descriptor::kReceiver);
- Node* const maybe_string = Parameter(Descriptor::kString);
- Node* const replace_value = Parameter(Descriptor::kReplaceValue);
- Node* const context = Parameter(Descriptor::kContext);
+ const int kStringArg = 0;
+ const int kReplaceValueArg = 1;
+
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+
+ Node* const maybe_receiver = args.GetReceiver();
+ Node* const maybe_string =
+ args.GetOptionalArgumentValue(kStringArg, UndefinedConstant());
+ Node* const replace_value =
+ args.GetOptionalArgumentValue(kReplaceValueArg, UndefinedConstant());
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
// RegExpPrototypeReplace is a bit of a beast - a summary of dispatch logic:
//
@@ -2816,19 +2896,19 @@ TF_BUILTIN(RegExpPrototypeReplace, RegExpBuiltinsAssembler) {
Node* const receiver = maybe_receiver;
// Convert {maybe_string} to a String.
- Node* const string = CallBuiltin(Builtins::kToString, context, maybe_string);
+ Node* const string = ToString_Inline(context, maybe_string);
// Fast-path checks: 1. Is the {receiver} an unmodified JSRegExp instance?
Label stub(this), runtime(this, Label::kDeferred);
BranchIfFastRegExp(context, receiver, &stub, &runtime);
BIND(&stub);
- Return(CallBuiltin(Builtins::kRegExpReplace, context, receiver, string,
- replace_value));
+ args.PopAndReturn(CallBuiltin(Builtins::kRegExpReplace, context, receiver,
+ string, replace_value));
BIND(&runtime);
- Return(CallRuntime(Runtime::kRegExpReplace, context, receiver, string,
- replace_value));
+ args.PopAndReturn(CallRuntime(Runtime::kRegExpReplace, context, receiver,
+ string, replace_value));
}
// Simple string matching functionality for internal use which does not modify
@@ -2848,8 +2928,8 @@ TF_BUILTIN(RegExpInternalMatch, RegExpBuiltinsAssembler) {
Node* const internal_match_info = LoadContextElement(
native_context, Context::REGEXP_INTERNAL_MATCH_INFO_INDEX);
- Node* const match_indices =
- IrregexpExec(context, regexp, string, smi_zero, internal_match_info);
+ Node* const match_indices = RegExpExecInternal(context, regexp, string,
+ smi_zero, internal_match_info);
Label if_matched(this), if_didnotmatch(this);
Branch(WordEqual(match_indices, null), &if_didnotmatch, &if_matched);
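
RegExpExecInternal now calls the generated matcher directly through CallCFunction9 and dispatches on the NativeRegExpMacroAssembler result code: SUCCESS builds match info, FAILURE returns null, EXCEPTION means the backtrack stack overflowed, and RETRY bails out to the runtime. A sketch of that dispatch; the symbolic names come from the code above, while the concrete numeric values here are assumptions:

#include <cstdio>

// Result codes of the generated matcher (numeric values are illustrative).
enum RegExpResult { RETRY = -2, EXCEPTION = -1, FAILURE = 0, SUCCESS = 1 };

const char* DispatchExecResult(int raw_result) {
  switch (raw_result) {
    case SUCCESS:   return "build match info from the offsets vector";
    case FAILURE:   return "no match: return null";
    case EXCEPTION: return "throw stack overflow (backtrack stack exhausted)";
    case RETRY:     return "bail out to the runtime and retry";
    default:        return "unreachable: already validated";
  }
}

int main() { std::printf("%s\n", DispatchExecResult(SUCCESS)); }
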
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h
index 0f66ebff21..3db92f1d43 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.h
+++ b/deps/v8/src/builtins/builtins-regexp-gen.h
@@ -36,10 +36,10 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
String::Encoding encoding, Variable* var_string_start,
Variable* var_string_end);
- // Low level logic around the actual call into generated Irregexp code.
- Node* IrregexpExec(Node* const context, Node* const regexp,
- Node* const string, Node* const last_index,
- Node* const match_info);
+ // Low-level logic around the actual call into pattern matching code.
+ Node* RegExpExecInternal(Node* const context, Node* const regexp,
+ Node* const string, Node* const last_index,
+ Node* const match_info);
Node* ConstructNewResultFromMatchInfo(Node* const context, Node* const regexp,
Node* const match_info,
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index 056dfc6e1a..8f24a5852c 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -27,8 +27,11 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler {
Node** out_backing_store);
Node* ConvertTaggedAtomicIndexToWord32(Node* tagged, Node* context,
Node** number_index);
- void ValidateAtomicIndex(Node* index_word, Node* array_length_word,
- Node* context);
+ void ValidateAtomicIndex(Node* array, Node* index_word, Node* context);
+#if DEBUG
+ void DebugSanityCheckAtomicIndex(Node* array, Node* index_word,
+ Node* context);
+#endif
void AtomicBinopBuiltinCommon(Node* array, Node* index, Node* value,
Node* context, AssemblerFunction function,
Runtime::FunctionId runtime_function);
@@ -88,56 +91,35 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray(
Node* SharedArrayBufferBuiltinsAssembler::ConvertTaggedAtomicIndexToWord32(
Node* tagged, Node* context, Node** number_index) {
VARIABLE(var_result, MachineRepresentation::kWord32);
-
- // TODO(jkummerow): Skip ToNumber call when |tagged| is a number already.
- // Maybe this can be unified with other tagged-to-index conversions?
- // Why does this return an int32, and not an intptr?
- // Why is there the additional |number_index| output parameter?
- Callable to_number = CodeFactory::ToNumber(isolate());
- *number_index = CallStub(to_number, context, tagged);
- Label done(this, &var_result);
-
- Label if_numberissmi(this), if_numberisnotsmi(this);
- Branch(TaggedIsSmi(*number_index), &if_numberissmi, &if_numberisnotsmi);
-
- BIND(&if_numberissmi);
- {
- var_result.Bind(SmiToWord32(*number_index));
- Goto(&done);
- }
-
- BIND(&if_numberisnotsmi);
+ Label done(this), range_error(this);
+
+ // Returns word32 since the index cannot be larger than a TypedArray's
+ // length, which has a uint32 maximum.
+ // The |number_index| output parameter is used only for architectures that
+ // don't currently have a TF implementation and forward to runtime functions
+ // instead; they expect the value to have already been coerced to an integer.
+ *number_index = ToSmiIndex(tagged, context, &range_error);
+ var_result.Bind(SmiToWord32(*number_index));
+ Goto(&done);
+
+ BIND(&range_error);
{
- Node* number_index_value = LoadHeapNumberValue(*number_index);
- Node* access_index = TruncateFloat64ToWord32(number_index_value);
- Node* test_index = ChangeInt32ToFloat64(access_index);
-
- Label if_indexesareequal(this), if_indexesarenotequal(this);
- Branch(Float64Equal(number_index_value, test_index), &if_indexesareequal,
- &if_indexesarenotequal);
-
- BIND(&if_indexesareequal);
- {
- var_result.Bind(access_index);
- Goto(&done);
- }
-
- BIND(&if_indexesarenotequal);
- {
- CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
- Unreachable();
- }
+ CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
+ Unreachable();
}
BIND(&done);
return var_result.value();
}
-void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(
- Node* index_word, Node* array_length_word, Node* context) {
+void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(Node* array,
+ Node* index_word,
+ Node* context) {
// Check if the index is in bounds. If not, throw RangeError.
Label check_passed(this);
- GotoIf(Uint32LessThan(index_word, array_length_word), &check_passed);
+ Node* array_length_word32 = TruncateTaggedToWord32(
+ context, LoadObjectField(array, JSTypedArray::kLengthOffset));
+ GotoIf(Uint32LessThan(index_word, array_length_word32), &check_passed);
CallRuntime(Runtime::kThrowInvalidAtomicAccessIndexError, context);
Unreachable();
@@ -145,22 +127,35 @@ void SharedArrayBufferBuiltinsAssembler::ValidateAtomicIndex(
BIND(&check_passed);
}
+#if DEBUG
+void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex(
+ Node* array, Node* index_word, Node* context) {
+ // In Debug mode, we re-validate the index as a sanity check because
+ // ToInteger above calls out to JavaScript. A SharedArrayBuffer can't be
+ // neutered and the TypedArray length can't change either, so skipping this
+ // check in Release mode is safe.
+ CSA_ASSERT(
+ this,
+ Uint32LessThan(
+ index_word,
+ TruncateTaggedToWord32(
+ context, LoadObjectField(array, JSTypedArray::kLengthOffset))));
+}
+#endif
+
TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
Node* array = Parameter(Descriptor::kArray);
Node* index = Parameter(Descriptor::kIndex);
Node* context = Parameter(Descriptor::kContext);
- Node* index_integer;
- Node* index_word32 =
- ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
-
Node* instance_type;
Node* backing_store;
ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
- Node* array_length_word32 = TruncateTaggedToWord32(
- context, LoadObjectField(array, JSTypedArray::kLengthOffset));
- ValidateAtomicIndex(index_word32, array_length_word32, context);
+ Node* index_integer;
+ Node* index_word32 =
+ ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
+ ValidateAtomicIndex(array, index_word32, context);
Node* index_word = ChangeUint32ToWord(index_word32);
Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this),
@@ -210,25 +205,22 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
Node* value = Parameter(Descriptor::kValue);
Node* context = Parameter(Descriptor::kContext);
- // The value_integer needs to be computed before the validations as the
- // ToInteger function can be potentially modified in JS to invalidate the
- // conditions. This is just a no-cost safety measure as SABs can't be neutered
- // or shrunk.
- Node* value_integer = ToInteger(context, value);
- Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
+ Node* instance_type;
+ Node* backing_store;
+ ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
Node* index_integer;
Node* index_word32 =
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
+ ValidateAtomicIndex(array, index_word32, context);
+ Node* index_word = ChangeUint32ToWord(index_word32);
- Node* instance_type;
- Node* backing_store;
- ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+ Node* value_integer = ToInteger(context, value);
+ Node* value_word32 = TruncateTaggedToWord32(context, value_integer);
- Node* array_length_word32 = TruncateTaggedToWord32(
- context, LoadObjectField(array, JSTypedArray::kLengthOffset));
- ValidateAtomicIndex(index_word32, array_length_word32, context);
- Node* index_word = ChangeUint32ToWord(index_word32);
+#if DEBUG
+ DebugSanityCheckAtomicIndex(array, index_word32, context);
+#endif
Label u8(this), u16(this), u32(this), other(this);
int32_t case_values[] = {
@@ -267,23 +259,20 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
Node* value = Parameter(Descriptor::kValue);
Node* context = Parameter(Descriptor::kContext);
- // The value_integer needs to be computed before the validations as the
- // ToInteger function can be potentially modified in JS to invalidate the
- // conditions. This is just a no-cost safety measure as SABs can't be neutered
- // or shrunk.
- Node* value_integer = ToInteger(context, value);
+ Node* instance_type;
+ Node* backing_store;
+ ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
Node* index_integer;
Node* index_word32 =
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
+ ValidateAtomicIndex(array, index_word32, context);
- Node* instance_type;
- Node* backing_store;
- ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+ Node* value_integer = ToInteger(context, value);
- Node* array_length_word32 = TruncateTaggedToWord32(
- context, LoadObjectField(array, JSTypedArray::kLengthOffset));
- ValidateAtomicIndex(index_word32, array_length_word32, context);
+#if DEBUG
+ DebugSanityCheckAtomicIndex(array, index_word32, context);
+#endif
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_integer,
@@ -344,24 +333,21 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
Node* new_value = Parameter(Descriptor::kNewValue);
Node* context = Parameter(Descriptor::kContext);
- // The value_integers needs to be computed before the validations as the
- // ToInteger function can be potentially modified in JS to invalidate the
- // conditions. This is just a no-cost safety measure as SABs can't be neutered
- // or shrunk.
- Node* old_value_integer = ToInteger(context, old_value);
- Node* new_value_integer = ToInteger(context, new_value);
+ Node* instance_type;
+ Node* backing_store;
+ ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
Node* index_integer;
Node* index_word32 =
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
+ ValidateAtomicIndex(array, index_word32, context);
- Node* instance_type;
- Node* backing_store;
- ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+ Node* old_value_integer = ToInteger(context, old_value);
+ Node* new_value_integer = ToInteger(context, new_value);
- Node* array_length_word32 = TruncateTaggedToWord32(
- context, LoadObjectField(array, JSTypedArray::kLengthOffset));
- ValidateAtomicIndex(index_word32, array_length_word32, context);
+#if DEBUG
+ DebugSanityCheckAtomicIndex(array, index_word32, context);
+#endif
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
@@ -443,23 +429,24 @@ BINOP_BUILTIN(Xor)
void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
Node* array, Node* index, Node* value, Node* context,
AssemblerFunction function, Runtime::FunctionId runtime_function) {
- // The value_integer needs to be computed before the validations as the
- // ToInteger function can be potentially modified in JS to invalidate the
- // conditions. This is just a no-cost safety measure as SABs can't be neutered
- // or shrunk.
- Node* value_integer = ToInteger(context, value);
+ Node* instance_type;
+ Node* backing_store;
+ ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
Node* index_integer;
Node* index_word32 =
ConvertTaggedAtomicIndexToWord32(index, context, &index_integer);
+ ValidateAtomicIndex(array, index_word32, context);
- Node* instance_type;
- Node* backing_store;
- ValidateSharedTypedArray(array, context, &instance_type, &backing_store);
+ Node* value_integer = ToInteger(context, value);
- Node* array_length_word32 = TruncateTaggedToWord32(
- context, LoadObjectField(array, JSTypedArray::kLengthOffset));
- ValidateAtomicIndex(index_word32, array_length_word32, context);
+#if DEBUG
+ // In Debug mode, we re-validate the index as a sanity check because
+ // ToInteger above calls out to JavaScript. A SharedArrayBuffer can't be
+ // neutered and the TypedArray length can't change either, so skipping this
+ // check in Release mode is safe.
+ ValidateAtomicIndex(array, index_word32, context);
+#endif
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
@@ -482,36 +469,36 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
Switch(instance_type, &other, case_values, case_labels,
arraysize(case_labels));
- Bind(&i8);
+ BIND(&i8);
Return(SmiFromWord32((this->*function)(MachineType::Int8(), backing_store,
index_word, value_word32)));
- Bind(&u8);
+ BIND(&u8);
Return(SmiFromWord32((this->*function)(MachineType::Uint8(), backing_store,
index_word, value_word32)));
- Bind(&i16);
+ BIND(&i16);
Return(
SmiFromWord32((this->*function)(MachineType::Int16(), backing_store,
WordShl(index_word, 1), value_word32)));
- Bind(&u16);
+ BIND(&u16);
Return(
SmiFromWord32((this->*function)(MachineType::Uint16(), backing_store,
WordShl(index_word, 1), value_word32)));
- Bind(&i32);
+ BIND(&i32);
Return(ChangeInt32ToTagged(
(this->*function)(MachineType::Int32(), backing_store,
WordShl(index_word, 2), value_word32)));
- Bind(&u32);
+ BIND(&u32);
Return(ChangeUint32ToTagged(
(this->*function)(MachineType::Uint32(), backing_store,
WordShl(index_word, 2), value_word32)));
// This shouldn't happen, we've already validated the type.
- Bind(&other);
+ BIND(&other);
Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
// || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
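
The atomics builtins were reordered so the array and index are validated before ToInteger can run user JavaScript on the value; because a SharedArrayBuffer cannot be neutered and its length cannot change, the post-coercion recheck is a debug-only assertion. A sketch of that ordering under those assumptions:

#include <cassert>
#include <cstdint>
#include <functional>
#include <stdexcept>
#include <vector>

// Stand-in: ToInteger may invoke arbitrary user JS (valueOf side effects).
using ToInteger = std::function<int64_t()>;

int32_t AtomicsStoreSketch(std::vector<int32_t>& array, uint32_t index,
                           const ToInteger& coerce_value) {
  // 1. Validate the array and index up front.
  if (index >= array.size())
    throw std::range_error("invalid atomic access index");
  // 2. Only then run user code; the backing store cannot shrink meanwhile.
  int32_t value = static_cast<int32_t>(coerce_value());
#ifdef DEBUG
  assert(index < array.size());  // re-validate, mirroring the CSA check
#endif
  array[index] = value;  // the real builtin uses an atomic store here
  return value;
}

int main() {
  std::vector<int32_t> a(8, 0);
  return AtomicsStoreSketch(a, 3, [] { return int64_t{42}; }) == 42 ? 0 : 1;
}
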
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
index 0ec8423104..d7a81a2ffe 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc
@@ -63,23 +63,15 @@ MUST_USE_RESULT MaybeHandle<JSTypedArray> ValidateSharedIntegerTypedArray(
MUST_USE_RESULT Maybe<size_t> ValidateAtomicAccess(
Isolate* isolate, Handle<JSTypedArray> typed_array,
Handle<Object> request_index) {
- // TOOD(v8:5961): Use ToIndex for indexes
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, request_index,
- Object::ToNumber(request_index),
- Nothing<size_t>());
- Handle<Object> offset;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, offset,
- Object::ToInteger(isolate, request_index),
- Nothing<size_t>());
- if (!request_index->SameValue(*offset)) {
- isolate->Throw(*isolate->factory()->NewRangeError(
- MessageTemplate::kInvalidAtomicAccessIndex));
- return Nothing<size_t>();
- }
- size_t access_index;
- uint32_t length = typed_array->length_value();
- if (!TryNumberToSize(*request_index, &access_index) ||
- access_index >= length) {
+ Handle<Object> access_index_obj;
+ ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+ isolate, access_index_obj,
+ Object::ToIndex(isolate, request_index,
+ MessageTemplate::kInvalidAtomicAccessIndex),
+ Nothing<size_t>());
+
+ size_t access_index = NumberToSize(*access_index_obj);
+ if (access_index >= typed_array->length_value()) {
isolate->Throw(*isolate->factory()->NewRangeError(
MessageTemplate::kInvalidAtomicAccessIndex));
return Nothing<size_t>();
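
Object::ToIndex collapses the old ToNumber/ToInteger/SameValue sequence into the single spec operation ToIndex: truncate to an integer, then reject anything negative or above 2^53 - 1. A rough scalar equivalent (a sketch of the spec semantics, not the V8 implementation):

    #include <cmath>
    #include <cstdint>
    #include <optional>

    // ToIndex for an already-numeric input: truncate toward zero, then
    // require 0 <= result <= 2^53 - 1; nullopt models the RangeError.
    std::optional<uint64_t> ToIndexSketch(double value) {
      if (std::isnan(value)) value = 0;  // ToInteger(NaN) is +0
      double integer = std::trunc(value);
      constexpr double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
      if (integer < 0 || integer > kMaxSafeInteger) return std::nullopt;
      return static_cast<uint64_t>(integer);
    }

An index that survives this check can still fail the access_index >= typed_array->length_value() test above, which reports the same RangeError.
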
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index ed559eadfd..ee85476401 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -2,149 +2,167 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/builtins/builtins-string-gen.h"
+
#include "src/builtins/builtins-regexp-gen.h"
#include "src/builtins/builtins-utils-gen.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
-#include "src/code-stub-assembler.h"
#include "src/objects.h"
namespace v8 {
namespace internal {
typedef CodeStubAssembler::RelationalComparisonMode RelationalComparisonMode;
+typedef compiler::Node Node;
+
+Node* StringBuiltinsAssembler::DirectStringData(Node* string,
+ Node* string_instance_type) {
+ // Compute the effective offset of the first character.
+ VARIABLE(var_data, MachineType::PointerRepresentation());
+ Label if_sequential(this), if_external(this), if_join(this);
+ Branch(Word32Equal(Word32And(string_instance_type,
+ Int32Constant(kStringRepresentationMask)),
+ Int32Constant(kSeqStringTag)),
+ &if_sequential, &if_external);
+
+ BIND(&if_sequential);
+ {
+ var_data.Bind(IntPtrAdd(
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
+ BitcastTaggedToWord(string)));
+ Goto(&if_join);
+ }
-class StringBuiltinsAssembler : public CodeStubAssembler {
- public:
- explicit StringBuiltinsAssembler(compiler::CodeAssemblerState* state)
- : CodeStubAssembler(state) {}
+ BIND(&if_external);
+ {
+ // This is only valid for ExternalStrings where the resource data
+ // pointer is cached (i.e. no short external strings).
+ CSA_ASSERT(
+ this, Word32NotEqual(Word32And(string_instance_type,
+ Int32Constant(kShortExternalStringMask)),
+ Int32Constant(kShortExternalStringTag)));
+ var_data.Bind(LoadObjectField(string, ExternalString::kResourceDataOffset,
+ MachineType::Pointer()));
+ Goto(&if_join);
+ }
- protected:
- Node* DirectStringData(Node* string, Node* string_instance_type) {
- // Compute the effective offset of the first character.
- VARIABLE(var_data, MachineType::PointerRepresentation());
- Label if_sequential(this), if_external(this), if_join(this);
- Branch(Word32Equal(Word32And(string_instance_type,
- Int32Constant(kStringRepresentationMask)),
- Int32Constant(kSeqStringTag)),
- &if_sequential, &if_external);
-
- BIND(&if_sequential);
- {
- var_data.Bind(IntPtrAdd(
- IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
- BitcastTaggedToWord(string)));
- Goto(&if_join);
- }
+ BIND(&if_join);
+ return var_data.value();
+}
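
DirectStringData boils down to two address computations: for a sequential string, the payload starts at a fixed header offset inside the object; for an external string, the cached resource pointer is loaded from a field. A simplified sketch over plain structs, with layouts that are only illustrative:

    #include <cstdint>

    // Illustrative layouts only; the real object layouts live in objects.h.
    struct SeqStringSketch { uint64_t header; uint8_t chars[8]; };
    struct ExternalStringSketch { const uint8_t* resource_data; };

    enum class Rep { kSequential, kExternal };

    const uint8_t* DirectStringDataSketch(const void* string, Rep rep) {
      if (rep == Rep::kSequential) {
        // Mirrors string + SeqOneByteString::kHeaderSize - kHeapObjectTag.
        return reinterpret_cast<const SeqStringSketch*>(string)->chars;
      }
      // Only valid when the resource data pointer is cached, which the
      // kShortExternalStringMask assertion above guarantees.
      return reinterpret_cast<const ExternalStringSketch*>(string)->resource_data;
    }
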
- BIND(&if_external);
- {
- // This is only valid for ExternalStrings where the resource data
- // pointer is cached (i.e. no short external strings).
- CSA_ASSERT(this, Word32NotEqual(
- Word32And(string_instance_type,
- Int32Constant(kShortExternalStringMask)),
- Int32Constant(kShortExternalStringTag)));
- var_data.Bind(LoadObjectField(string, ExternalString::kResourceDataOffset,
- MachineType::Pointer()));
- Goto(&if_join);
- }
+void StringBuiltinsAssembler::DispatchOnStringEncodings(
+ Node* const lhs_instance_type, Node* const rhs_instance_type,
+ Label* if_one_one, Label* if_one_two, Label* if_two_one,
+ Label* if_two_two) {
+ STATIC_ASSERT(kStringEncodingMask == 0x8);
+ STATIC_ASSERT(kTwoByteStringTag == 0x0);
+ STATIC_ASSERT(kOneByteStringTag == 0x8);
- BIND(&if_join);
- return var_data.value();
- }
+ // First combine the encodings.
- Node* LoadOneByteChar(Node* string, Node* index) {
- return Load(MachineType::Uint8(), string, OneByteCharOffset(index));
- }
+ Node* const encoding_mask = Int32Constant(kStringEncodingMask);
+ Node* const lhs_encoding = Word32And(lhs_instance_type, encoding_mask);
+ Node* const rhs_encoding = Word32And(rhs_instance_type, encoding_mask);
- Node* OneByteCharAddress(Node* string, Node* index) {
- Node* offset = OneByteCharOffset(index);
- return IntPtrAdd(string, offset);
- }
+ Node* const combined_encodings =
+ Word32Or(lhs_encoding, Word32Shr(rhs_encoding, 1));
- Node* OneByteCharOffset(Node* index) {
- return CharOffset(String::ONE_BYTE_ENCODING, index);
- }
+ // Then dispatch on the combined encoding.
- Node* CharOffset(String::Encoding encoding, Node* index) {
- const int header = SeqOneByteString::kHeaderSize - kHeapObjectTag;
- Node* offset = index;
- if (encoding == String::TWO_BYTE_ENCODING) {
- offset = IntPtrAdd(offset, offset);
- }
- offset = IntPtrAdd(offset, IntPtrConstant(header));
- return offset;
- }
+ Label unreachable(this, Label::kDeferred);
- void DispatchOnStringInstanceType(Node* const instance_type,
- Label* if_onebyte_sequential,
- Label* if_onebyte_external,
- Label* if_otherwise) {
- const int kMask = kStringRepresentationMask | kStringEncodingMask;
- Node* const encoding_and_representation =
- Word32And(instance_type, Int32Constant(kMask));
-
- int32_t values[] = {
- kOneByteStringTag | kSeqStringTag,
- kOneByteStringTag | kExternalStringTag,
- };
- Label* labels[] = {
- if_onebyte_sequential, if_onebyte_external,
- };
- STATIC_ASSERT(arraysize(values) == arraysize(labels));
-
- Switch(encoding_and_representation, if_otherwise, values, labels,
- arraysize(values));
- }
+ int32_t values[] = {
+ kOneByteStringTag | (kOneByteStringTag >> 1),
+ kOneByteStringTag | (kTwoByteStringTag >> 1),
+ kTwoByteStringTag | (kOneByteStringTag >> 1),
+ kTwoByteStringTag | (kTwoByteStringTag >> 1),
+ };
+ Label* labels[] = {
+ if_one_one, if_one_two, if_two_one, if_two_two,
+ };
- void GenerateStringEqual(Node* context, Node* left, Node* right);
- void GenerateStringRelationalComparison(Node* context, Node* left,
- Node* right,
- RelationalComparisonMode mode);
+ STATIC_ASSERT(arraysize(values) == arraysize(labels));
+ Switch(combined_encodings, &unreachable, values, labels, arraysize(values));
- Node* ToSmiBetweenZeroAnd(Node* context, Node* value, Node* limit);
+ BIND(&unreachable);
+ Unreachable();
+}
+
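
The dispatch works because the encoding occupies a single bit (0x8 = one-byte, 0x0 = two-byte), so both operands fit into a two-bit key: the left operand keeps bit 3 and the right operand's bit is shifted down by one. A sketch of the key computation:

    #include <cstdint>

    constexpr uint32_t kStringEncodingMaskSketch = 0x8;

    // Key values: 0xC one/one, 0x8 one/two, 0x4 two/one, 0x0 two/two --
    // exactly the four case_values in the Switch above.
    uint32_t CombinedEncodingKey(uint32_t lhs_instance_type,
                                 uint32_t rhs_instance_type) {
      uint32_t lhs = lhs_instance_type & kStringEncodingMaskSketch;
      uint32_t rhs = rhs_instance_type & kStringEncodingMaskSketch;
      return lhs | (rhs >> 1);
    }
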
+template <typename SubjectChar, typename PatternChar>
+Node* StringBuiltinsAssembler::CallSearchStringRaw(Node* const subject_ptr,
+ Node* const subject_length,
+ Node* const search_ptr,
+ Node* const search_length,
+ Node* const start_position) {
+ Node* const function_addr = ExternalConstant(
+ ExternalReference::search_string_raw<SubjectChar, PatternChar>(
+ isolate()));
+ Node* const isolate_ptr =
+ ExternalConstant(ExternalReference::isolate_address(isolate()));
+
+ MachineType type_ptr = MachineType::Pointer();
+ MachineType type_intptr = MachineType::IntPtr();
+
+ Node* const result = CallCFunction6(
+ type_intptr, type_ptr, type_ptr, type_intptr, type_ptr, type_intptr,
+ type_intptr, function_addr, isolate_ptr, subject_ptr, subject_length,
+ search_ptr, search_length, start_position);
+
+ return result;
+}
- Node* LoadSurrogatePairAt(Node* string, Node* length, Node* index,
- UnicodeEncoding encoding);
+Node* StringBuiltinsAssembler::PointerToStringDataAtIndex(
+ Node* const string_data, Node* const index, String::Encoding encoding) {
+ const ElementsKind kind = (encoding == String::ONE_BYTE_ENCODING)
+ ? UINT8_ELEMENTS
+ : UINT16_ELEMENTS;
+ Node* const offset_in_bytes =
+ ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS);
+ return IntPtrAdd(string_data, offset_in_bytes);
+}
- void StringIndexOf(Node* receiver, Node* instance_type, Node* search_string,
- Node* search_string_instance_type, Node* position,
- std::function<void(Node*)> f_return);
+void StringBuiltinsAssembler::ConvertAndBoundsCheckStartArgument(
+ Node* context, Variable* var_start, Node* start, Node* string_length) {
+ Node* const start_int =
+ ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
+ Node* const zero = SmiConstant(Smi::kZero);
- Node* IsNullOrUndefined(Node* const value);
- void RequireObjectCoercible(Node* const context, Node* const value,
- const char* method_name);
+ Label done(this);
+ Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
+ Branch(TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
- Node* SmiIsNegative(Node* const value) {
- return SmiLessThan(value, SmiConstant(0));
+ BIND(&if_issmi);
+ {
+ var_start->Bind(
+ Select(SmiLessThan(start_int, zero),
+ [&] { return SmiMax(SmiAdd(string_length, start_int), zero); },
+ [&] { return start_int; }, MachineRepresentation::kTagged));
+ Goto(&done);
}
- // Implements boilerplate logic for {match, split, replace, search} of the
- // form:
- //
- // if (!IS_NULL_OR_UNDEFINED(object)) {
- // var maybe_function = object[symbol];
- // if (!IS_UNDEFINED(maybe_function)) {
- // return %_Call(maybe_function, ...);
- // }
- // }
- //
- // Contains fast paths for Smi and RegExp objects.
- typedef std::function<Node*()> NodeFunction0;
- typedef std::function<Node*(Node* fn)> NodeFunction1;
- void MaybeCallFunctionAtSymbol(Node* const context, Node* const object,
- Handle<Symbol> symbol,
- const NodeFunction0& regexp_call,
- const NodeFunction1& generic_call);
-};
+ BIND(&if_isheapnumber);
+ {
+ // If {start} is a heap number, it is definitely out of bounds. If it is
+    // negative, {start} = max({string_length} + {start}, 0) = 0. If it is
+    // positive, set {start} to {string_length}, which ultimately results in
+ // returning an empty string.
+ Node* const float_zero = Float64Constant(0.);
+ Node* const start_float = LoadHeapNumberValue(start_int);
+ var_start->Bind(SelectTaggedConstant(
+ Float64LessThan(start_float, float_zero), zero, string_length));
+ Goto(&done);
+ }
+ BIND(&done);
+}
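
In scalar terms the helper implements the spec's relative-index rule for {start}; the deferred heap-number branch only has to pick between the two saturation points. A functionally equivalent sketch (using double so that out-of-Smi-range inputs are representable):

    #include <cstdint>

    int64_t ClampStartSketch(double start, int64_t string_length) {
      if (start < 0) {
        // Negative start counts back from the end; huge negatives clamp to 0.
        double adjusted = static_cast<double>(string_length) + start;
        return adjusted < 0 ? 0 : static_cast<int64_t>(adjusted);
      }
      // Huge positive starts clamp to the length, which later yields "".
      return start > static_cast<double>(string_length)
                 ? string_length
                 : static_cast<int64_t>(start);
    }
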
void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
Node* right) {
// Here's pseudo-code for the algorithm below:
//
- // if (lhs == rhs) return true;
// if (lhs->length() != rhs->length()) return false;
+ // restart:
+ // if (lhs == rhs) return true;
// if (lhs->IsInternalizedString() && rhs->IsInternalizedString()) {
// return false;
// }
@@ -155,33 +173,61 @@ void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
// return true;
// }
// if (lhs and/or rhs are indirect strings) {
- // unwrap them and restart from the beginning;
+ // unwrap them and restart from the "restart:" label;
// }
// return %StringEqual(lhs, rhs);
VARIABLE(var_left, MachineRepresentation::kTagged, left);
VARIABLE(var_right, MachineRepresentation::kTagged, right);
-
Variable* input_vars[2] = {&var_left, &var_right};
- Label if_equal(this), if_notequal(this), restart(this, 2, input_vars);
+ Label if_equal(this), if_notequal(this), if_notbothdirectonebytestrings(this),
+ restart(this, 2, input_vars);
+
+ Node* lhs_length = LoadStringLength(left);
+ Node* rhs_length = LoadStringLength(right);
+
+ // Strings with different lengths cannot be equal.
+ GotoIf(WordNotEqual(lhs_length, rhs_length), &if_notequal);
+
Goto(&restart);
BIND(&restart);
Node* lhs = var_left.value();
Node* rhs = var_right.value();
- // Fast check to see if {lhs} and {rhs} refer to the same String object.
- GotoIf(WordEqual(lhs, rhs), &if_equal);
+ Node* lhs_instance_type = LoadInstanceType(lhs);
+ Node* rhs_instance_type = LoadInstanceType(rhs);
- // Load the length of {lhs} and {rhs}.
- Node* lhs_length = LoadStringLength(lhs);
- Node* rhs_length = LoadStringLength(rhs);
+ StringEqual_Core(context, lhs, lhs_instance_type, lhs_length, rhs,
+ rhs_instance_type, &if_equal, &if_notequal,
+ &if_notbothdirectonebytestrings);
- // Strings with different lengths cannot be equal.
- GotoIf(WordNotEqual(lhs_length, rhs_length), &if_notequal);
+ BIND(&if_notbothdirectonebytestrings);
+ {
+ // Try to unwrap indirect strings, restart the above attempt on success.
+ MaybeDerefIndirectStrings(&var_left, lhs_instance_type, &var_right,
+ rhs_instance_type, &restart);
+ // TODO(bmeurer): Add support for two byte string equality checks.
- // Load instance types of {lhs} and {rhs}.
- Node* lhs_instance_type = LoadInstanceType(lhs);
- Node* rhs_instance_type = LoadInstanceType(rhs);
+ TailCallRuntime(Runtime::kStringEqual, context, lhs, rhs);
+ }
+
+ BIND(&if_equal);
+ Return(TrueConstant());
+
+ BIND(&if_notequal);
+ Return(FalseConstant());
+}
+
+void StringBuiltinsAssembler::StringEqual_Core(
+ Node* context, Node* lhs, Node* lhs_instance_type, Node* lhs_length,
+ Node* rhs, Node* rhs_instance_type, Label* if_equal, Label* if_not_equal,
+ Label* if_notbothdirectonebyte) {
+ CSA_ASSERT(this, IsString(lhs));
+ CSA_ASSERT(this, IsString(rhs));
+ CSA_ASSERT(this, WordEqual(LoadStringLength(lhs), lhs_length));
+ CSA_ASSERT(this, WordEqual(LoadStringLength(rhs), lhs_length));
+ // Fast check to see if {lhs} and {rhs} refer to the same String object.
+ GotoIf(WordEqual(lhs, rhs), if_equal);
// Combine the instance types into a single 16-bit value, so we can check
// both of them at once.
@@ -196,7 +242,7 @@ void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
GotoIf(Word32Equal(Word32And(both_instance_types,
Int32Constant(kBothInternalizedMask)),
Int32Constant(kBothInternalizedTag)),
- &if_notequal);
+ if_not_equal);
// Check that both {lhs} and {rhs} are flat one-byte strings, and that
  // in case of ExternalStrings the data pointer is cached.
@@ -207,61 +253,43 @@ void StringBuiltinsAssembler::GenerateStringEqual(Node* context, Node* left,
<< 8);
int const kBothDirectOneByteStringTag =
kOneByteStringTag | (kOneByteStringTag << 8);
- Label if_bothdirectonebytestrings(this), if_notbothdirectonebytestrings(this);
- Branch(Word32Equal(Word32And(both_instance_types,
- Int32Constant(kBothDirectOneByteStringMask)),
- Int32Constant(kBothDirectOneByteStringTag)),
- &if_bothdirectonebytestrings, &if_notbothdirectonebytestrings);
-
- BIND(&if_bothdirectonebytestrings);
+ GotoIfNot(Word32Equal(Word32And(both_instance_types,
+ Int32Constant(kBothDirectOneByteStringMask)),
+ Int32Constant(kBothDirectOneByteStringTag)),
+ if_notbothdirectonebyte);
+
+ // At this point we know that we have two direct one-byte strings.
+
+ // Compute the effective offset of the first character.
+ Node* lhs_data = DirectStringData(lhs, lhs_instance_type);
+ Node* rhs_data = DirectStringData(rhs, rhs_instance_type);
+
+ // Compute the first offset after the string from the length.
+ Node* length = SmiUntag(lhs_length);
+
+ // Loop over the {lhs} and {rhs} strings to see if they are equal.
+ VARIABLE(var_offset, MachineType::PointerRepresentation());
+ Label loop(this, &var_offset);
+ var_offset.Bind(IntPtrConstant(0));
+ Goto(&loop);
+ BIND(&loop);
{
- // Compute the effective offset of the first character.
- Node* lhs_data = DirectStringData(lhs, lhs_instance_type);
- Node* rhs_data = DirectStringData(rhs, rhs_instance_type);
+ // If {offset} equals {end}, no difference was found, so the
+ // strings are equal.
+ Node* offset = var_offset.value();
+ GotoIf(WordEqual(offset, length), if_equal);
- // Compute the first offset after the string from the length.
- Node* length = SmiUntag(lhs_length);
-
- // Loop over the {lhs} and {rhs} strings to see if they are equal.
- VARIABLE(var_offset, MachineType::PointerRepresentation());
- Label loop(this, &var_offset);
- var_offset.Bind(IntPtrConstant(0));
- Goto(&loop);
- BIND(&loop);
- {
- // If {offset} equals {end}, no difference was found, so the
- // strings are equal.
- Node* offset = var_offset.value();
- GotoIf(WordEqual(offset, length), &if_equal);
-
- // Load the next characters from {lhs} and {rhs}.
- Node* lhs_value = Load(MachineType::Uint8(), lhs_data, offset);
- Node* rhs_value = Load(MachineType::Uint8(), rhs_data, offset);
-
- // Check if the characters match.
- GotoIf(Word32NotEqual(lhs_value, rhs_value), &if_notequal);
-
- // Advance to next character.
- var_offset.Bind(IntPtrAdd(offset, IntPtrConstant(1)));
- Goto(&loop);
- }
- }
+ // Load the next characters from {lhs} and {rhs}.
+ Node* lhs_value = Load(MachineType::Uint8(), lhs_data, offset);
+ Node* rhs_value = Load(MachineType::Uint8(), rhs_data, offset);
- BIND(&if_notbothdirectonebytestrings);
- {
- // Try to unwrap indirect strings, restart the above attempt on success.
- MaybeDerefIndirectStrings(&var_left, lhs_instance_type, &var_right,
- rhs_instance_type, &restart);
- // TODO(bmeurer): Add support for two byte string equality checks.
+ // Check if the characters match.
+ GotoIf(Word32NotEqual(lhs_value, rhs_value), if_not_equal);
- TailCallRuntime(Runtime::kStringEqual, context, lhs, rhs);
+ // Advance to next character.
+ var_offset.Bind(IntPtrAdd(offset, IntPtrConstant(1)));
+ Goto(&loop);
}
-
- BIND(&if_equal);
- Return(TrueConstant());
-
- BIND(&if_notequal);
- Return(FalseConstant());
}
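
Once both operands are known to be direct one-byte strings of equal length, the loop above is a byte-wise comparison. In plain C++ the tail of StringEqual_Core is essentially:

    #include <cstddef>
    #include <cstdint>

    // Precondition (asserted by the caller): both strings have |length| bytes.
    bool OneByteEqualSketch(const uint8_t* lhs_data, const uint8_t* rhs_data,
                            size_t length) {
      for (size_t offset = 0; offset < length; ++offset) {
        if (lhs_data[offset] != rhs_data[offset]) return false;  // if_not_equal
      }
      return true;  // offset reached length with no mismatch: if_equal
    }
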
void StringBuiltinsAssembler::GenerateStringRelationalComparison(
@@ -697,7 +725,7 @@ TF_BUILTIN(StringPrototypeConcat, CodeStubAssembler) {
arguments.ForEach(
CodeStubAssembler::VariableList({&var_result}, zone()),
[this, context, &var_result](Node* arg) {
- arg = CallStub(CodeFactory::ToString(isolate()), context, arg);
+ arg = ToString_Inline(context, arg);
var_result.Bind(CallStub(CodeFactory::StringAdd(isolate()), context,
var_result.value(), arg));
});
@@ -705,103 +733,148 @@ TF_BUILTIN(StringPrototypeConcat, CodeStubAssembler) {
}
void StringBuiltinsAssembler::StringIndexOf(
- Node* receiver, Node* instance_type, Node* search_string,
- Node* search_string_instance_type, Node* position,
- std::function<void(Node*)> f_return) {
- CSA_ASSERT(this, IsString(receiver));
+ Node* const subject_string, Node* const subject_instance_type,
+ Node* const search_string, Node* const search_instance_type,
+ Node* const position, std::function<void(Node*)> f_return) {
+ CSA_ASSERT(this, IsString(subject_string));
CSA_ASSERT(this, IsString(search_string));
CSA_ASSERT(this, TaggedIsSmi(position));
- Label zero_length_needle(this),
- call_runtime_unchecked(this, Label::kDeferred), return_minus_1(this),
- check_search_string(this), continue_fast_path(this);
-
Node* const int_zero = IntPtrConstant(0);
+
VARIABLE(var_needle_byte, MachineType::PointerRepresentation(), int_zero);
VARIABLE(var_string_addr, MachineType::PointerRepresentation(), int_zero);
- Node* needle_length = SmiUntag(LoadStringLength(search_string));
- // Use faster/complex runtime fallback for long search strings.
- GotoIf(IntPtrLessThan(IntPtrConstant(1), needle_length),
- &call_runtime_unchecked);
- Node* string_length = SmiUntag(LoadStringLength(receiver));
- Node* start_position = IntPtrMax(SmiUntag(position), int_zero);
+ Node* const search_length = SmiUntag(LoadStringLength(search_string));
+ Node* const subject_length = SmiUntag(LoadStringLength(subject_string));
+ Node* const start_position = IntPtrMax(SmiUntag(position), int_zero);
- GotoIf(IntPtrEqual(int_zero, needle_length), &zero_length_needle);
- // Check that the needle fits in the start position.
- GotoIfNot(IntPtrLessThanOrEqual(needle_length,
- IntPtrSub(string_length, start_position)),
- &return_minus_1);
-
- // Load the string address.
+ Label zero_length_needle(this), return_minus_1(this);
{
- Label if_onebyte_sequential(this);
- Label if_onebyte_external(this, Label::kDeferred);
+ GotoIf(IntPtrEqual(int_zero, search_length), &zero_length_needle);
- // Only support one-byte strings on the fast path.
- DispatchOnStringInstanceType(instance_type, &if_onebyte_sequential,
- &if_onebyte_external, &call_runtime_unchecked);
+ // Check that the needle fits in the start position.
+ GotoIfNot(IntPtrLessThanOrEqual(search_length,
+ IntPtrSub(subject_length, start_position)),
+ &return_minus_1);
+ }
- BIND(&if_onebyte_sequential);
- {
- var_string_addr.Bind(
- OneByteCharAddress(BitcastTaggedToWord(receiver), start_position));
- Goto(&check_search_string);
- }
+ // Try to unpack subject and search strings. Bail to runtime if either needs
+ // to be flattened.
+ ToDirectStringAssembler subject_to_direct(state(), subject_string);
+ ToDirectStringAssembler search_to_direct(state(), search_string);
- BIND(&if_onebyte_external);
- {
- Node* const unpacked = TryDerefExternalString(receiver, instance_type,
- &call_runtime_unchecked);
- var_string_addr.Bind(OneByteCharAddress(unpacked, start_position));
- Goto(&check_search_string);
- }
- }
+ Label call_runtime_unchecked(this, Label::kDeferred);
- // Load the needle character.
- BIND(&check_search_string);
- {
- Label if_onebyte_sequential(this);
- Label if_onebyte_external(this, Label::kDeferred);
+ subject_to_direct.TryToDirect(&call_runtime_unchecked);
+ search_to_direct.TryToDirect(&call_runtime_unchecked);
+
+ // Load pointers to string data.
+ Node* const subject_ptr =
+ subject_to_direct.PointerToData(&call_runtime_unchecked);
+ Node* const search_ptr =
+ search_to_direct.PointerToData(&call_runtime_unchecked);
+
+ Node* const subject_offset = subject_to_direct.offset();
+ Node* const search_offset = search_to_direct.offset();
+
+  // As in String::IndexOf, the actual matching is done by the optimized
+ // SearchString method in string-search.h. Dispatch based on string instance
+ // types, then call straight into C++ for matching.
- DispatchOnStringInstanceType(search_string_instance_type,
- &if_onebyte_sequential, &if_onebyte_external,
- &call_runtime_unchecked);
+ CSA_ASSERT(this, IntPtrGreaterThan(search_length, int_zero));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(start_position, int_zero));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(subject_length, start_position));
+ CSA_ASSERT(this,
+ IntPtrLessThanOrEqual(search_length,
+ IntPtrSub(subject_length, start_position)));
- BIND(&if_onebyte_sequential);
+ Label one_one(this), one_two(this), two_one(this), two_two(this);
+ DispatchOnStringEncodings(subject_to_direct.instance_type(),
+ search_to_direct.instance_type(), &one_one,
+ &one_two, &two_one, &two_two);
+
+ typedef const uint8_t onebyte_t;
+ typedef const uc16 twobyte_t;
+
+ BIND(&one_one);
+ {
+ Node* const adjusted_subject_ptr = PointerToStringDataAtIndex(
+ subject_ptr, subject_offset, String::ONE_BYTE_ENCODING);
+ Node* const adjusted_search_ptr = PointerToStringDataAtIndex(
+ search_ptr, search_offset, String::ONE_BYTE_ENCODING);
+
+ Label direct_memchr_call(this), generic_fast_path(this);
+ Branch(IntPtrEqual(search_length, IntPtrConstant(1)), &direct_memchr_call,
+ &generic_fast_path);
+
+ // An additional fast path that calls directly into memchr for 1-length
+ // search strings.
+ BIND(&direct_memchr_call);
{
- var_needle_byte.Bind(
- ChangeInt32ToIntPtr(LoadOneByteChar(search_string, int_zero)));
- Goto(&continue_fast_path);
+ Node* const string_addr = IntPtrAdd(adjusted_subject_ptr, start_position);
+ Node* const search_length = IntPtrSub(subject_length, start_position);
+ Node* const search_byte =
+ ChangeInt32ToIntPtr(Load(MachineType::Uint8(), adjusted_search_ptr));
+
+ Node* const memchr =
+ ExternalConstant(ExternalReference::libc_memchr_function(isolate()));
+ Node* const result_address =
+ CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::IntPtr(), MachineType::UintPtr(), memchr,
+ string_addr, search_byte, search_length);
+ GotoIf(WordEqual(result_address, int_zero), &return_minus_1);
+ Node* const result_index =
+ IntPtrAdd(IntPtrSub(result_address, string_addr), start_position);
+ f_return(SmiTag(result_index));
}
- BIND(&if_onebyte_external);
+ BIND(&generic_fast_path);
{
- Node* const unpacked = TryDerefExternalString(
- search_string, search_string_instance_type, &call_runtime_unchecked);
- var_needle_byte.Bind(
- ChangeInt32ToIntPtr(LoadOneByteChar(unpacked, int_zero)));
- Goto(&continue_fast_path);
+ Node* const result = CallSearchStringRaw<onebyte_t, onebyte_t>(
+ adjusted_subject_ptr, subject_length, adjusted_search_ptr,
+ search_length, start_position);
+ f_return(SmiTag(result));
}
}
- BIND(&continue_fast_path);
+ BIND(&one_two);
+ {
+ Node* const adjusted_subject_ptr = PointerToStringDataAtIndex(
+ subject_ptr, subject_offset, String::ONE_BYTE_ENCODING);
+ Node* const adjusted_search_ptr = PointerToStringDataAtIndex(
+ search_ptr, search_offset, String::TWO_BYTE_ENCODING);
+
+ Node* const result = CallSearchStringRaw<onebyte_t, twobyte_t>(
+ adjusted_subject_ptr, subject_length, adjusted_search_ptr,
+ search_length, start_position);
+ f_return(SmiTag(result));
+ }
+
+ BIND(&two_one);
+ {
+ Node* const adjusted_subject_ptr = PointerToStringDataAtIndex(
+ subject_ptr, subject_offset, String::TWO_BYTE_ENCODING);
+ Node* const adjusted_search_ptr = PointerToStringDataAtIndex(
+ search_ptr, search_offset, String::ONE_BYTE_ENCODING);
+
+ Node* const result = CallSearchStringRaw<twobyte_t, onebyte_t>(
+ adjusted_subject_ptr, subject_length, adjusted_search_ptr,
+ search_length, start_position);
+ f_return(SmiTag(result));
+ }
+
+ BIND(&two_two);
{
- Node* needle_byte = var_needle_byte.value();
- Node* string_addr = var_string_addr.value();
- Node* search_length = IntPtrSub(string_length, start_position);
- // Call out to the highly optimized memchr to perform the actual byte
- // search.
- Node* memchr =
- ExternalConstant(ExternalReference::libc_memchr_function(isolate()));
- Node* result_address =
- CallCFunction3(MachineType::Pointer(), MachineType::Pointer(),
- MachineType::IntPtr(), MachineType::UintPtr(), memchr,
- string_addr, needle_byte, search_length);
- GotoIf(WordEqual(result_address, int_zero), &return_minus_1);
- Node* result_index =
- IntPtrAdd(IntPtrSub(result_address, string_addr), start_position);
- f_return(SmiTag(result_index));
+ Node* const adjusted_subject_ptr = PointerToStringDataAtIndex(
+ subject_ptr, subject_offset, String::TWO_BYTE_ENCODING);
+ Node* const adjusted_search_ptr = PointerToStringDataAtIndex(
+ search_ptr, search_offset, String::TWO_BYTE_ENCODING);
+
+ Node* const result = CallSearchStringRaw<twobyte_t, twobyte_t>(
+ adjusted_subject_ptr, subject_length, adjusted_search_ptr,
+ search_length, start_position);
+ f_return(SmiTag(result));
}
BIND(&return_minus_1);
@@ -810,7 +883,7 @@ void StringBuiltinsAssembler::StringIndexOf(
BIND(&zero_length_needle);
{
Comment("0-length search_string");
- f_return(SmiTag(IntPtrMin(string_length, start_position)));
+ f_return(SmiTag(IntPtrMin(subject_length, start_position)));
}
BIND(&call_runtime_unchecked);
@@ -819,7 +892,7 @@ void StringBuiltinsAssembler::StringIndexOf(
// are already known due to type checks in this stub.
Comment("Call Runtime Unchecked");
Node* result = CallRuntime(Runtime::kStringIndexOfUnchecked, SmiConstant(0),
- receiver, search_string, position);
+ subject_string, search_string, position);
f_return(result);
}
}
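
The one-byte/one-byte, single-character fast path above delegates to libc memchr and then converts the returned pointer back into a string index. A standalone sketch of that pointer-to-index translation:

    #include <cstdint>
    #include <cstring>

    // Index of |needle| in |subject| at or after |start|, or -1 if absent.
    intptr_t MemchrIndexOfSketch(const uint8_t* subject, intptr_t subject_length,
                                 uint8_t needle, intptr_t start) {
      const uint8_t* search_start = subject + start;
      const void* hit = std::memchr(search_start, needle, subject_length - start);
      if (hit == nullptr) return -1;
      return static_cast<const uint8_t*>(hit) - subject;  // pointer -> index
    }
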
@@ -979,18 +1052,78 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol(
GotoIf(IsNullOrUndefined(object), &out);
// Fall back to a slow lookup of {object[symbol]}.
+ //
+ // The spec uses GetMethod({object}, {symbol}), which has a few quirks:
+ // * null values are turned into undefined, and
+ // * an exception is thrown if the value is not undefined, null, or callable.
+ // We handle the former by jumping to {out} for null values as well, while
+ // the latter is already handled by the Call({maybe_func}) operation.
Node* const maybe_func = GetProperty(context, object, symbol);
GotoIf(IsUndefined(maybe_func), &out);
+ GotoIf(IsNull(maybe_func), &out);
// Attempt to call the function.
-
Node* const result = generic_call(maybe_func);
Return(result);
BIND(&out);
}
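
The GetMethod quirks called out in the comment reduce to: undefined and null both mean "no method here", and callability is only enforced at the call itself. A hedged sketch with std::function standing in for a JS callable:

    #include <functional>

    // An empty std::function models both undefined and null (GetMethod folds
    // null into undefined); Call() itself rejects non-callable values.
    using MaybeMethodSketch = std::function<int()>;

    int MaybeCallAtSymbolSketch(const MaybeMethodSketch& maybe_func,
                                const std::function<int()>& generic_fallback) {
      if (!maybe_func) return generic_fallback();  // jump to {out} above
      return maybe_func();
    }
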
+compiler::Node* StringBuiltinsAssembler::IndexOfDollarChar(Node* const context,
+ Node* const string) {
+ CSA_ASSERT(this, IsString(string));
+
+ Node* const dollar_string = HeapConstant(
+ isolate()->factory()->LookupSingleCharacterStringFromCode('$'));
+ Node* const dollar_ix = CallBuiltin(Builtins::kStringIndexOf, context, string,
+ dollar_string, SmiConstant(0));
+
+ CSA_ASSERT(this, TaggedIsSmi(dollar_ix));
+ return dollar_ix;
+}
+
+compiler::Node* StringBuiltinsAssembler::GetSubstitution(
+ Node* context, Node* subject_string, Node* match_start_index,
+ Node* match_end_index, Node* replace_string) {
+ CSA_ASSERT(this, IsString(subject_string));
+ CSA_ASSERT(this, IsString(replace_string));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(match_start_index));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(match_end_index));
+
+ VARIABLE(var_result, MachineRepresentation::kTagged, replace_string);
+ Label runtime(this), out(this);
+
+ // In this primitive implementation we simply look for the next '$' char in
+ // {replace_string}. If it doesn't exist, we can simply return
+ // {replace_string} itself. If it does, then we delegate to
+ // String::GetSubstitution, passing in the index of the first '$' to avoid
+ // repeated scanning work.
+ // TODO(jgruber): Possibly extend this in the future to handle more complex
+ // cases without runtime calls.
+
+ Node* const dollar_index = IndexOfDollarChar(context, replace_string);
+ Branch(SmiIsNegative(dollar_index), &out, &runtime);
+
+ BIND(&runtime);
+ {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(dollar_index));
+
+ Callable substring_callable = CodeFactory::SubString(isolate());
+ Node* const matched = CallStub(substring_callable, context, subject_string,
+ match_start_index, match_end_index);
+ Node* const replacement_string =
+ CallRuntime(Runtime::kGetSubstitution, context, matched, subject_string,
+ match_start_index, replace_string, dollar_index);
+ var_result.Bind(replacement_string);
+
+ Goto(&out);
+ }
+
+ BIND(&out);
+ return var_result.value();
+}
+
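
The fast path in GetSubstitution rests on one observation: a replacement string containing no '$' needs no substitution processing at all, so a single scan decides between returning it verbatim and calling into the runtime. Sketch:

    #include <string>

    // Returns true (plus the first '$' position, to avoid re-scanning) when
    // the replacement needs full GetSubstitution treatment; false means the
    // replacement string can be used as-is.
    bool NeedsSubstitutionSketch(const std::string& replace_string,
                                 size_t* dollar_index) {
      size_t ix = replace_string.find('$');
      if (ix == std::string::npos) return false;
      *dollar_index = ix;
      return true;
    }
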
// ES6 #sec-string.prototype.replace
TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
Label out(this);
@@ -1009,9 +1142,7 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
MaybeCallFunctionAtSymbol(
context, search, isolate()->factory()->replace_symbol(),
[=]() {
- Callable tostring_callable = CodeFactory::ToString(isolate());
- Node* const subject_string =
- CallStub(tostring_callable, context, receiver);
+ Node* const subject_string = ToString_Inline(context, receiver);
Callable replace_callable = CodeFactory::RegExpReplace(isolate());
return CallStub(replace_callable, context, search, subject_string,
@@ -1024,16 +1155,15 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
// Convert {receiver} and {search} to strings.
- Callable tostring_callable = CodeFactory::ToString(isolate());
Callable indexof_callable = CodeFactory::StringIndexOf(isolate());
- Node* const subject_string = CallStub(tostring_callable, context, receiver);
- Node* const search_string = CallStub(tostring_callable, context, search);
+ Node* const subject_string = ToString_Inline(context, receiver);
+ Node* const search_string = ToString_Inline(context, search);
Node* const subject_length = LoadStringLength(subject_string);
Node* const search_length = LoadStringLength(search_string);
- // Fast-path single-char {search}, long {receiver}, and simple string
+ // Fast-path single-char {search}, long cons {receiver}, and simple string
// {replace}.
{
Label next(this);
@@ -1043,11 +1173,10 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
GotoIf(TaggedIsSmi(replace), &next);
GotoIfNot(IsString(replace), &next);
- Node* const dollar_string = HeapConstant(
- isolate()->factory()->LookupSingleCharacterStringFromCode('$'));
- Node* const dollar_ix =
- CallStub(indexof_callable, context, replace, dollar_string, smi_zero);
- GotoIfNot(SmiIsNegative(dollar_ix), &next);
+ Node* const subject_instance_type = LoadInstanceType(subject_string);
+ GotoIfNot(IsConsStringInstanceType(subject_instance_type), &next);
+
+ GotoIf(TaggedIsPositiveSmi(IndexOfDollarChar(context, replace)), &next);
// Searching by traversing a cons string tree and replace with cons of
// slices works only when the replaced string is a single character, being
@@ -1083,7 +1212,7 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
// TODO(jgruber): Could introduce ToStringSideeffectsStub which only
// performs observable parts of ToString.
- CallStub(tostring_callable, context, replace);
+ ToString_Inline(context, replace);
Goto(&return_subject);
BIND(&return_subject);
@@ -1126,8 +1255,7 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
Node* const replacement =
CallJS(call_callable, context, replace, UndefinedConstant(),
search_string, match_start_index, subject_string);
- Node* const replacement_string =
- CallStub(tostring_callable, context, replacement);
+ Node* const replacement_string = ToString_Inline(context, replacement);
var_result.Bind(CallStub(stringadd_callable, context, var_result.value(),
replacement_string));
Goto(&out);
@@ -1135,16 +1263,12 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
BIND(&if_notcallablereplace);
{
- Node* const replace_string = CallStub(tostring_callable, context, replace);
-
- // TODO(jgruber): Simplified GetSubstitution implementation in CSA.
- Node* const matched = CallStub(substring_callable, context, subject_string,
- match_start_index, match_end_index);
- Node* const replacement_string =
- CallRuntime(Runtime::kGetSubstitution, context, matched, subject_string,
- match_start_index, replace_string);
- var_result.Bind(CallStub(stringadd_callable, context, var_result.value(),
- replacement_string));
+ Node* const replace_string = ToString_Inline(context, replace);
+ Node* const replacement =
+ GetSubstitution(context, subject_string, match_start_index,
+ match_end_index, replace_string);
+ var_result.Bind(
+ CallStub(stringadd_callable, context, var_result.value(), replacement));
Goto(&out);
}
@@ -1158,6 +1282,89 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
}
}
+// ES6 section 21.1.3.18 String.prototype.slice ( start, end )
+TF_BUILTIN(StringPrototypeSlice, StringBuiltinsAssembler) {
+ Label out(this);
+ VARIABLE(var_start, MachineRepresentation::kTagged);
+ VARIABLE(var_end, MachineRepresentation::kTagged);
+
+ const int kStart = 0;
+ const int kEnd = 1;
+ Node* argc =
+ ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
+ CodeStubArguments args(this, argc);
+ Node* const receiver = args.GetReceiver();
+ Node* const start =
+ args.GetOptionalArgumentValue(kStart, UndefinedConstant());
+ Node* const end = args.GetOptionalArgumentValue(kEnd, UndefinedConstant());
+ Node* const context = Parameter(BuiltinDescriptor::kContext);
+
+ Node* const smi_zero = SmiConstant(0);
+
+ // 1. Let O be ? RequireObjectCoercible(this value).
+ RequireObjectCoercible(context, receiver, "String.prototype.slice");
+
+ // 2. Let S be ? ToString(O).
+ Callable tostring_callable = CodeFactory::ToString(isolate());
+ Node* const subject_string = CallStub(tostring_callable, context, receiver);
+
+ // 3. Let len be the number of elements in S.
+ Node* const length = LoadStringLength(subject_string);
+
+ // Conversions and bounds-checks for {start}.
+ ConvertAndBoundsCheckStartArgument(context, &var_start, start, length);
+
+ // 5. If end is undefined, let intEnd be len;
+ var_end.Bind(length);
+ GotoIf(WordEqual(end, UndefinedConstant()), &out);
+
+ // else let intEnd be ? ToInteger(end).
+ Node* const end_int =
+ ToInteger(context, end, CodeStubAssembler::kTruncateMinusZero);
+
+ // 7. If intEnd < 0, let to be max(len + intEnd, 0);
+ // otherwise let to be min(intEnd, len).
+ Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
+ Branch(TaggedIsSmi(end_int), &if_issmi, &if_isheapnumber);
+
+ BIND(&if_issmi);
+ {
+ Node* const length_plus_end = SmiAdd(length, end_int);
+ var_end.Bind(Select(SmiLessThan(end_int, smi_zero),
+ [&] { return SmiMax(length_plus_end, smi_zero); },
+ [&] { return SmiMin(length, end_int); },
+ MachineRepresentation::kTagged));
+ Goto(&out);
+ }
+
+ BIND(&if_isheapnumber);
+ {
+ // If {end} is a heap number, it is definitely out of bounds. If it is
+    // negative, {int_end} = max({length} + {int_end}, 0) = 0. If it is
+    // positive, set {int_end} to {length}, which ultimately results in
+ // returning an empty string.
+ Node* const float_zero = Float64Constant(0.);
+ Node* const end_float = LoadHeapNumberValue(end_int);
+ var_end.Bind(SelectTaggedConstant(Float64LessThan(end_float, float_zero),
+ smi_zero, length));
+ Goto(&out);
+ }
+
+ Label return_emptystring(this);
+ BIND(&out);
+ {
+ GotoIf(SmiLessThanOrEqual(var_end.value(), var_start.value()),
+ &return_emptystring);
+ Node* const result =
+ SubString(context, subject_string, var_start.value(), var_end.value(),
+ SubStringFlags::FROM_TO_ARE_BOUNDED);
+ args.PopAndReturn(result);
+ }
+
+ BIND(&return_emptystring);
+ args.PopAndReturn(EmptyStringConstant());
+}
+
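
Collapsing the slice bounds handling above into scalar form (a sketch of the clamping steps; an undefined {end} is assumed to have been replaced by the length already, as the code above does):

    #include <algorithm>
    #include <cstdint>
    #include <string>

    std::string SliceSketch(const std::string& s, int64_t start, int64_t end) {
      const int64_t len = static_cast<int64_t>(s.size());
      int64_t from = start < 0 ? std::max<int64_t>(len + start, int64_t{0})
                               : std::min(start, len);
      int64_t to = end < 0 ? std::max<int64_t>(len + end, int64_t{0})
                           : std::min(end, len);
      if (to <= from) return "";  // the return_emptystring label above
      return s.substr(static_cast<size_t>(from), static_cast<size_t>(to - from));
    }
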
// ES6 section 21.1.3.19 String.prototype.split ( separator, limit )
TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
Label out(this);
@@ -1176,9 +1383,7 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
MaybeCallFunctionAtSymbol(
context, separator, isolate()->factory()->split_symbol(),
[=]() {
- Callable tostring_callable = CodeFactory::ToString(isolate());
- Node* const subject_string =
- CallStub(tostring_callable, context, receiver);
+ Node* const subject_string = ToString_Inline(context, receiver);
Callable split_callable = CodeFactory::RegExpSplit(isolate());
return CallStub(split_callable, context, separator, subject_string,
@@ -1191,14 +1396,12 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
// String and integer conversions.
- Callable tostring_callable = CodeFactory::ToString(isolate());
- Node* const subject_string = CallStub(tostring_callable, context, receiver);
+ Node* const subject_string = ToString_Inline(context, receiver);
Node* const limit_number =
Select(IsUndefined(limit), [=]() { return NumberConstant(kMaxUInt32); },
[=]() { return ToUint32(context, limit); },
MachineRepresentation::kTagged);
- Node* const separator_string =
- CallStub(tostring_callable, context, separator);
+ Node* const separator_string = ToString_Inline(context, separator);
// Shortcut for {limit} == 0.
{
@@ -1259,8 +1462,8 @@ TF_BUILTIN(StringPrototypeSplit, StringBuiltinsAssembler) {
}
// ES6 #sec-string.prototype.substr
-TF_BUILTIN(StringPrototypeSubstr, CodeStubAssembler) {
- Label out(this), handle_length(this);
+TF_BUILTIN(StringPrototypeSubstr, StringBuiltinsAssembler) {
+ Label out(this);
VARIABLE(var_start, MachineRepresentation::kTagged);
VARIABLE(var_length, MachineRepresentation::kTagged);
@@ -1279,94 +1482,62 @@ TF_BUILTIN(StringPrototypeSubstr, CodeStubAssembler) {
Node* const string_length = LoadStringLength(string);
// Conversions and bounds-checks for {start}.
- {
- Node* const start_int =
- ToInteger(context, start, CodeStubAssembler::kTruncateMinusZero);
-
- Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
- Branch(TaggedIsSmi(start_int), &if_issmi, &if_isheapnumber);
-
- BIND(&if_issmi);
- {
- Node* const length_plus_start = SmiAdd(string_length, start_int);
- var_start.Bind(Select(SmiLessThan(start_int, zero),
- [&] { return SmiMax(length_plus_start, zero); },
- [&] { return start_int; },
- MachineRepresentation::kTagged));
- Goto(&handle_length);
- }
-
- BIND(&if_isheapnumber);
- {
- // If {start} is a heap number, it is definitely out of bounds. If it is
-      // negative, {start} = max({string_length} + {start}, 0) = 0. If it is
-      // positive, set {start} to {string_length}, which ultimately results in
- // returning an empty string.
- Node* const float_zero = Float64Constant(0.);
- Node* const start_float = LoadHeapNumberValue(start_int);
- var_start.Bind(SelectTaggedConstant(
- Float64LessThan(start_float, float_zero), zero, string_length));
- Goto(&handle_length);
- }
- }
+ ConvertAndBoundsCheckStartArgument(context, &var_start, start, string_length);
// Conversions and bounds-checks for {length}.
- BIND(&handle_length);
+ Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
+
+ // Default to {string_length} if {length} is undefined.
{
- Label if_issmi(this), if_isheapnumber(this, Label::kDeferred);
+ Label if_isundefined(this, Label::kDeferred), if_isnotundefined(this);
+ Branch(WordEqual(length, UndefinedConstant()), &if_isundefined,
+ &if_isnotundefined);
- // Default to {string_length} if {length} is undefined.
- {
- Label if_isundefined(this, Label::kDeferred), if_isnotundefined(this);
- Branch(WordEqual(length, UndefinedConstant()), &if_isundefined,
- &if_isnotundefined);
+ BIND(&if_isundefined);
+ var_length.Bind(string_length);
+ Goto(&if_issmi);
- BIND(&if_isundefined);
- var_length.Bind(string_length);
- Goto(&if_issmi);
+ BIND(&if_isnotundefined);
+ var_length.Bind(
+ ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
+ }
- BIND(&if_isnotundefined);
- var_length.Bind(
- ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero));
- }
+ Branch(TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
- Branch(TaggedIsSmi(var_length.value()), &if_issmi, &if_isheapnumber);
+  // Set {length} to min(max({length}, 0), {string_length} - {start}).
+ BIND(&if_issmi);
+ {
+ Node* const positive_length = SmiMax(var_length.value(), zero);
-    // Set {length} to min(max({length}, 0), {string_length} - {start}).
- BIND(&if_issmi);
- {
- Node* const positive_length = SmiMax(var_length.value(), zero);
+ Node* const minimal_length = SmiSub(string_length, var_start.value());
+ var_length.Bind(SmiMin(positive_length, minimal_length));
- Node* const minimal_length = SmiSub(string_length, var_start.value());
- var_length.Bind(SmiMin(positive_length, minimal_length));
+ GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
+ Return(EmptyStringConstant());
+ }
- GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
- Return(EmptyStringConstant());
- }
+ BIND(&if_isheapnumber);
+ {
+ // If {length} is a heap number, it is definitely out of bounds. There are
+ // two cases according to the spec: if it is negative, "" is returned; if
+ // it is positive, then length is set to {string_length} - {start}.
- BIND(&if_isheapnumber);
- {
- // If {length} is a heap number, it is definitely out of bounds. There are
- // two cases according to the spec: if it is negative, "" is returned; if
- // it is positive, then length is set to {string_length} - {start}.
+ CSA_ASSERT(this, IsHeapNumberMap(LoadMap(var_length.value())));
- CSA_ASSERT(this, IsHeapNumberMap(LoadMap(var_length.value())));
+ Label if_isnegative(this), if_ispositive(this);
+ Node* const float_zero = Float64Constant(0.);
+ Node* const length_float = LoadHeapNumberValue(var_length.value());
+ Branch(Float64LessThan(length_float, float_zero), &if_isnegative,
+ &if_ispositive);
- Label if_isnegative(this), if_ispositive(this);
- Node* const float_zero = Float64Constant(0.);
- Node* const length_float = LoadHeapNumberValue(var_length.value());
- Branch(Float64LessThan(length_float, float_zero), &if_isnegative,
- &if_ispositive);
+ BIND(&if_isnegative);
+ Return(EmptyStringConstant());
- BIND(&if_isnegative);
+ BIND(&if_ispositive);
+ {
+ var_length.Bind(SmiSub(string_length, var_start.value()));
+ GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
Return(EmptyStringConstant());
-
- BIND(&if_ispositive);
- {
- var_length.Bind(SmiSub(string_length, var_start.value()));
- GotoIfNot(SmiLessThanOrEqual(var_length.value(), zero), &out);
- Return(EmptyStringConstant());
- }
}
}
diff --git a/deps/v8/src/builtins/builtins-string-gen.h b/deps/v8/src/builtins/builtins-string-gen.h
new file mode 100644
index 0000000000..399f565e55
--- /dev/null
+++ b/deps/v8/src/builtins/builtins-string-gen.h
@@ -0,0 +1,95 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BUILTINS_BUILTINS_STRING_GEN_H_
+#define V8_BUILTINS_BUILTINS_STRING_GEN_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class StringBuiltinsAssembler : public CodeStubAssembler {
+ public:
+ explicit StringBuiltinsAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ // ES#sec-getsubstitution
+ Node* GetSubstitution(Node* context, Node* subject_string,
+ Node* match_start_index, Node* match_end_index,
+ Node* replace_string);
+ void StringEqual_Core(Node* context, Node* lhs, Node* lhs_instance_type,
+ Node* lhs_length, Node* rhs, Node* rhs_instance_type,
+ Label* if_equal, Label* if_not_equal,
+ Label* if_notbothdirectonebyte);
+
+ protected:
+ Node* DirectStringData(Node* string, Node* string_instance_type);
+
+ void DispatchOnStringEncodings(Node* const lhs_instance_type,
+ Node* const rhs_instance_type,
+ Label* if_one_one, Label* if_one_two,
+ Label* if_two_one, Label* if_two_two);
+
+ template <typename SubjectChar, typename PatternChar>
+ Node* CallSearchStringRaw(Node* const subject_ptr, Node* const subject_length,
+ Node* const search_ptr, Node* const search_length,
+ Node* const start_position);
+
+ Node* PointerToStringDataAtIndex(Node* const string_data, Node* const index,
+ String::Encoding encoding);
+
+ // substr and slice have a common way of handling the {start} argument.
+ void ConvertAndBoundsCheckStartArgument(Node* context, Variable* var_start,
+ Node* start, Node* string_length);
+
+ void GenerateStringEqual(Node* context, Node* left, Node* right);
+ void GenerateStringRelationalComparison(Node* context, Node* left,
+ Node* right,
+ RelationalComparisonMode mode);
+
+ Node* ToSmiBetweenZeroAnd(Node* context, Node* value, Node* limit);
+
+ Node* LoadSurrogatePairAt(Node* string, Node* length, Node* index,
+ UnicodeEncoding encoding);
+
+ void StringIndexOf(Node* const subject_string,
+ Node* const subject_instance_type,
+ Node* const search_string,
+ Node* const search_instance_type, Node* const position,
+ std::function<void(Node*)> f_return);
+
+ Node* IndexOfDollarChar(Node* const context, Node* const string);
+
+ Node* IsNullOrUndefined(Node* const value);
+ void RequireObjectCoercible(Node* const context, Node* const value,
+ const char* method_name);
+
+ Node* SmiIsNegative(Node* const value) {
+ return SmiLessThan(value, SmiConstant(0));
+ }
+
+ // Implements boilerplate logic for {match, split, replace, search} of the
+ // form:
+ //
+ // if (!IS_NULL_OR_UNDEFINED(object)) {
+ // var maybe_function = object[symbol];
+ // if (!IS_UNDEFINED(maybe_function)) {
+ // return %_Call(maybe_function, ...);
+ // }
+ // }
+ //
+ // Contains fast paths for Smi and RegExp objects.
+ typedef std::function<Node*()> NodeFunction0;
+ typedef std::function<Node*(Node* fn)> NodeFunction1;
+ void MaybeCallFunctionAtSymbol(Node* const context, Node* const object,
+ Handle<Symbol> symbol,
+ const NodeFunction0& regexp_call,
+ const NodeFunction1& generic_call);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BUILTINS_BUILTINS_STRING_GEN_H_
diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc
index f064f0bf9e..a6b1d02fa9 100644
--- a/deps/v8/src/builtins/builtins-string.cc
+++ b/deps/v8/src/builtins/builtins-string.cc
@@ -217,7 +217,7 @@ BUILTIN(StringPrototypeLastIndexOf) {
//
// This function is implementation specific. For now, we do not
// do anything locale specific.
-// If internationalization is enabled, then i18n.js will override this function
+// If internationalization is enabled, then intl.js will override this function
// and provide the proper functionality, so this is just a fallback.
BUILTIN(StringPrototypeLocaleCompare) {
HandleScope handle_scope(isolate);
@@ -264,11 +264,11 @@ BUILTIN(StringPrototypeLocaleCompare) {
return Smi::FromInt(str1_length - str2_length);
}
-#ifndef V8_I18N_SUPPORT
+#ifndef V8_INTL_SUPPORT
// ES6 section 21.1.3.12 String.prototype.normalize ( [form] )
//
// Simply checks the argument is valid and returns the string itself.
-// If internationalization is enabled, then i18n.js will override this function
+// If internationalization is enabled, then intl.js will override this function
// and provide the proper functionality, so this is just a fallback.
BUILTIN(StringPrototypeNormalize) {
HandleScope handle_scope(isolate);
@@ -298,7 +298,7 @@ BUILTIN(StringPrototypeNormalize) {
return *string;
}
-#endif // !V8_I18N_SUPPORT
+#endif // !V8_INTL_SUPPORT
BUILTIN(StringPrototypeStartsWith) {
HandleScope handle_scope(isolate);
diff --git a/deps/v8/src/builtins/builtins-typedarray-gen.cc b/deps/v8/src/builtins/builtins-typedarray-gen.cc
index c066a5e935..870be3b216 100644
--- a/deps/v8/src/builtins/builtins-typedarray-gen.cc
+++ b/deps/v8/src/builtins/builtins-typedarray-gen.cc
@@ -25,25 +25,20 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
const char* method_name,
IterationKind iteration_kind);
- void LoadMapAndElementsSize(Node* const array, Variable* typed_map,
- Variable* size);
-
- Node* CalculateExternalPointer(Node* const backing_store,
- Node* const byte_offset);
- void DoInitialize(Node* const holder, Node* length, Node* const maybe_buffer,
- Node* const byte_offset, Node* byte_length,
- Node* const initialize, Node* const context);
- void InitializeBasedOnLength(Node* const holder, Node* const length,
- Node* const element_size,
- Node* const byte_offset, Node* const initialize,
- Node* const context);
+ void SetupTypedArray(Node* holder, Node* length, Node* byte_offset,
+ Node* byte_length);
+ void AttachBuffer(Node* holder, Node* buffer, Node* map, Node* length,
+ Node* byte_offset);
+
+ Node* LoadMapForType(Node* array);
+ Node* CalculateExternalPointer(Node* backing_store, Node* byte_offset);
Node* LoadDataPtr(Node* typed_array);
Node* ByteLengthIsValid(Node* byte_length);
};
-void TypedArrayBuiltinsAssembler::LoadMapAndElementsSize(Node* const array,
- Variable* typed_map,
- Variable* size) {
+compiler::Node* TypedArrayBuiltinsAssembler::LoadMapForType(Node* array) {
+ CSA_ASSERT(this, IsJSTypedArray(array));
+
Label unreachable(this), done(this);
Label uint8_elements(this), uint8_clamped_elements(this), int8_elements(this),
uint16_elements(this), int16_elements(this), uint32_elements(this),
@@ -62,21 +57,21 @@ void TypedArrayBuiltinsAssembler::LoadMapAndElementsSize(Node* const array,
DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
+ VARIABLE(var_typed_map, MachineRepresentation::kTagged);
+
Node* array_map = LoadMap(array);
Node* elements_kind = LoadMapElementsKind(array_map);
Switch(elements_kind, &unreachable, elements_kinds, elements_kind_labels,
kTypedElementsKindCount);
for (int i = 0; i < static_cast<int>(kTypedElementsKindCount); i++) {
- Bind(elements_kind_labels[i]);
+ BIND(elements_kind_labels[i]);
{
ElementsKind kind = static_cast<ElementsKind>(elements_kinds[i]);
ExternalArrayType type =
isolate()->factory()->GetArrayTypeFromElementsKind(kind);
Handle<Map> map(isolate()->heap()->MapForFixedTypedArray(type));
- typed_map->Bind(HeapConstant(map));
- size->Bind(SmiConstant(static_cast<int>(
- isolate()->factory()->GetExternalArrayElementSize(type))));
+ var_typed_map.Bind(HeapConstant(map));
Goto(&done);
}
}
@@ -84,6 +79,7 @@ void TypedArrayBuiltinsAssembler::LoadMapAndElementsSize(Node* const array,
BIND(&unreachable);
{ Unreachable(); }
BIND(&done);
+ return var_typed_map.value();
}
// The byte_offset can be higher than Smi range, in which case to perform the
@@ -94,70 +90,131 @@ void TypedArrayBuiltinsAssembler::LoadMapAndElementsSize(Node* const array,
// bit platforms could theoretically have an offset up to 2^35 - 1, so we may
// need to convert the float heap number to an intptr.
compiler::Node* TypedArrayBuiltinsAssembler::CalculateExternalPointer(
- Node* const backing_store, Node* const byte_offset) {
+ Node* backing_store, Node* byte_offset) {
return IntPtrAdd(backing_store, ChangeNumberToIntPtr(byte_offset));
}
-void TypedArrayBuiltinsAssembler::DoInitialize(Node* const holder, Node* length,
- Node* const maybe_buffer,
- Node* const byte_offset,
- Node* byte_length,
- Node* const initialize,
- Node* const context) {
- static const int32_t fta_base_data_offset =
- FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
+// Set up the TypedArray that is under construction.
+// - Set the length.
+// - Set the byte_offset.
+// - Set the byte_length.
+// - Set EmbedderFields to 0.
+void TypedArrayBuiltinsAssembler::SetupTypedArray(Node* holder, Node* length,
+ Node* byte_offset,
+ Node* byte_length) {
+ CSA_ASSERT(this, IsJSTypedArray(holder));
+ CSA_ASSERT(this, TaggedIsSmi(length));
+ CSA_ASSERT(this, IsNumber(byte_offset));
+ CSA_ASSERT(this, IsNumber(byte_length));
+
+ StoreObjectField(holder, JSTypedArray::kLengthOffset, length);
+ StoreObjectField(holder, JSArrayBufferView::kByteOffsetOffset, byte_offset);
+ StoreObjectField(holder, JSArrayBufferView::kByteLengthOffset, byte_length);
+ for (int offset = JSTypedArray::kSize;
+ offset < JSTypedArray::kSizeWithEmbedderFields; offset += kPointerSize) {
+ StoreObjectField(holder, offset, SmiConstant(Smi::kZero));
+ }
+}
- Label setup_holder(this), alloc_array_buffer(this), aligned(this),
- allocate_elements(this), attach_buffer(this), done(this);
- VARIABLE(fixed_typed_map, MachineRepresentation::kTagged);
- VARIABLE(element_size, MachineRepresentation::kTagged);
- VARIABLE(total_size, MachineType::PointerRepresentation());
+// Attach an off-heap buffer to a TypedArray.
+void TypedArrayBuiltinsAssembler::AttachBuffer(Node* holder, Node* buffer,
+ Node* map, Node* length,
+ Node* byte_offset) {
+ CSA_ASSERT(this, IsJSTypedArray(holder));
+ CSA_ASSERT(this, IsJSArrayBuffer(buffer));
+ CSA_ASSERT(this, IsMap(map));
+ CSA_ASSERT(this, TaggedIsSmi(length));
+ CSA_ASSERT(this, IsNumber(byte_offset));
- // Make sure length is a Smi. The caller guarantees this is the case.
- length = ToInteger(context, length, CodeStubAssembler::kTruncateMinusZero);
+ StoreObjectField(holder, JSArrayBufferView::kBufferOffset, buffer);
+
+ Node* elements = Allocate(FixedTypedArrayBase::kHeaderSize);
+ StoreMapNoWriteBarrier(elements, map);
+ StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
+ StoreObjectFieldNoWriteBarrier(
+ elements, FixedTypedArrayBase::kBasePointerOffset, SmiConstant(0));
+
+ Node* backing_store = LoadObjectField(
+ buffer, JSArrayBuffer::kBackingStoreOffset, MachineType::Pointer());
+
+ Node* external_pointer = CalculateExternalPointer(backing_store, byte_offset);
+ StoreObjectFieldNoWriteBarrier(
+ elements, FixedTypedArrayBase::kExternalPointerOffset, external_pointer,
+ MachineType::PointerRepresentation());
+
+ StoreObjectField(holder, JSObject::kElementsOffset, elements);
+}
+
+TF_BUILTIN(TypedArrayInitializeWithBuffer, TypedArrayBuiltinsAssembler) {
+ Node* holder = Parameter(Descriptor::kHolder);
+ Node* length = Parameter(Descriptor::kLength);
+ Node* buffer = Parameter(Descriptor::kBuffer);
+ Node* element_size = Parameter(Descriptor::kElementSize);
+ Node* byte_offset = Parameter(Descriptor::kByteOffset);
+
+ CSA_ASSERT(this, IsJSTypedArray(holder));
CSA_ASSERT(this, TaggedIsSmi(length));
+ CSA_ASSERT(this, IsJSArrayBuffer(buffer));
+ CSA_ASSERT(this, TaggedIsSmi(element_size));
+ CSA_ASSERT(this, IsNumber(byte_offset));
+
+ Node* fixed_typed_map = LoadMapForType(holder);
- // byte_length can be -0, get rid of it.
- byte_length =
- ToInteger(context, byte_length, CodeStubAssembler::kTruncateMinusZero);
+ // SmiMul returns a heap number in case of Smi overflow.
+ Node* byte_length = SmiMul(length, element_size);
+ CSA_ASSERT(this, IsNumber(byte_length));
- GotoIfNot(IsNull(maybe_buffer), &setup_holder);
- // If the buffer is null, then we need a Smi byte_length. The caller
- // guarantees this is the case, because when byte_length >
- // TypedArrayMaxSizeInHeap, a buffer is allocated and passed in here.
- CSA_ASSERT(this, TaggedIsSmi(byte_length));
- Goto(&setup_holder);
+ SetupTypedArray(holder, length, byte_offset, byte_length);
+ AttachBuffer(holder, buffer, fixed_typed_map, length, byte_offset);
+ Return(UndefinedConstant());
+}
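
SmiMul overflowing into a heap number is what lets byte_length exceed Smi range here without a separate code path. A sketch of the equivalent overflow-aware multiply (the 31-bit Smi payload is an assumption about 32-bit targets):

    #include <cstdint>

    // Models SmiMul: the product is always computed, and the return value
    // says whether it still fits a Smi (false ~ "heap number" result).
    bool SmiMulSketch(int32_t length, int32_t element_size,
                      int64_t* byte_length) {
      int64_t product = static_cast<int64_t>(length) * element_size;
      *byte_length = product;
      constexpr int64_t kSmiMaxSketch = (int64_t{1} << 30) - 1;  // assumption
      return product <= kSmiMaxSketch;
    }
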
- BIND(&setup_holder);
- {
- LoadMapAndElementsSize(holder, &fixed_typed_map, &element_size);
- // Setup the holder (JSArrayBufferView).
- // - Set the length.
- // - Set the byte_offset.
- // - Set the byte_length.
- // - Set EmbedderFields to 0.
- StoreObjectField(holder, JSTypedArray::kLengthOffset, length);
- StoreObjectField(holder, JSArrayBufferView::kByteOffsetOffset, byte_offset);
- StoreObjectField(holder, JSArrayBufferView::kByteLengthOffset, byte_length);
- for (int offset = JSTypedArray::kSize;
- offset < JSTypedArray::kSizeWithEmbedderFields;
- offset += kPointerSize) {
- StoreObjectField(holder, offset, SmiConstant(Smi::kZero));
- }
+TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
+ Node* holder = Parameter(Descriptor::kHolder);
+ Node* length = Parameter(Descriptor::kLength);
+ Node* element_size = Parameter(Descriptor::kElementSize);
+ Node* initialize = Parameter(Descriptor::kInitialize);
+ Node* context = Parameter(Descriptor::kContext);
- Branch(IsNull(maybe_buffer), &alloc_array_buffer, &attach_buffer);
- }
+ CSA_ASSERT(this, IsJSTypedArray(holder));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(length));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
+ CSA_ASSERT(this, IsBoolean(initialize));
+
+ Node* byte_offset = SmiConstant(0);
+
+ static const int32_t fta_base_data_offset =
+ FixedTypedArrayBase::kDataOffset - kHeapObjectTag;
+
+ Label setup_holder(this), allocate_on_heap(this), aligned(this),
+ allocate_elements(this), allocate_off_heap(this),
+ allocate_off_heap_no_init(this), attach_buffer(this), done(this);
+ VARIABLE(var_total_size, MachineType::PointerRepresentation());
+
+ // SmiMul returns a heap number in case of Smi overflow.
+ Node* byte_length = SmiMul(length, element_size);
+ CSA_ASSERT(this, IsNumber(byte_length));
+
+ SetupTypedArray(holder, length, byte_offset, byte_length);
- BIND(&alloc_array_buffer);
+ Node* fixed_typed_map = LoadMapForType(holder);
+ GotoIf(TaggedIsNotSmi(byte_length), &allocate_off_heap);
+ GotoIf(SmiGreaterThan(byte_length,
+ SmiConstant(FLAG_typed_array_max_size_in_heap)),
+ &allocate_off_heap);
+ Goto(&allocate_on_heap);
+
+ BIND(&allocate_on_heap);
{
+ CSA_ASSERT(this, TaggedIsPositiveSmi(byte_length));
// Allocate a new ArrayBuffer and initialize it with empty properties and
// elements.
- Node* const native_context = LoadNativeContext(context);
- Node* const map =
+ Node* native_context = LoadNativeContext(context);
+ Node* map =
LoadContextElement(native_context, Context::ARRAY_BUFFER_MAP_INDEX);
Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
- Node* const buffer = Allocate(JSArrayBuffer::kSizeWithEmbedderFields);
+ Node* buffer = Allocate(JSArrayBuffer::kSizeWithEmbedderFields);
StoreMapNoWriteBarrier(buffer, map);
StoreObjectFieldNoWriteBarrier(buffer, JSArray::kPropertiesOffset,
empty_fixed_array);
@@ -189,7 +246,7 @@ void TypedArrayBuiltinsAssembler::DoInitialize(Node* const holder, Node* length,
StoreObjectField(holder, JSArrayBufferView::kBufferOffset, buffer);
// Check the alignment.
- GotoIf(SmiEqual(SmiMod(element_size.value(), SmiConstant(kObjectAlignment)),
+ GotoIf(SmiEqual(SmiMod(element_size, SmiConstant(kObjectAlignment)),
SmiConstant(0)),
&aligned);
@@ -198,14 +255,14 @@ void TypedArrayBuiltinsAssembler::DoInitialize(Node* const holder, Node* length,
Node* aligned_header_size =
IntPtrConstant(FixedTypedArrayBase::kHeaderSize + kObjectAlignmentMask);
Node* size = IntPtrAdd(SmiToWord(byte_length), aligned_header_size);
- total_size.Bind(WordAnd(size, IntPtrConstant(~kObjectAlignmentMask)));
+ var_total_size.Bind(WordAnd(size, IntPtrConstant(~kObjectAlignmentMask)));
Goto(&allocate_elements);
}
BIND(&aligned);
{
Node* header_size = IntPtrConstant(FixedTypedArrayBase::kHeaderSize);
- total_size.Bind(IntPtrAdd(SmiToWord(byte_length), header_size));
+ var_total_size.Bind(IntPtrAdd(SmiToWord(byte_length), header_size));
Goto(&allocate_elements);
}
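The not_aligned path rounds the element store size up to the object alignment by adding the alignment mask before masking it off. A worked standalone example, with assumed values kObjectAlignment = 8 and kHeaderSize = 16:

    #include <cstddef>

    // size = byte_length + (kHeaderSize + kObjectAlignmentMask), then
    // size & ~kObjectAlignmentMask, as in the two bindings above.
    size_t AlignedTotalSize(size_t byte_length) {
      const size_t kObjectAlignmentMask = 8 - 1;   // assumed alignment of 8
      const size_t kHeaderSize = 16;               // assumed header size
      size_t size = byte_length + kHeaderSize + kObjectAlignmentMask;
      return size & ~kObjectAlignmentMask;
    }
    // AlignedTotalSize(3) == 24: 3 + 16 = 19, rounded up to the next
    // multiple of 8.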
@@ -213,7 +270,7 @@ void TypedArrayBuiltinsAssembler::DoInitialize(Node* const holder, Node* length,
{
// Allocate a FixedTypedArray and set the length, base pointer and external
// pointer.
- CSA_ASSERT(this, IsRegularHeapObjectSize(total_size.value()));
+ CSA_ASSERT(this, IsRegularHeapObjectSize(var_total_size.value()));
Node* elements;
int heap_alignment =
@@ -221,12 +278,12 @@ void TypedArrayBuiltinsAssembler::DoInitialize(Node* const holder, Node* length,
if (UnalignedLoadSupported(MachineType::Float64(), heap_alignment) &&
UnalignedStoreSupported(MachineType::Float64(), heap_alignment)) {
- elements = AllocateInNewSpace(total_size.value());
+ elements = AllocateInNewSpace(var_total_size.value());
} else {
- elements = AllocateInNewSpace(total_size.value(), kDoubleAlignment);
+ elements = AllocateInNewSpace(var_total_size.value(), kDoubleAlignment);
}
- StoreMapNoWriteBarrier(elements, fixed_typed_map.value());
+ StoreMapNoWriteBarrier(elements, fixed_typed_map);
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
StoreObjectFieldNoWriteBarrier(
elements, FixedTypedArrayBase::kBasePointerOffset, elements);
@@ -250,102 +307,50 @@ void TypedArrayBuiltinsAssembler::DoInitialize(Node* const holder, Node* length,
Goto(&done);
}
- BIND(&attach_buffer);
- {
- StoreObjectField(holder, JSArrayBufferView::kBufferOffset, maybe_buffer);
-
- Node* elements = Allocate(FixedTypedArrayBase::kHeaderSize);
- StoreMapNoWriteBarrier(elements, fixed_typed_map.value());
- StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset, length);
- StoreObjectFieldNoWriteBarrier(
- elements, FixedTypedArrayBase::kBasePointerOffset, SmiConstant(0));
-
- Node* backing_store =
- LoadObjectField(maybe_buffer, JSArrayBuffer::kBackingStoreOffset,
- MachineType::Pointer());
-
- Node* external_pointer =
- CalculateExternalPointer(backing_store, byte_offset);
- StoreObjectFieldNoWriteBarrier(
- elements, FixedTypedArrayBase::kExternalPointerOffset, external_pointer,
- MachineType::PointerRepresentation());
-
- StoreObjectField(holder, JSObject::kElementsOffset, elements);
- Goto(&done);
- }
-
- BIND(&done);
-}
-
-TF_BUILTIN(TypedArrayInitialize, TypedArrayBuiltinsAssembler) {
- Node* const holder = Parameter(Descriptor::kHolder);
- Node* length = Parameter(Descriptor::kLength);
- Node* const maybe_buffer = Parameter(Descriptor::kBuffer);
- Node* const byte_offset = Parameter(Descriptor::kByteOffset);
- Node* byte_length = Parameter(Descriptor::kByteLength);
- Node* const initialize = Parameter(Descriptor::kInitialize);
- Node* const context = Parameter(Descriptor::kContext);
-
- DoInitialize(holder, length, maybe_buffer, byte_offset, byte_length,
- initialize, context);
- Return(UndefinedConstant());
-}
-
-// Small buffers with byte_length <= typed_array_max_size_in_heap are allocated
-// on the heap, but larger buffer must be externally allocated with the
-// ArrayBuffer constructor. This helper allocates the buffer externally if
-// necessary, and then calls into DoInitialize, which will allocate small
-// on-heap buffers.
-void TypedArrayBuiltinsAssembler::InitializeBasedOnLength(
- Node* const holder, Node* const length, Node* const element_size,
- Node* const byte_offset, Node* const initialize, Node* const context) {
- Label allocate_buffer(this), allocate_buffer_noinit(this), do_init(this);
+ VARIABLE(var_buffer, MachineRepresentation::kTagged);
- VARIABLE(maybe_buffer, MachineRepresentation::kTagged, NullConstant());
-
- // SmiMul returns a heap number in case of Smi overflow.
- Node* byte_length = SmiMul(length, element_size);
- GotoIf(TaggedIsNotSmi(byte_length), &allocate_buffer);
- GotoIf(SmiLessThanOrEqual(byte_length,
- SmiConstant(FLAG_typed_array_max_size_in_heap)),
- &do_init);
- Branch(IsTrue(initialize), &allocate_buffer, &allocate_buffer_noinit);
-
- BIND(&allocate_buffer);
+ BIND(&allocate_off_heap);
{
- Node* const buffer_constructor = LoadContextElement(
+ GotoIf(IsFalse(initialize), &allocate_off_heap_no_init);
+
+ Node* buffer_constructor = LoadContextElement(
LoadNativeContext(context), Context::ARRAY_BUFFER_FUN_INDEX);
- maybe_buffer.Bind(ConstructJS(CodeFactory::Construct(isolate()), context,
- buffer_constructor, byte_length));
- Goto(&do_init);
+ var_buffer.Bind(ConstructJS(CodeFactory::Construct(isolate()), context,
+ buffer_constructor, byte_length));
+ Goto(&attach_buffer);
}
- Bind(&allocate_buffer_noinit);
+ BIND(&allocate_off_heap_no_init);
{
- Node* const buffer_constructor_noinit = LoadContextElement(
+ Node* buffer_constructor_noinit = LoadContextElement(
LoadNativeContext(context), Context::ARRAY_BUFFER_NOINIT_FUN_INDEX);
- maybe_buffer.Bind(CallJS(CodeFactory::Call(isolate()), context,
- buffer_constructor_noinit, UndefinedConstant(),
- byte_length));
- Goto(&do_init);
+ var_buffer.Bind(CallJS(CodeFactory::Call(isolate()), context,
+ buffer_constructor_noinit, UndefinedConstant(),
+ byte_length));
+ Goto(&attach_buffer);
}
- Bind(&do_init);
+ BIND(&attach_buffer);
{
- DoInitialize(holder, length, maybe_buffer.value(), byte_offset, byte_length,
- initialize, context);
+ AttachBuffer(holder, var_buffer.value(), fixed_typed_map, length,
+ byte_offset);
+ Goto(&done);
}
+
+ BIND(&done);
+ Return(UndefinedConstant());
}
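Taken together, the labels above reduce to one three-way decision: Smi-sized byte lengths up to the flag limit stay on-heap; anything larger gets an external ArrayBuffer, constructed with or without initialization depending on the initialize argument. A standalone sketch, assuming the flag's default of 64:

    enum class Allocation { kOnHeap, kOffHeap, kOffHeapNoInit };

    // Mirrors the GotoIf chain in TypedArrayInitialize; 64 is the assumed
    // default of FLAG_typed_array_max_size_in_heap.
    Allocation ChooseAllocation(long long byte_length, bool fits_in_smi,
                                bool initialize) {
      const long long kMaxSizeInHeap = 64;
      if (fits_in_smi && byte_length <= kMaxSizeInHeap)
        return Allocation::kOnHeap;
      return initialize ? Allocation::kOffHeap : Allocation::kOffHeapNoInit;
    }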
// ES6 #sec-typedarray-length
TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
- // We know that holder cannot be an object if this builtin was called.
Node* holder = Parameter(Descriptor::kHolder);
Node* length = Parameter(Descriptor::kLength);
Node* element_size = Parameter(Descriptor::kElementSize);
Node* context = Parameter(Descriptor::kContext);
- Node* byte_offset = SmiConstant(0);
+ CSA_ASSERT(this, IsJSTypedArray(holder));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
+
Node* initialize = BooleanConstant(true);
Label invalid_length(this);
@@ -357,8 +362,8 @@ TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
GotoIf(TaggedIsNotSmi(length), &invalid_length);
GotoIf(SmiLessThan(length, SmiConstant(0)), &invalid_length);
- InitializeBasedOnLength(holder, length, element_size, byte_offset, initialize,
- context);
+ CallBuiltin(Builtins::kTypedArrayInitialize, context, holder, length,
+ element_size, initialize);
Return(UndefinedConstant());
BIND(&invalid_length);
@@ -371,14 +376,16 @@ TF_BUILTIN(TypedArrayConstructByLength, TypedArrayBuiltinsAssembler) {
// ES6 #sec-typedarray-buffer-byteoffset-length
TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
- Node* const holder = Parameter(Descriptor::kHolder);
- Node* const buffer = Parameter(Descriptor::kBuffer);
- Node* const byte_offset = Parameter(Descriptor::kByteOffset);
- Node* const length = Parameter(Descriptor::kLength);
- Node* const element_size = Parameter(Descriptor::kElementSize);
- CSA_ASSERT(this, TaggedIsSmi(element_size));
- Node* const context = Parameter(Descriptor::kContext);
- Node* const initialize = BooleanConstant(true);
+ Node* holder = Parameter(Descriptor::kHolder);
+ Node* buffer = Parameter(Descriptor::kBuffer);
+ Node* byte_offset = Parameter(Descriptor::kByteOffset);
+ Node* length = Parameter(Descriptor::kLength);
+ Node* element_size = Parameter(Descriptor::kElementSize);
+ Node* context = Parameter(Descriptor::kContext);
+
+ CSA_ASSERT(this, IsJSTypedArray(holder));
+ CSA_ASSERT(this, IsJSArrayBuffer(buffer));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(element_size));
VARIABLE(new_byte_length, MachineRepresentation::kTagged, SmiConstant(0));
VARIABLE(offset, MachineRepresentation::kTagged, SmiConstant(0));
@@ -467,8 +474,8 @@ TF_BUILTIN(TypedArrayConstructByArrayBuffer, TypedArrayBuiltinsAssembler) {
// Force the result into a Smi, or throw a range error if it doesn't fit.
new_length = ToSmiIndex(new_length, context, &invalid_length);
- DoInitialize(holder, new_length, buffer, offset.value(),
- new_byte_length.value(), initialize, context);
+ CallBuiltin(Builtins::kTypedArrayInitializeWithBuffer, context, holder,
+ new_length, buffer, element_size, offset.value());
Return(UndefinedConstant());
}
@@ -543,14 +550,13 @@ compiler::Node* TypedArrayBuiltinsAssembler::ByteLengthIsValid(
}
TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
- Node* const holder = Parameter(Descriptor::kHolder);
- Node* const array_like = Parameter(Descriptor::kArrayLike);
+ Node* holder = Parameter(Descriptor::kHolder);
+ Node* array_like = Parameter(Descriptor::kArrayLike);
Node* initial_length = Parameter(Descriptor::kLength);
- Node* const element_size = Parameter(Descriptor::kElementSize);
+ Node* element_size = Parameter(Descriptor::kElementSize);
CSA_ASSERT(this, TaggedIsSmi(element_size));
- Node* const context = Parameter(Descriptor::kContext);
+ Node* context = Parameter(Descriptor::kContext);
- Node* byte_offset = SmiConstant(0);
Node* initialize = BooleanConstant(false);
Label invalid_length(this), fill(this), fast_copy(this);
@@ -558,8 +564,8 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
// The caller has looked up length on array_like, which is observable.
Node* length = ToSmiLength(initial_length, context, &invalid_length);
- InitializeBasedOnLength(holder, length, element_size, byte_offset, initialize,
- context);
+ CallBuiltin(Builtins::kTypedArrayInitialize, context, holder, length,
+ element_size, initialize);
GotoIf(SmiNotEqual(length, SmiConstant(0)), &fill);
Return(UndefinedConstant());
@@ -568,12 +574,9 @@ TF_BUILTIN(TypedArrayConstructByArrayLike, TypedArrayBuiltinsAssembler) {
Node* source_kind = LoadMapElementsKind(LoadMap(array_like));
GotoIf(Word32Equal(holder_kind, source_kind), &fast_copy);
- // Call to JS to copy the contents of the array in.
- Callable callable = CodeFactory::Call(isolate());
- Node* copy_array_contents = LoadContextElement(
- LoadNativeContext(context), Context::TYPED_ARRAY_SET_FROM_ARRAY_LIKE);
- CallJS(callable, context, copy_array_contents, UndefinedConstant(), holder,
- array_like, length, SmiConstant(0));
+ // Copy using the elements accessor.
+ CallRuntime(Runtime::kTypedArrayCopyElements, context, holder, array_like,
+ length);
Return(UndefinedConstant());
BIND(&fast_copy);
diff --git a/deps/v8/src/builtins/builtins-typedarray.cc b/deps/v8/src/builtins/builtins-typedarray.cc
index 73e739fcbd..773e5480ac 100644
--- a/deps/v8/src/builtins/builtins-typedarray.cc
+++ b/deps/v8/src/builtins/builtins-typedarray.cc
@@ -17,7 +17,8 @@ namespace internal {
// ES6 section 22.2.3.1 get %TypedArray%.prototype.buffer
BUILTIN(TypedArrayPrototypeBuffer) {
HandleScope scope(isolate);
- CHECK_RECEIVER(JSTypedArray, typed_array, "get TypedArray.prototype.buffer");
+ CHECK_RECEIVER(JSTypedArray, typed_array,
+ "get %TypedArray%.prototype.buffer");
return *typed_array->GetBuffer();
}
@@ -41,81 +42,14 @@ int64_t CapRelativeIndex(Handle<Object> num, int64_t minimum, int64_t maximum) {
: std::min<int64_t>(relative, maximum);
}
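The ternary tail above belongs to CapRelativeIndex, the shared relative-index helper for copyWithin and fill: negative indices count back from the end, and the result is clamped into [minimum, maximum]. A standalone sketch of the clamp, assuming the negative branch mirrors the visible positive one:

    #include <algorithm>
    #include <cstdint>

    int64_t CapRelativeIndexSketch(int64_t relative, int64_t minimum,
                                   int64_t maximum) {
      return relative < 0 ? std::max<int64_t>(relative + maximum, minimum)
                          : std::min<int64_t>(relative, maximum);
    }
    // CapRelativeIndexSketch(-2, 0, 10) == 8 (two from the end);
    // CapRelativeIndexSketch(42, 0, 10) == 10 (clamped to the length).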
-// ES7 section 22.2.4.6 TypedArrayCreate ( constructor, argumentList )
-MaybeHandle<JSTypedArray> TypedArrayCreate(Isolate* isolate,
- Handle<JSFunction> default_ctor,
- int argc, Handle<Object>* argv,
- const char* method_name) {
- // 1. Let newTypedArray be ? Construct(constructor, argumentList).
- Handle<Object> new_obj;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, new_obj, Execution::New(default_ctor, argc, argv), JSTypedArray);
-
- // 2. Perform ? ValidateTypedArray(newTypedArray).
- Handle<JSTypedArray> new_array;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, new_array, JSTypedArray::Validate(isolate, new_obj, method_name),
- JSTypedArray);
-
- // 3. If argumentList is a List of a single Number, then
- // If newTypedArray.[[ArrayLength]] < size, throw a TypeError exception.
- DCHECK_IMPLIES(argc == 1, argv[0]->IsSmi());
- if (argc == 1 && new_array->length_value() < argv[0]->Number()) {
- const MessageTemplate::Template message =
- MessageTemplate::kTypedArrayTooShort;
- THROW_NEW_ERROR(isolate, NewTypeError(message), JSTypedArray);
- }
-
- // 4. Return newTypedArray.
- return new_array;
-}
-
-// ES7 section 22.2.4.7 TypedArraySpeciesCreate ( exemplar, argumentList )
-MaybeHandle<JSTypedArray> TypedArraySpeciesCreate(Isolate* isolate,
- Handle<JSTypedArray> exemplar,
- int argc,
- Handle<Object>* argv,
- const char* method_name) {
- // 1. Assert: exemplar is an Object that has a [[TypedArrayName]] internal
- // slot.
- DCHECK(exemplar->IsJSTypedArray());
-
- // 2. Let defaultConstructor be the intrinsic object listed in column one of
- // Table 51 for exemplar.[[TypedArrayName]].
- Handle<JSFunction> default_ctor = isolate->uint8_array_fun();
- switch (exemplar->type()) {
-#define TYPED_ARRAY_CTOR(Type, type, TYPE, ctype, size) \
- case kExternal##Type##Array: { \
- default_ctor = isolate->type##_array_fun(); \
- break; \
- }
-
- TYPED_ARRAYS(TYPED_ARRAY_CTOR)
-#undef TYPED_ARRAY_CTOR
- default:
- UNREACHABLE();
- }
-
- // 3. Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
- Handle<Object> ctor;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, ctor,
- Object::SpeciesConstructor(isolate, exemplar, default_ctor),
- JSTypedArray);
-
- // 4. Return ? TypedArrayCreate(constructor, argumentList).
- return TypedArrayCreate(isolate, Handle<JSFunction>::cast(ctor), argc, argv,
- method_name);
-}
-
MaybeHandle<JSTypedArray> TypedArraySpeciesCreateByLength(
Isolate* isolate, Handle<JSTypedArray> exemplar, const char* method_name,
int64_t length) {
const int argc = 1;
ScopedVector<Handle<Object>> argv(argc);
argv[0] = isolate->factory()->NewNumberFromInt64(length);
- return TypedArraySpeciesCreate(isolate, exemplar, argc, argv.start(),
- method_name);
+ return JSTypedArray::SpeciesCreate(isolate, exemplar, argc, argv.start(),
+ method_name);
}
} // namespace
@@ -128,8 +62,6 @@ BUILTIN(TypedArrayPrototypeCopyWithin) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
- if (V8_UNLIKELY(array->WasNeutered())) return *array;
-
int64_t len = array->length_value();
int64_t to = 0;
int64_t from = 0;
@@ -193,8 +125,6 @@ BUILTIN(TypedArrayPrototypeFill) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
- if (V8_UNLIKELY(array->WasNeutered())) return *array;
-
Handle<Object> obj_value = args.atOrUndefined(isolate, 1);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, obj_value, Object::ToNumber(obj_value));
@@ -342,8 +272,6 @@ BUILTIN(TypedArrayPrototypeReverse) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, array, JSTypedArray::Validate(isolate, args.receiver(), method));
- if (V8_UNLIKELY(array->WasNeutered())) return *array;
-
ElementsAccessor* elements = array->GetElementsAccessor();
elements->Reverse(*array);
return *array;
diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc
index 8fb4844e37..4d5e83a9e0 100644
--- a/deps/v8/src/builtins/builtins.cc
+++ b/deps/v8/src/builtins/builtins.cc
@@ -10,6 +10,7 @@
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
@@ -27,8 +28,9 @@ Builtins::~Builtins() {}
void Builtins::TearDown() { initialized_ = false; }
-void Builtins::IterateBuiltins(ObjectVisitor* v) {
- v->VisitPointers(&builtins_[0], &builtins_[0] + builtin_count);
+void Builtins::IterateBuiltins(RootVisitor* v) {
+ v->VisitRootPointers(Root::kBuiltins, &builtins_[0],
+ &builtins_[0] + builtin_count);
}
const char* Builtins::Lookup(byte* pc) {
@@ -67,28 +69,6 @@ Handle<Code> Builtins::NewCloneShallowArray(
return Handle<Code>::null();
}
-Handle<Code> Builtins::NewCloneShallowObject(int length) {
- switch (length) {
- case 0:
- return FastCloneShallowObject0();
- case 1:
- return FastCloneShallowObject1();
- case 2:
- return FastCloneShallowObject2();
- case 3:
- return FastCloneShallowObject3();
- case 4:
- return FastCloneShallowObject4();
- case 5:
- return FastCloneShallowObject5();
- case 6:
- return FastCloneShallowObject6();
- default:
- UNREACHABLE();
- }
- return Handle<Code>::null();
-}
-
Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
switch (hint) {
case ToPrimitiveHint::kDefault:
@@ -114,21 +94,46 @@ Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
}
// static
+int Builtins::GetBuiltinParameterCount(Name name) {
+ switch (name) {
+#define TFJ_CASE(Name, ParamCount, ...) \
+ case k##Name: { \
+ return ParamCount; \
+ }
+ BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, TFJ_CASE, IGNORE_BUILTIN,
+ IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN)
+#undef TFJ_CASE
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
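GetBuiltinParameterCount is generated by the usual X-macro pattern: BUILTIN_LIST invokes TFJ_CASE once per TFJ builtin, so each list entry becomes one switch case. A toy, self-contained version of the pattern:

    // The list macro calls V once per entry; the enum and the switch are
    // both generated from the same list, so they cannot drift apart.
    #define TOY_BUILTIN_LIST(V) \
      V(Add, 2)                 \
      V(Negate, 1)

    enum ToyName {
    #define TOY_ENUM(Name, ParamCount) kToy##Name,
      TOY_BUILTIN_LIST(TOY_ENUM)
    #undef TOY_ENUM
    };

    int ToyParameterCount(ToyName name) {
      switch (name) {
    #define TOY_CASE(Name, ParamCount) \
      case kToy##Name:                 \
        return ParamCount;
        TOY_BUILTIN_LIST(TOY_CASE)
    #undef TOY_CASE
      }
      return 0;
    }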
+
+// static
Callable Builtins::CallableFor(Isolate* isolate, Name name) {
+ Handle<Code> code(
+ reinterpret_cast<Code**>(isolate->builtins()->builtin_address(name)));
+ CallDescriptors::Key key;
switch (name) {
-#define CASE(Name, ...) \
- case k##Name: { \
- Handle<Code> code(Code::cast(isolate->builtins()->builtins_[name])); \
- auto descriptor = Builtin_##Name##_InterfaceDescriptor(isolate); \
- return Callable(code, descriptor); \
+// This macro is deliberately crafted so as to emit very little code,
+// in order to keep the binary size of this function under control.
+#define CASE(Name, ...) \
+ case k##Name: { \
+ key = Builtin_##Name##_InterfaceDescriptor::key(); \
+ break; \
}
BUILTIN_LIST(IGNORE_BUILTIN, IGNORE_BUILTIN, IGNORE_BUILTIN, CASE, CASE,
CASE, IGNORE_BUILTIN, IGNORE_BUILTIN)
#undef CASE
+ case kConsoleAssert: {
+ return Callable(code, BuiltinDescriptor(isolate));
+ }
default:
UNREACHABLE();
return Callable(Handle<Code>::null(), VoidDescriptor(isolate));
}
+ CallInterfaceDescriptor descriptor(isolate, key);
+ return Callable(code, descriptor);
}
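The size saving comes from hoisting the Code handle load and the descriptor construction out of the switch: each case now only selects a small key, and the expensive tail is emitted once. A toy sketch of the shape:

    struct ToyCallable { int code; int key; };

    ToyCallable CallableForSketch(int name) {
      int code = name + 1000;  // stand-in for the hoisted Code handle load
      int key;
      switch (name) {          // every case is now just one assignment
        case 0: key = 10; break;
        case 1: key = 11; break;
        default: return ToyCallable{0, 0};
      }
      return ToyCallable{code, key};  // shared tail, emitted once
    }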
// static
@@ -222,6 +227,12 @@ bool Builtins::HasCppImplementation(int index) {
BUILTIN_LIST_ALL(DEFINE_BUILTIN_ACCESSOR)
#undef DEFINE_BUILTIN_ACCESSOR
+Handle<Code> Builtins::JSConstructStubGeneric() {
+ return FLAG_harmony_restrict_constructor_return
+ ? JSConstructStubGenericRestrictedReturn()
+ : JSConstructStubGenericUnrestrictedReturn();
+}
+
// static
bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
Handle<JSObject> target_global_proxy) {
diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h
index 9da714bc0f..b5eebff73b 100644
--- a/deps/v8/src/builtins/builtins.h
+++ b/deps/v8/src/builtins/builtins.h
@@ -18,7 +18,7 @@ class Handle;
class Isolate;
// Forward declarations.
-class ObjectVisitor;
+class RootVisitor;
enum class InterpreterPushArgsMode : unsigned;
namespace compiler {
class CodeAssemblerState;
@@ -31,7 +31,7 @@ class Builtins {
void TearDown();
// Garbage collection support.
- void IterateBuiltins(ObjectVisitor* v);
+ void IterateBuiltins(RootVisitor* v);
// Disassembler support.
const char* Lookup(byte* pc);
@@ -64,7 +64,7 @@ class Builtins {
Handle<Code> InterpreterPushArgsThenConstruct(InterpreterPushArgsMode mode);
Handle<Code> NewFunctionContext(ScopeType scope_type);
Handle<Code> NewCloneShallowArray(AllocationSiteMode allocation_mode);
- Handle<Code> NewCloneShallowObject(int length);
+ Handle<Code> JSConstructStubGeneric();
Code* builtin(Name name) {
// Code::cast cannot be used here since we access builtins
@@ -76,6 +76,8 @@ class Builtins {
return reinterpret_cast<Address>(&builtins_[name]);
}
+ static int GetBuiltinParameterCount(Name name);
+
static Callable CallableFor(Isolate* isolate, Name name);
static const char* name(int index);
@@ -90,6 +92,12 @@ class Builtins {
bool is_initialized() const { return initialized_; }
+ // Used by SetupIsolateDelegate and Deserializer.
+ void MarkInitialized() {
+ DCHECK(!initialized_);
+ initialized_ = true;
+ }
+
MUST_USE_RESULT static MaybeHandle<Object> InvokeApiFunction(
Isolate* isolate, bool is_construct, Handle<HeapObject> function,
Handle<Object> receiver, int argc, Handle<Object> args[],
@@ -105,11 +113,6 @@ class Builtins {
private:
Builtins();
- // Used by SetupIsolateDelegate.
- void MarkInitialized() {
- DCHECK(!initialized_);
- initialized_ = true;
- }
static void Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode,
@@ -120,8 +123,8 @@ class Builtins {
static void Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
TailCallMode tail_call_mode);
- static void Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code);
+
+ static void Generate_ForwardVarargs(MacroAssembler* masm, Handle<Code> code);
static void Generate_InterpreterPushArgsThenCallImpl(
MacroAssembler* masm, ConvertReceiverMode receiver_mode,
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 00e9e720ae..bcffedfef2 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -112,16 +112,12 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
namespace {
-void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
- Label post_instantiation_deopt_entry;
-
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: number of arguments
- // -- esi: context
// -- edi: constructor function
// -- edx: new target
+ // -- esi: context
// -----------------------------------
// Enter a construct frame.
@@ -132,41 +128,10 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ SmiTag(eax);
__ push(esi);
__ push(eax);
-
- if (create_implicit_receiver) {
- // Allocate the new receiver object.
- __ Push(edi);
- __ Push(edx);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ mov(ebx, eax);
- __ Pop(edx);
- __ Pop(edi);
-
- // ----------- S t a t e -------------
- // -- edi: constructor function
- // -- ebx: newly allocated object
- // -- edx: new target
- // -----------------------------------
-
- // Retrieve smi-tagged arguments count from the stack.
- __ mov(eax, Operand(esp, 0));
- }
-
__ SmiUntag(eax);
- if (create_implicit_receiver) {
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ push(ebx);
- __ push(ebx);
- } else {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- }
-
- // Deoptimizer re-enters stub code here.
- __ bind(&post_instantiation_deopt_entry);
+ // The receiver for the builtin/api call.
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
// Set up pointer to last argument.
__ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
@@ -174,6 +139,16 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Copy arguments and receiver to the expression stack.
Label loop, entry;
__ mov(ecx, eax);
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments (untagged)
+ // -- edi: constructor function
+ // -- edx: new target
+ // -- ebx: pointer to last argument
+ // -- ecx: counter
+ // -- sp[0*kPointerSize]: the hole (receiver)
+ // -- sp[1*kPointerSize]: number of arguments (tagged)
+ // -- sp[2*kPointerSize]: context
+ // -----------------------------------
__ jmp(&entry);
__ bind(&loop);
__ push(Operand(ebx, ecx, times_4, 0));
@@ -182,122 +157,223 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ j(greater_equal, &loop);
// Call the function.
+ // eax: number of arguments (untagged)
+ // edi: constructor function
+ // edx: new target
ParameterCount actual(eax);
__ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
- // Store offset of return address for deoptimizer.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
- }
-
// Restore context from the frame.
__ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
-
- if (create_implicit_receiver) {
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(eax, &use_receiver, Label::kNear);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(above_equal, &exit, Label::kNear);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ mov(eax, Operand(esp, 0));
-
- // Restore the arguments count and leave the construct frame. The
- // arguments count is stored below the receiver.
- __ bind(&exit);
- __ mov(ebx, Operand(esp, 1 * kPointerSize));
- } else {
- __ mov(ebx, Operand(esp, 0));
- }
-
+ // Restore smi-tagged arguments count from the frame.
+ __ mov(ebx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
- // ES6 9.2.2. Step 13+
- // Check that the result is not a Smi, indicating that the constructor result
- // from a derived class is neither undefined nor an Object.
- if (check_derived_construct) {
- Label do_throw, dont_throw;
- __ JumpIfSmi(eax, &do_throw, Label::kNear);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
- __ j(above_equal, &dont_throw, Label::kNear);
- __ bind(&do_throw);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
- }
- __ bind(&dont_throw);
- }
-
// Remove caller arguments from the stack and return.
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ pop(ecx);
__ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
- if (create_implicit_receiver) {
- __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
- }
__ ret(0);
+}
+
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Generate_JSConstructStubGeneric(MacroAssembler* masm,
+ bool restrict_constructor_return) {
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments (untagged)
+ // -- edi: constructor function
+ // -- edx: new target
+ // -- esi: context
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
- // Store offset of trampoline address for deoptimizer. This is the bailout
- // point after the receiver instantiation but before the function invocation.
- // We need to restore some registers in order to continue the above code.
- if (create_implicit_receiver && !is_api_function) {
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+
+ // Preserve the incoming parameters on the stack.
+ __ mov(ecx, eax);
+ __ SmiTag(ecx);
+ __ Push(esi);
+ __ Push(ecx);
+ __ Push(edi);
+ __ Push(edx);
+
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- edi and sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: argument count
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ebx, SharedFunctionInfo::kFunctionKindByteOffset),
+ Immediate(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ j(not_zero, &not_create_implicit_receiver);
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
+ __ jmp(&post_instantiation_deopt_entry, Label::kNear);
+
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(eax, Heap::kTheHoleValueRootIndex);
+
+ // ----------- S t a t e -------------
+ // -- eax: implicit receiver
+ // -- Slot 3 / sp[0*kPointerSize]: new target
+ // -- Slot 2 / sp[1*kPointerSize]: constructor function
+ // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[3*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(edx);
+
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(eax);
+ __ Push(eax);
// ----------- S t a t e -------------
- // -- eax : newly allocated object
- // -- esp[0] : constructor function
+ // -- edx: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
- __ pop(edi);
- __ push(eax);
- __ push(eax);
-
- // Retrieve smi-tagged arguments count from the stack.
+ // Restore constructor function and argument count.
+ __ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
__ mov(eax, Operand(ebp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(eax);
- // Retrieve the new target value from the stack. This was placed into the
- // frame description in place of the receiver by the optimizing compiler.
- __ mov(edx, Operand(ebp, eax, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
+ // Set up pointer to last argument.
+ __ lea(ebx, Operand(ebp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(ecx, eax);
+ // ----------- S t a t e -------------
+ // -- eax: number of arguments (untagged)
+ // -- edx: new target
+ // -- ebx: pointer to last argument
+ // -- ecx: counter (untagged)
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- edi and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+ __ jmp(&entry, Label::kNear);
+ __ bind(&loop);
+ __ Push(Operand(ebx, ecx, times_pointer_size, 0));
+ __ bind(&entry);
+ __ dec(ecx);
+ __ j(greater_equal, &loop);
+
+ // Call the function.
+ ParameterCount actual(eax);
+ __ InvokeFunction(edi, edx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // ----------- S t a t e -------------
+ // -- eax: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // Restore context from the frame.
+ __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, other_result, leave_frame;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfRoot(eax, Heap::kUndefinedValueRootIndex, &use_receiver,
+ Label::kNear);
- // Continue with constructor function invocation.
- __ jmp(&post_instantiation_deopt_entry);
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(eax, &other_result, Label::kNear);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(eax, FIRST_JS_RECEIVER_TYPE, ecx);
+ __ j(above_equal, &leave_frame, Label::kNear);
+
+ __ bind(&other_result);
+ // The result is now neither undefined nor an object.
+ if (restrict_constructor_return) {
+ // Throw if constructor function is a class constructor
+ __ mov(ebx, Operand(ebp, ConstructFrameConstants::kConstructorOffset));
+ __ mov(ebx, FieldOperand(ebx, JSFunction::kSharedFunctionInfoOffset));
+ __ test_b(FieldOperand(ebx, SharedFunctionInfo::kFunctionKindByteOffset),
+ Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ j(Condition::zero, &use_receiver, Label::kNear);
+ } else {
+ __ jmp(&use_receiver, Label::kNear);
+ }
+
+ __ bind(&do_throw);
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ mov(eax, Operand(esp, 0 * kPointerSize));
+ __ JumpIfRoot(eax, Heap::kTheHoleValueRootIndex, &do_throw);
+
+ __ bind(&leave_frame);
+ // Restore smi-tagged arguments count from the frame.
+ __ mov(ebx, Operand(ebp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
}
+ // Remove caller arguments from the stack and return.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ __ pop(ecx);
+ __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
+ __ push(ecx);
+ __ ret(0);
}
-
} // namespace
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
+void Builtins::Generate_JSConstructStubGenericRestrictedReturn(
+ MacroAssembler* masm) {
+ return Generate_JSConstructStubGeneric(masm, true);
+}
+void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
+ MacroAssembler* masm) {
+ return Generate_JSConstructStubGeneric(masm, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStubForDerived(
- MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, true);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -541,6 +617,37 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(&stepping_prepared);
}
+static void ReplaceClosureEntryWithOptimizedCode(
+ MacroAssembler* masm, Register optimized_code_entry, Register closure,
+ Register scratch1, Register scratch2, Register scratch3) {
+ Register native_context = scratch1;
+
+ // Store the optimized code in the closure.
+ __ lea(optimized_code_entry,
+ FieldOperand(optimized_code_entry, Code::kHeaderSize));
+ __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset),
+ optimized_code_entry);
+ __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+
+ // Link the closure into the optimized function list.
+ __ mov(native_context, NativeContextOperand());
+ __ mov(scratch3,
+ ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), scratch3);
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch3,
+ scratch2, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+ closure);
+ // Save closure before the write barrier.
+ __ mov(scratch3, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure,
+ scratch2, kDontSaveFPRegs);
+ __ mov(closure, scratch3);
+}
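Linking the closure into the optimized function list is an intrusive list prepend: the closure's next-function-link field receives the old list head, and the native context's slot receives the closure. A standalone sketch with plain pointers (the GC write barriers in the real code are omitted):

    struct Closure {
      Closure* next_function_link = nullptr;
    };

    // Mirrors the two stores above: prepend closure to the per-context
    // OPTIMIZED_FUNCTIONS_LIST.
    void LinkIntoOptimizedList(Closure** list_head, Closure* closure) {
      closure->next_function_link = *list_head;
      *list_head = closure;
    }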
+
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register scratch2) {
Register args_count = scratch1;
@@ -588,6 +695,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ push(edi); // Callee's JS function.
__ push(edx); // Callee's new target.
+ // First check if there is optimized code in the feedback vector which we
+ // could call instead.
+ Label switch_to_optimized_code;
+ Register optimized_code_entry = ecx;
+ __ mov(ebx, FieldOperand(edi, JSFunction::kFeedbackVectorOffset));
+ __ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
+ __ mov(optimized_code_entry,
+ FieldOperand(ebx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
+ __ mov(optimized_code_entry,
+ FieldOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ mov(eax, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
@@ -704,6 +824,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(FieldOperand(edi, JSFunction::kCodeEntryOffset), ecx);
__ RecordWriteCodeEntryField(edi, ecx, ebx);
__ jmp(ecx);
+
+ // If there is optimized code on the type feedback vector, check if it is good
+ // to run, and if so, self-heal the closure and call the optimized code.
+ __ bind(&switch_to_optimized_code);
+ Label gotta_call_runtime;
+
+ // Check if the optimized code is marked for deopt.
+ __ test(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ __ j(not_zero, &gotta_call_runtime);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ __ push(edx);
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, edi, edx,
+ eax, ebx);
+ __ pop(edx);
+ __ leave();
+ __ jmp(optimized_code_entry);
+
+ // Optimized code is marked for deopt, bail out to the CompileLazy runtime
+ // function which will clear the feedback vector's optimized code slot.
+ __ bind(&gotta_call_runtime);
+ __ leave();
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
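The trampoline now makes a three-way choice before touching any bytecode: no cached optimized code means interpret, code marked for deoptimization means evict through the runtime, and otherwise the closure self-heals and jumps to the optimized code. A standalone sketch of the decision, with the deopt test spelled out as the same single-bit check:

    #include <cstdint>

    enum class Entry { kInterpreter, kEvictViaRuntime, kOptimizedCode };

    Entry ChooseEntry(bool has_optimized_code, uint32_t kind_specific_flags) {
      const uint32_t kMarkedForDeoptimizationBit = 0;  // assumed bit index
      if (!has_optimized_code) return Entry::kInterpreter;
      if (kind_specific_flags & (1u << kMarkedForDeoptimizationBit))
        return Entry::kEvictViaRuntime;
      return Entry::kOptimizedCode;
    }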
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1081,9 +1226,8 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -- edi : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label gotta_call_runtime;
Label try_shared;
- Label loop_top, loop_bottom;
Register closure = edi;
Register new_target = edx;
@@ -1092,96 +1236,43 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// Do we have a valid feedback vector?
__ mov(ebx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ mov(ebx, FieldOperand(ebx, Cell::kValueOffset));
- __ JumpIfRoot(ebx, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime_no_stack);
-
- __ push(argument_count);
- __ push(new_target);
- __ push(closure);
-
- Register map = argument_count;
- Register index = ebx;
- __ mov(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ mov(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ mov(index, FieldOperand(map, FixedArray::kLengthOffset));
- __ cmp(index, Immediate(Smi::FromInt(2)));
- __ j(less, &try_shared);
-
- // edx : native context
- // ebx : length / index
- // eax : optimized code map
- // stack[0] : new target
- // stack[4] : closure
- Register native_context = edx;
- __ mov(native_context, NativeContextOperand());
-
- __ bind(&loop_top);
- Register temp = edi;
+ __ JumpIfRoot(ebx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
- // Does the native context match?
- __ mov(temp, FieldOperand(map, index, times_half_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousContext));
- __ mov(temp, FieldOperand(temp, WeakCell::kValueOffset));
- __ cmp(temp, native_context);
- __ j(not_equal, &loop_bottom);
-
- // Code available?
+ // Is optimized code available in the feedback vector?
Register entry = ecx;
- __ mov(entry, FieldOperand(map, index, times_half_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ mov(entry,
+ FieldOperand(ebx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ mov(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found code. Get it into the closure and return.
- __ pop(closure);
- // Store code entry in the closure.
- __ lea(entry, FieldOperand(entry, Code::kHeaderSize));
- __ mov(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
- __ RecordWriteCodeEntryField(closure, entry, eax);
+ // Found code. Check if it is marked for deopt; if so, call into the
+ // runtime to clear the optimized code slot.
+ __ test(FieldOperand(entry, Code::kKindSpecificFlags1Offset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ __ j(not_zero, &gotta_call_runtime);
- // Link the closure into the optimized function list.
- // ecx : code entry
- // edx : native context
- // edi : closure
- __ mov(ebx,
- ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ mov(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), ebx);
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, ebx, eax,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ mov(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
- closure);
- // Save closure before the write barrier.
- __ mov(ebx, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure, eax,
- kDontSaveFPRegs);
- __ mov(closure, ebx);
+ // Code is good, get it into the closure and tail call.
+ __ push(argument_count);
+ __ push(new_target);
+ ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, edx, eax, ebx);
__ pop(new_target);
__ pop(argument_count);
__ jmp(entry);
- __ bind(&loop_bottom);
- __ sub(index, Immediate(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ cmp(index, Immediate(Smi::FromInt(1)));
- __ j(greater, &loop_top);
-
- // We found no code.
+ // We found no optimized code.
__ bind(&try_shared);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
__ mov(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ test_b(FieldOperand(entry, SharedFunctionInfo::kMarkedForTierUpByteOffset),
Immediate(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ j(not_zero, &gotta_call_runtime_no_stack);
+ __ j(not_zero, &gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
__ mov(entry, FieldOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Move(ebx, masm->CodeObject());
__ cmp(entry, ebx);
- __ j(equal, &gotta_call_runtime_no_stack);
+ __ j(equal, &gotta_call_runtime);
// Install the SFI's code entry.
__ lea(entry, FieldOperand(entry, Code::kHeaderSize));
@@ -1190,10 +1281,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ jmp(entry);
__ bind(&gotta_call_runtime);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
- __ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
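After this change CompileLazy's lookup is straight-line: consult the feedback vector's single optimized-code slot, fall back to the SharedFunctionInfo's code, and only then call into the runtime; the removed loop_top/loop_bottom code used to scan a per-SharedFunctionInfo optimized code map instead. A standalone sketch of the order:

    enum class LazyTarget { kOptimized, kSharedCode, kRuntimeCompile };

    LazyTarget CompileLazySketch(bool has_feedback_vector,
                                 bool vector_has_code, bool marked_for_deopt,
                                 bool sfi_marked_for_tier_up,
                                 bool sfi_code_is_compile_lazy) {
      if (!has_feedback_vector) return LazyTarget::kRuntimeCompile;
      if (vector_has_code) {
        // Deopt-marked code is evicted via the runtime; good code is used.
        return marked_for_deopt ? LazyTarget::kRuntimeCompile
                                : LazyTarget::kOptimized;
      }
      if (sfi_marked_for_tier_up || sfi_code_is_compile_lazy)
        return LazyTarget::kRuntimeCompile;
      return LazyTarget::kSharedCode;
    }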
@@ -2222,15 +2309,18 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- edi : the target to call (can be any Object)
- // -- ecx : start index (to support rest parameters)
- // -- esp[0] : return address.
- // -- esp[4] : thisArgument
+ // -- eax : the number of arguments (not including the receiver)
+ // -- edi : the target to call (can be any Object)
+ // -- edx : the new target (for [[Construct]] calls)
+ // -- ecx : start index (to support rest parameters)
// -----------------------------------
+ // Preserve new.target (in case of [[Construct]]).
+ __ movd(xmm0, edx);
+
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
@@ -2238,24 +2328,24 @@ void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &arguments_adaptor, Label::kNear);
{
- __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(eax, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ mov(eax,
- FieldOperand(eax, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(edx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(edx, FieldOperand(edx, JSFunction::kSharedFunctionInfoOffset));
+ __ mov(edx,
+ FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(ebx, ebp);
}
__ jmp(&arguments_done, Label::kNear);
__ bind(&arguments_adaptor);
{
// Just load the length from the ArgumentsAdaptorFrame.
- __ mov(eax, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(edx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
- Label stack_empty, stack_done;
- __ SmiUntag(eax);
- __ sub(eax, ecx);
- __ j(less_equal, &stack_empty);
+ Label stack_done;
+ __ SmiUntag(edx);
+ __ sub(edx, ecx);
+ __ j(less_equal, &stack_done);
{
// Check for stack overflow.
{
@@ -2270,7 +2360,7 @@ void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
__ add(ecx, esp);
__ sar(ecx, kPointerSizeLog2);
// Check if the arguments will overflow the stack.
- __ cmp(ecx, eax);
+ __ cmp(ecx, edx);
__ j(greater, &done, Label::kNear); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
@@ -2279,25 +2369,23 @@ void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
{
Label loop;
- __ mov(ecx, eax);
- __ pop(edx);
+ __ add(eax, edx);
+ __ PopReturnAddressTo(ecx);
__ bind(&loop);
{
- __ Push(Operand(ebx, ecx, times_pointer_size, 1 * kPointerSize));
- __ dec(ecx);
+ __ Push(Operand(ebx, edx, times_pointer_size, 1 * kPointerSize));
+ __ dec(edx);
__ j(not_zero, &loop);
}
- __ push(edx);
+ __ PushReturnAddressFrom(ecx);
}
}
- __ jmp(&stack_done, Label::kNear);
- __ bind(&stack_empty);
- {
- // We just pass the receiver, which is already on the stack.
- __ Move(eax, Immediate(0));
- }
__ bind(&stack_done);
+ // Restore new.target (in case of [[Construct]]).
+ __ movd(edx, xmm0);
+
+ // Tail-call to the {code} handler.
__ Jump(code, RelocInfo::CODE_TARGET);
}
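ForwardVarargs' argument arithmetic: take the caller's argument count (the function's formal parameter count or, beneath an adaptor frame, the adaptor's length), subtract the start index used for rest parameters, and if the difference is positive push that many arguments and fold them into eax. A standalone sketch of the counts:

    // Mirrors the SmiUntag/sub/less_equal sequence and the add into eax.
    int ForwardedCount(int caller_arg_count, int start_index) {
      int n = caller_arg_count - start_index;
      return n > 0 ? n : 0;  // the less_equal branch skips forwarding
    }

    int TotalArgCount(int already_pushed, int caller_arg_count,
                      int start_index) {
      return already_pushed + ForwardedCount(caller_arg_count, start_index);
    }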
diff --git a/deps/v8/src/builtins/mips/OWNERS b/deps/v8/src/builtins/mips/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/builtins/mips/OWNERS
+++ b/deps/v8/src/builtins/mips/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 2da3985606..24fe271cb3 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -396,8 +396,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
__ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
- __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, a2, Code::kHeaderSize - kHeapObjectTag);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@@ -421,8 +420,7 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
__ SmiUntag(a0);
}
- __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
}
void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
@@ -443,11 +441,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
namespace {
-void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
- Label post_instantiation_deopt_entry;
-
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -457,8 +451,6 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// -- sp[...]: constructor arguments
// -----------------------------------
- Isolate* isolate = masm->isolate();
-
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
@@ -466,186 +458,251 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Preserve the incoming parameters on the stack.
__ SmiTag(a0);
__ Push(cp, a0);
-
- if (create_implicit_receiver) {
- // Allocate the new receiver object.
- __ Push(a1, a3);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ mov(t4, v0);
- __ Pop(a1, a3);
-
- // ----------- S t a t e -------------
- // -- a1: constructor function
- // -- a3: new target
- // -- t0: newly allocated object
- // -----------------------------------
-
- // Retrieve smi-tagged arguments count from the stack.
- __ lw(a0, MemOperand(sp));
- }
-
__ SmiUntag(a0);
- if (create_implicit_receiver) {
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(t4, t4);
- } else {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- }
-
- // Deoptimizer re-enters stub code here.
- __ bind(&post_instantiation_deopt_entry);
+ // The receiver for the builtin/api call.
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
// Set up pointer to last argument.
- __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
- // a0: number of arguments
- // a1: constructor function
- // a2: address of last argument (caller sp)
- // a3: new target
- // t4: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: number of arguments (smi-tagged)
Label loop, entry;
- __ SmiTag(t4, a0);
+ __ mov(t3, a0);
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a3: new target
+ // -- t2: pointer to last argument
+ // -- t3: counter
+ // -- sp[0*kPointerSize]: the hole (receiver)
+ // -- sp[1*kPointerSize]: number of arguments (tagged)
+ // -- sp[2*kPointerSize]: context
+ // -----------------------------------
__ jmp(&entry);
__ bind(&loop);
- __ Lsa(t0, a2, t4, kPointerSizeLog2 - kSmiTagSize);
+ __ Lsa(t0, t2, t3, kPointerSizeLog2);
__ lw(t1, MemOperand(t0));
__ push(t1);
__ bind(&entry);
- __ Addu(t4, t4, Operand(-2));
- __ Branch(&loop, greater_equal, t4, Operand(zero_reg));
+ __ Addu(t3, t3, Operand(-1));
+ __ Branch(&loop, greater_equal, t3, Operand(zero_reg));
// Call the function.
- // a0: number of arguments
+ // a0: number of arguments (untagged)
// a1: constructor function
// a3: new target
ParameterCount actual(a0);
__ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
- // Store offset of return address for deoptimizer.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
- }
-
// Restore context from the frame.
__ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
-
- if (create_implicit_receiver) {
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: number of arguments (smi-tagged)
- __ JumpIfSmi(v0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a1, a3);
- __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ lw(v0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: number of arguments (smi-tagged)
- __ lw(a1, MemOperand(sp, 1 * kPointerSize));
- } else {
- __ lw(a1, MemOperand(sp));
- }
-
+ // Restore smi-tagged arguments count from the frame.
+ __ lw(a1, MemOperand(sp));
// Leave construct frame.
}
- // ES6 9.2.2. Step 13+
- // Check that the result is not a Smi, indicating that the constructor result
- // from a derived class is neither undefined nor an Object.
- if (check_derived_construct) {
- Label do_throw, dont_throw;
- __ JumpIfSmi(v0, &do_throw);
- __ GetObjectType(v0, a3, a3);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ Branch(&dont_throw, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
- __ bind(&do_throw);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
- }
- __ bind(&dont_throw);
- }
-
+ // Remove caller arguments from the stack and return.
__ Lsa(sp, sp, a1, kPointerSizeLog2 - 1);
__ Addu(sp, sp, kPointerSize);
- if (create_implicit_receiver) {
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
- }
__ Ret();
+}
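
The copy loop above counts t3 down from the untagged argument count, reading each slot from the caller's stack and pushing it onto the expression stack. A minimal C++ sketch of the same traversal, with assumed names standing in for the registers and stack:

#include <vector>

// Illustrative only: last_arg plays the role of t2 (caller SP side), and
// the countdown mirrors t3. Reading the highest index first means argument
// 0 is pushed last, ending up on top of a downward-growing machine stack.
std::vector<int> CopyArguments(const int* last_arg, int argc) {
  std::vector<int> pushed;
  for (int i = argc - 1; i >= 0; --i)  // t3: argc-1, ..., 0
    pushed.push_back(last_arg[i]);
  return pushed;
}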
- // Store offset of trampoline address for deoptimizer. This is the bailout
- // point after the receiver instantiation but before the function invocation.
- // We need to restore some registers in order to continue the above code.
- if (create_implicit_receiver && !is_api_function) {
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Generate_JSConstructStubGeneric(MacroAssembler* masm,
+ bool restrict_constructor_return) {
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a1: constructor function
+ // -- a3: new target
+ // -- cp: context
+ // -- ra: return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ __ Push(cp, a0, a1, a3);
+
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- a1 and sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments (tagged)
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(t2,
+ FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ And(t2, t2,
+ Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
+ t2, t3);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
+ __ Branch(&post_instantiation_deopt_entry);
+
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(v0, Heap::kTheHoleValueRootIndex);
+
+ // ----------- S t a t e -------------
+ // -- v0: receiver
+ // -- Slot 3 / sp[0*kPointerSize]: new target
+ // -- Slot 2 / sp[1*kPointerSize]: constructor function
+ // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[3*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(a3);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(v0, v0);
// ----------- S t a t e -------------
- // -- a0 : newly allocated object
- // -- sp[0] : constructor function
+ // -- a3: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
- __ Pop(a1);
- __ Push(a0, a0);
-
- // Retrieve smi-tagged arguments count from the stack.
+ // Restore constructor function and argument count.
+ __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ lw(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(a0);
- // Retrieve the new target value from the stack. This was placed into the
- // frame description in place of the receiver by the optimizing compiler.
- __ Addu(a3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- __ Lsa(a3, a3, a0, kPointerSizeLog2);
- __ lw(a3, MemOperand(a3));
+ // Set up pointer to last argument.
+ __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(t3, a0);
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a3: new target
+ // -- t2: pointer to last argument
+ // -- t3: counter
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- a1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ Lsa(t0, t2, t3, kPointerSizeLog2);
+ __ lw(t1, MemOperand(t0));
+ __ push(t1);
+ __ bind(&entry);
+ __ Addu(t3, t3, Operand(-1));
+ __ Branch(&loop, greater_equal, t3, Operand(zero_reg));
- // Continue with constructor function invocation.
- __ jmp(&post_instantiation_deopt_entry);
+ // Call the function.
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // ----------- S t a t e -------------
+ // -- v0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // Restore the context from the frame.
+ __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, other_result, leave_frame;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfRoot(v0, Heap::kUndefinedValueRootIndex, &use_receiver);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(v0, &other_result);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, t2, t2);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ __ bind(&other_result);
+ // The result is now neither undefined nor an object.
+ if (restrict_constructor_return) {
+ // Throw if the constructor function is a class constructor.
+ __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(t2,
+ FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ And(t2, t2,
+ Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ Branch(&use_receiver, eq, t2, Operand(zero_reg));
+ } else {
+ __ Branch(&use_receiver);
+ }
+
+ __ bind(&do_throw);
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ lw(v0, MemOperand(sp, 0 * kPointerSize));
+ __ JumpIfRoot(v0, Heap::kTheHoleValueRootIndex, &do_throw);
+
+ __ bind(&leave_frame);
+ // Restore smi-tagged arguments count from the frame.
+ __ lw(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
}
+ // Remove caller arguments from the stack and return.
+ __ Lsa(sp, sp, a1, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(sp, sp, kPointerSize);
+ __ Ret();
}
} // namespace
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
+void Builtins::Generate_JSConstructStubGenericRestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, true);
+}
+void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStubForDerived(
- MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, true);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
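
The two generic stubs differ only in the restrict_constructor_return flag consulted on the other_result path. A hedged C++ sketch of the result selection (illustrative names, not V8's object model):

#include <stdexcept>

enum class Kind { Smi, Undefined, TheHole, JSReceiver, OtherHeapObject };
struct Value { Kind kind; };

Value SelectConstructResult(Value result, Value on_stack_receiver,
                            bool restrict_constructor_return,
                            bool is_class_constructor) {
  if (result.kind == Kind::JSReceiver) return result;  // leave_frame path
  if (result.kind != Kind::Undefined && restrict_constructor_return &&
      is_class_constructor)  // other_result falls through to do_throw
    throw std::runtime_error("ThrowConstructorReturnedNonObject");
  // use_receiver path: a derived constructor that never called super()
  // left TheHole on the stack, which also throws.
  if (on_stack_receiver.kind == Kind::TheHole)
    throw std::runtime_error("ThrowConstructorReturnedNonObject");
  return on_stack_receiver;
}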
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -899,6 +956,38 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
}
+static void ReplaceClosureEntryWithOptimizedCode(
+ MacroAssembler* masm, Register optimized_code_entry, Register closure,
+ Register scratch1, Register scratch2, Register scratch3) {
+ Register native_context = scratch1;
+
+ // Store code entry in the closure.
+ __ Addu(optimized_code_entry, optimized_code_entry,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ sw(optimized_code_entry,
+ FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+
+ // Link the closure into the optimized function list.
+ __ lw(native_context, NativeContextMemOperand());
+ __ lw(scratch2,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ sw(scratch2,
+ FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
+ scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ sw(closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Save closure before the write barrier.
+ __ mov(scratch2, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure,
+ scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ mov(closure, scratch2);
+}
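
Reduced to plain pointers, the helper self-heals the closure and links it at the head of the context's optimized-function list; a sketch under assumed layouts (the real code above also emits write barriers for each store):

struct Code { unsigned char* instructions; };
struct Closure { unsigned char* code_entry; Closure* next_function_link; };
struct NativeContext { Closure* optimized_functions_list; };

void ReplaceClosureEntry(Closure* closure, Code* code, NativeContext* cx) {
  closure->code_entry = code->instructions;        // self-heal the closure
  closure->next_function_link = cx->optimized_functions_list;
  cx->optimized_functions_list = closure;          // new list head
}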
+
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
Register args_count = scratch;
@@ -939,6 +1028,19 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(a1);
+ // First check if there is optimized code in the feedback vector which we
+ // could call instead.
+ Label switch_to_optimized_code;
+ Register optimized_code_entry = t0;
+ __ lw(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+ __ lw(a0, FieldMemOperand(a0, Cell::kValueOffset));
+ __ lw(optimized_code_entry,
+ FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
+ __ lw(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ lw(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
@@ -1057,6 +1159,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ sw(t0, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(a1, t0, t1);
__ Jump(t0);
+
+ // If there is optimized code in the feedback vector, check whether it is
+ // still valid; if so, self-heal the closure and call the optimized code.
+ __ bind(&switch_to_optimized_code);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ Label gotta_call_runtime;
+
+ // Check if the optimized code is marked for deopt.
+ __ lw(t1,
+ FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
+ __ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, t1,
+ t2);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code is marked for deoptimization; bail out to the
+ // EvictOptimizedCodeSlot runtime function, which clears the feedback
+ // vector's optimized code slot.
+ __ bind(&gotta_call_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
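
The new check gives the trampoline three possible outcomes. Roughly, as a C++ decision table (illustrative names only):

enum class Entry { Interpret, RunOptimized, EvictAndCompileLazy };

Entry ChooseEntry(bool slot_is_cleared, bool marked_for_deoptimization) {
  if (slot_is_cleared) return Entry::Interpret;  // WeakCell value was a Smi
  if (marked_for_deoptimization)                 // kMarkedForDeoptimizationBit
    return Entry::EvictAndCompileLazy;           // kEvictOptimizedCodeSlot
  return Entry::RunOptimized;                    // self-heal, then tail call
}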
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1170,8 +1295,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// context at this point).
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
- __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with a0, a1, and a3 unmodified.
__ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
@@ -1298,111 +1422,49 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -- a1 : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label gotta_call_runtime;
Label try_shared;
- Label loop_top, loop_bottom;
- Register argument_count = a0;
Register closure = a1;
- Register new_target = a3;
- Register map = a0;
Register index = a2;
// Do we have a valid feedback vector?
__ lw(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ lw(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime_no_stack);
-
- __ push(argument_count);
- __ push(new_target);
- __ push(closure);
-
- __ lw(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ lw(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ lw(index, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ Branch(&try_shared, lt, index, Operand(Smi::FromInt(2)));
-
- // a3 : native context
- // a2 : length / index
- // a0 : optimized code map
- // stack[0] : new target
- // stack[4] : closure
- Register native_context = a3;
- __ lw(native_context, NativeContextMemOperand());
-
- __ bind(&loop_top);
- Register temp = a1;
- Register array_pointer = t1;
-
- // Does the native context match?
- __ sll(at, index, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(array_pointer, map, Operand(at));
- __ lw(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousContext));
- __ lw(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ Branch(&loop_bottom, ne, temp, Operand(native_context));
+ __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
- // Code available?
+ // Is optimized code available in the feedback vector?
Register entry = t0;
- __ lw(entry,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ lw(entry, FieldMemOperand(
+ index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ lw(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found code. Get it into the closure and return.
- __ pop(closure);
- // Store code entry in the closure.
- __ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sw(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(closure, entry, t1);
+ // Found code. Check whether it is marked for deoptimization; if so, call
+ // into the runtime to clear the optimized code slot.
+ __ lw(t1, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
+ __ And(t1, t1, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
- // Link the closure into the optimized function list.
- // t0 : code entry
- // a3 : native context
- // a1 : closure
- __ lw(t1,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ sw(t1, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, t1, a0,
- kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ sw(closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Save closure before the write barrier.
- __ mov(t1, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
- __ mov(closure, t1);
- __ pop(new_target);
- __ pop(argument_count);
+ // Code is good, get it into the closure and tail call.
+ ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, t1, t2);
__ Jump(entry);
- __ bind(&loop_bottom);
- __ Subu(index, index,
- Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
-
- // We found no code.
+ // We found no optimized code.
__ bind(&try_shared);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
__ lw(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
__ lbu(t1, FieldMemOperand(entry,
SharedFunctionInfo::kMarkedForTierUpByteOffset));
__ And(t1, t1,
Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ Branch(&gotta_call_runtime_no_stack, ne, t1, Operand(zero_reg));
+ __ Branch(&gotta_call_runtime, ne, t1, Operand(zero_reg));
// If SFI points to anything other than CompileLazy, install that.
__ lw(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Move(t1, masm->CodeObject());
- __ Branch(&gotta_call_runtime_no_stack, eq, entry, Operand(t1));
+ __ Branch(&gotta_call_runtime, eq, entry, Operand(t1));
// Install the SFI's code entry.
__ Addu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1411,10 +1473,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Jump(entry);
__ bind(&gotta_call_runtime);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
- __ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
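
CompileLazy now reads a single optimized-code slot in the feedback vector instead of walking the old per-context code map, which is why the push/pop bookkeeping around the search could be deleted. A condensed C++ sketch of the new lookup order (assumed names):

enum class Step { RunOptimized, InstallSharedCode, CallRuntimeCompileLazy };

Step CompileLazyLookup(bool has_feedback_vector, bool slot_holds_code,
                       bool marked_for_deopt, bool marked_for_tier_up,
                       bool sfi_code_is_compile_lazy) {
  if (!has_feedback_vector) return Step::CallRuntimeCompileLazy;
  if (slot_holds_code)
    return marked_for_deopt ? Step::CallRuntimeCompileLazy
                            : Step::RunOptimized;
  if (marked_for_tier_up || sfi_code_is_compile_lazy)
    return Step::CallRuntimeCompileLazy;
  return Step::InstallSharedCode;  // install the SFI's code entry and jump
}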
@@ -1551,8 +1609,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
__ PushStandardFrame(a1);
// Jump to point after the code-age stub.
- __ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength));
- __ Jump(a0);
+ __ Jump(a0, kNoCodeAgeSequenceLength);
}
void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
@@ -2111,68 +2168,64 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- a1 : the target to call (can be any Object)
- // -- a2 : start index (to support rest parameters)
- // -- ra : return address.
- // -- sp[0] : thisArgument
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a3 : the new.target (for [[Construct]] calls)
+ // -- a1 : the target to call (can be any Object)
+ // -- a2 : start index (to support rest parameters)
// -----------------------------------
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
- __ lw(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ lw(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&arguments_adaptor, eq, a0,
+ __ lw(t3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ lw(t2, MemOperand(t3, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ Branch(&arguments_adaptor, eq, t2,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
{
- __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ lw(a0, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a0,
- FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(a3, fp);
+ __ lw(t2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ lw(t2, FieldMemOperand(t2, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(t2,
+ FieldMemOperand(t2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(t3, fp);
}
__ Branch(&arguments_done);
__ bind(&arguments_adaptor);
{
// Just get the length from the ArgumentsAdaptorFrame.
- __ lw(a0, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ lw(t2, MemOperand(t3, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
- Label stack_empty, stack_done, stack_overflow;
- __ SmiUntag(a0);
- __ Subu(a0, a0, a2);
- __ Branch(&stack_empty, le, a0, Operand(zero_reg));
+ Label stack_done, stack_overflow;
+ __ SmiUntag(t2);
+ __ Subu(t2, t2, a2);
+ __ Branch(&stack_done, le, t2, Operand(zero_reg));
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, a0, t0, t1, &stack_overflow);
+ Generate_StackOverflowCheck(masm, t2, t0, t1, &stack_overflow);
// Forward the arguments from the caller frame.
{
Label loop;
- __ mov(a2, a0);
+ __ Addu(a0, a0, t2);
__ bind(&loop);
{
- __ Lsa(at, a3, a2, kPointerSizeLog2);
+ __ Lsa(at, t3, t2, kPointerSizeLog2);
__ lw(at, MemOperand(at, 1 * kPointerSize));
__ push(at);
- __ Subu(a2, a2, Operand(1));
- __ Branch(&loop, ne, a2, Operand(zero_reg));
+ __ Subu(t2, t2, Operand(1));
+ __ Branch(&loop, ne, t2, Operand(zero_reg));
}
}
}
__ Branch(&stack_done);
__ bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&stack_empty);
- {
- // We just pass the receiver, which is already on the stack.
- __ li(a0, Operand(0));
- }
__ bind(&stack_done);
+ // Tail-call to the {code} handler.
__ Jump(code, RelocInfo::CODE_TARGET);
}
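
ForwardVarargs pushes the caller arguments that lie past start index a2 on top of those already set up, then tail-calls the handler; a sketch with assumed names:

#include <vector>

// Illustrative only: caller_frame[1..caller_argc] stands for the caller's
// argument slots, matching the MemOperand(at, 1 * kPointerSize) bias above.
void ForwardVarargs(const int* caller_frame, int caller_argc, int start_index,
                    std::vector<int>& stack, int& argc) {
  int count = caller_argc - start_index;
  if (count <= 0) return;            // stack_done: nothing to forward
  argc += count;                     // a0 += t2
  for (int i = count; i >= 1; --i)   // t2 counts down to zero
    stack.push_back(caller_frame[i]);
}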
@@ -2471,8 +2524,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
__ li(at, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
masm->isolate())));
__ lw(at, MemOperand(at));
- __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, Code::kHeaderSize - kHeapObjectTag);
}
// static
@@ -2696,8 +2748,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// context at this point).
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
- __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
}
// static
@@ -2787,8 +2838,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ li(at, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
__ lw(at, MemOperand(at));
- __ Addu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, Code::kHeaderSize - kHeapObjectTag);
}
// static
@@ -3060,8 +3110,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
__ MultiPop(gp_regs);
}
// Now jump to the instructions of the returned code object.
- __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, v0, Code::kHeaderSize - kHeapObjectTag);
}
#undef __
diff --git a/deps/v8/src/builtins/mips64/OWNERS b/deps/v8/src/builtins/mips64/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/builtins/mips64/OWNERS
+++ b/deps/v8/src/builtins/mips64/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index ae1edcae8c..4d80993952 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -32,7 +32,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// JumpToExternalReference expects a0 to contain the number of arguments
// including the receiver and the extra arguments.
@@ -74,7 +74,7 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin InternalArray functions should be maps.
- __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, a4);
__ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, a4,
Operand(zero_reg));
@@ -103,7 +103,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
- __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(a2, a4);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction1, a4,
Operand(zero_reg));
@@ -139,7 +139,7 @@ void Builtins::Generate_NumberConstructor(MacroAssembler* masm) {
__ Dsubu(t1, a0, Operand(1)); // In delay slot.
__ mov(t0, a0); // Store argc in t0.
__ Dlsa(at, sp, t1, kPointerSizeLog2);
- __ ld(a0, MemOperand(at));
+ __ Ld(a0, MemOperand(at));
}
// 2a. Convert first argument to number.
@@ -176,7 +176,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
// -----------------------------------
// 1. Make sure we operate in the context of the called function.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// 2. Load the first argument into a0 and get rid of the rest (including the
// receiver).
@@ -186,7 +186,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Dsubu(a0, a0, Operand(1)); // In delay slot.
__ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ ld(a0, MemOperand(at));
+ __ Ld(a0, MemOperand(at));
__ jmp(&done);
__ bind(&no_arguments);
__ Move(a0, Smi::kZero);
@@ -234,7 +234,7 @@ void Builtins::Generate_NumberConstructor_ConstructStub(MacroAssembler* masm) {
__ LeaveBuiltinFrame(cp, a1, t0);
__ SmiUntag(t0);
}
- __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+ __ Sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
__ bind(&drop_frame_and_ret);
{
@@ -262,7 +262,7 @@ void Builtins::Generate_StringConstructor(MacroAssembler* masm) {
__ Dsubu(t1, a0, Operand(1)); // In delay slot.
__ mov(t0, a0); // Store argc in t0.
__ Dlsa(at, sp, t1, kPointerSizeLog2);
- __ ld(a0, MemOperand(at));
+ __ Ld(a0, MemOperand(at));
}
// 2a. At least one argument, return a0 if it's a string, otherwise
@@ -326,7 +326,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
// -----------------------------------
// 1. Make sure we operate in the context of the called function.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// 2. Load the first argument into a0 and get rid of the rest (including the
// receiver).
@@ -336,7 +336,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ Branch(USE_DELAY_SLOT, &no_arguments, eq, a0, Operand(zero_reg));
__ Dsubu(a0, a0, Operand(1));
__ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ ld(a0, MemOperand(at));
+ __ Ld(a0, MemOperand(at));
__ jmp(&done);
__ bind(&no_arguments);
__ LoadRoot(a0, Heap::kempty_stringRootIndex);
@@ -386,7 +386,7 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
__ LeaveBuiltinFrame(cp, a1, t0);
__ SmiUntag(t0);
}
- __ sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
+ __ Sd(a0, FieldMemOperand(v0, JSValue::kValueOffset));
__ bind(&drop_frame_and_ret);
{
@@ -396,8 +396,8 @@ void Builtins::Generate_StringConstructor_ConstructStub(MacroAssembler* masm) {
}
static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
- __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
__ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
}
@@ -444,11 +444,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
namespace {
-void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
- Label post_instantiation_deopt_entry;
-
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -458,8 +454,6 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// -- sp[...]: constructor arguments
// -----------------------------------
- Isolate* isolate = masm->isolate();
-
// Enter a construct frame.
{
FrameScope scope(masm, StackFrame::CONSTRUCT);
@@ -467,183 +461,253 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Preserve the incoming parameters on the stack.
__ SmiTag(a0);
__ Push(cp, a0);
-
- if (create_implicit_receiver) {
- __ Push(a1, a3);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ mov(t0, v0);
- __ Pop(a1, a3);
-
- // ----------- S t a t e -------------
- // -- a1: constructor function
- // -- a3: new target
- // -- t0: newly allocated object
- // -----------------------------------
- __ ld(a0, MemOperand(sp));
- }
__ SmiUntag(a0);
- if (create_implicit_receiver) {
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(t0, t0);
- } else {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- }
-
- // Deoptimizer re-enters stub code here.
- __ bind(&post_instantiation_deopt_entry);
+ // The receiver for the builtin/api call.
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
// Set up pointer to last argument.
- __ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
- // a0: number of arguments
- // a1: constructor function
- // a2: address of last argument (caller sp)
- // a3: new target
- // t0: number of arguments (smi-tagged)
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: number of arguments (smi-tagged)
Label loop, entry;
- __ mov(t0, a0);
+ __ mov(t3, a0);
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a3: new target
+ // -- t2: pointer to last argument
+ // -- t3: counter
+ // -- sp[0*kPointerSize]: the hole (receiver)
+ // -- sp[1*kPointerSize]: number of arguments (tagged)
+ // -- sp[2*kPointerSize]: context
+ // -----------------------------------
__ jmp(&entry);
__ bind(&loop);
- __ Dlsa(a4, a2, t0, kPointerSizeLog2);
- __ ld(a5, MemOperand(a4));
- __ push(a5);
+ __ Dlsa(t0, t2, t3, kPointerSizeLog2);
+ __ Ld(t1, MemOperand(t0));
+ __ push(t1);
__ bind(&entry);
- __ Daddu(t0, t0, Operand(-1));
- __ Branch(&loop, greater_equal, t0, Operand(zero_reg));
+ __ Daddu(t3, t3, Operand(-1));
+ __ Branch(&loop, greater_equal, t3, Operand(zero_reg));
// Call the function.
- // a0: number of arguments
+ // a0: number of arguments (untagged)
// a1: constructor function
// a3: new target
ParameterCount actual(a0);
__ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
- // Store offset of return address for deoptimizer.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
- }
-
// Restore context from the frame.
- __ ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
-
- if (create_implicit_receiver) {
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: number of arguments (smi-tagged)
- __ JumpIfSmi(v0, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- __ GetObjectType(v0, a1, a3);
- __ Branch(&exit, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ ld(v0, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // v0: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: number of arguments (smi-tagged)
- __ ld(a1, MemOperand(sp, 1 * kPointerSize));
- } else {
- __ ld(a1, MemOperand(sp));
- }
-
+ __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ // Restore smi-tagged arguments count from the frame.
+ __ Ld(a1, MemOperand(sp));
// Leave construct frame.
}
- // ES6 9.2.2. Step 13+
- // Check that the result is not a Smi, indicating that the constructor result
- // from a derived class is neither undefined nor an Object.
- if (check_derived_construct) {
- Label do_throw, dont_throw;
- __ JumpIfSmi(v0, &do_throw);
- __ GetObjectType(v0, a3, a3);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ Branch(&dont_throw, greater_equal, a3, Operand(FIRST_JS_RECEIVER_TYPE));
- __ bind(&do_throw);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
- }
- __ bind(&dont_throw);
- }
-
+ // Remove caller arguments from the stack and return.
__ SmiScale(a4, a1, kPointerSizeLog2);
__ Daddu(sp, sp, a4);
__ Daddu(sp, sp, kPointerSize);
- if (create_implicit_receiver) {
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
- }
__ Ret();
+}
+
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Generate_JSConstructStubGeneric(MacroAssembler* masm,
+ bool restrict_constructor_return) {
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a1: constructor function
+ // -- a3: new target
+ // -- cp: context
+ // -- ra: return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
- // Store offset of trampoline address for deoptimizer. This is the bailout
- // point after the receiver instantiation but before the function invocation.
- // We need to restore some registers in order to continue the above code.
- if (create_implicit_receiver && !is_api_function) {
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ __ Push(cp, a0, a1, a3);
+
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- a1 and sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments (tagged)
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ __ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(t2,
+ FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ And(t2, t2,
+ Operand(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ Branch(&not_create_implicit_receiver, ne, t2, Operand(zero_reg));
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
+ t2, t3);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
+ __ Branch(&post_instantiation_deopt_entry);
+
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(v0, Heap::kTheHoleValueRootIndex);
+
+ // ----------- S t a t e -------------
+ // -- v0: receiver
+ // -- Slot 3 / sp[0*kPointerSize]: new target
+ // -- Slot 2 / sp[1*kPointerSize]: constructor function
+ // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[3*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(a3);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(v0, v0);
// ----------- S t a t e -------------
- // -- a0 : newly allocated object
- // -- sp[0] : constructor function
+ // -- a3: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
// -----------------------------------
- __ Pop(a1);
- __ Push(a0, a0);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ ld(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Restore constructor function and argument count.
+ __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ Ld(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(a0);
- // Retrieve the new target value from the stack. This was placed into the
- // frame description in place of the receiver by the optimizing compiler.
- __ Daddu(a3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- __ Dlsa(a3, a3, a0, kPointerSizeLog2);
- __ ld(a3, MemOperand(a3));
+ // Set up pointer to last argument.
+ __ Daddu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ mov(t3, a0);
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a3: new target
+ // -- t2: pointer to last argument
+ // -- t3: counter
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- a1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ Dlsa(t0, t2, t3, kPointerSizeLog2);
+ __ Ld(t1, MemOperand(t0));
+ __ push(t1);
+ __ bind(&entry);
+ __ Daddu(t3, t3, Operand(-1));
+ __ Branch(&loop, greater_equal, t3, Operand(zero_reg));
- // Continue with constructor function invocation.
- __ jmp(&post_instantiation_deopt_entry);
+ // Call the function.
+ ParameterCount actual(a0);
+ __ InvokeFunction(a1, a3, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // ----------- S t a t e -------------
+ // -- v0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // Restore the context from the frame.
+ __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, other_result, leave_frame;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfRoot(v0, Heap::kUndefinedValueRootIndex, &use_receiver);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(v0, &other_result);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(v0, t2, t2);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ Branch(&leave_frame, greater_equal, t2, Operand(FIRST_JS_RECEIVER_TYPE));
+
+ __ bind(&other_result);
+ // The result is now neither undefined nor an object.
+ if (restrict_constructor_return) {
+ // Throw if the constructor function is a class constructor.
+ __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ Ld(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lbu(t2,
+ FieldMemOperand(t2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ And(t2, t2,
+ Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ Branch(&use_receiver, eq, t2, Operand(zero_reg));
+ } else {
+ __ Branch(&use_receiver);
+ }
+
+ __ bind(&do_throw);
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ Ld(v0, MemOperand(sp, 0 * kPointerSize));
+ __ JumpIfRoot(v0, Heap::kTheHoleValueRootIndex, &do_throw);
+
+ __ bind(&leave_frame);
+ // Restore smi-tagged arguments count from the frame.
+ __ Ld(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
}
+ // Remove caller arguments from the stack and return.
+ __ SmiScale(a4, a1, kPointerSizeLog2);
+ __ Daddu(sp, sp, a4);
+ __ Daddu(sp, sp, kPointerSize);
+ __ Ret();
}
} // namespace
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
+void Builtins::Generate_JSConstructStubGenericRestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, true);
+}
+void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStubForDerived(
- MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, true);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
// static
@@ -665,13 +729,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Branch(&async_await, equal, t8,
Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ Sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
__ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3,
kRAHasNotBeenSaved, kDontSaveFPRegs);
__ jmp(&done_store_input);
__ bind(&async_await);
- __ sd(v0, FieldMemOperand(
+ __ Sd(v0, FieldMemOperand(
a1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset));
__ RecordWriteField(a1, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
v0, a3, kRAHasNotBeenSaved, kDontSaveFPRegs);
@@ -680,11 +744,11 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// `a3` no longer holds SuspendFlags
// Store resume mode into generator object.
- __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
+ __ Sd(a2, FieldMemOperand(a1, JSGeneratorObject::kResumeModeOffset));
// Load suspended function and context.
- __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
- __ ld(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Ld(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
// Flood function if we are stepping.
Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -692,19 +756,19 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
ExternalReference debug_hook =
ExternalReference::debug_hook_on_function_call_address(masm->isolate());
__ li(a5, Operand(debug_hook));
- __ lb(a5, MemOperand(a5));
+ __ Lb(a5, MemOperand(a5));
__ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
// Flood function if we need to continue stepping in the suspended generator.
ExternalReference debug_suspended_generator =
ExternalReference::debug_suspended_generator_address(masm->isolate());
__ li(a5, Operand(debug_suspended_generator));
- __ ld(a5, MemOperand(a5));
+ __ Ld(a5, MemOperand(a5));
__ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
__ bind(&stepping_prepared);
// Push receiver.
- __ ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+ __ Ld(a5, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
__ Push(a5);
// ----------- S t a t e -------------
@@ -720,8 +784,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// context allocation for any variables in generators, the actual argument
// values have already been copied into the context and these dummy values
// will never be used.
- __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a3,
+ __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Lw(a3,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
{
Label done_loop, loop;
@@ -735,23 +799,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Underlying function needs to have bytecode available.
if (FLAG_debug_code) {
- __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
__ GetObjectType(a3, a3, a3);
__ Assert(eq, kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE));
}
// Resume (Ignition/TurboFan) generator object.
{
- __ ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a0,
+ __ Ld(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Lw(a0,
FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
__ Move(a3, a1);
__ Move(a1, a4);
- __ ld(a2, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Jump(a2);
}
@@ -763,7 +827,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Pop(a1, a2);
}
__ Branch(USE_DELAY_SLOT, &stepping_prepared);
- __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
__ bind(&prepare_step_in_suspended_generator);
{
@@ -773,7 +837,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Pop(a1, a2);
}
__ Branch(USE_DELAY_SLOT, &stepping_prepared);
- __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
}
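
Resuming pushes the receiver plus one dummy slot per formal parameter before jumping into the function with the generator object in the new.target register. A sketch of that frame set-up (assumed names; per the comment above, the dummies are never read because the real argument values live in the generator's context):

#include <vector>

struct Generator { int receiver; };

void PushResumeFrame(std::vector<int>& stack, const Generator& gen,
                     int formal_parameter_count, int undefined_marker = -1) {
  stack.push_back(gen.receiver);
  for (int i = 0; i < formal_parameter_count; ++i)
    stack.push_back(undefined_marker);  // dummy arguments, never used
}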
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -831,7 +895,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
ExternalReference context_address(Isolate::kContextAddress,
masm->isolate());
__ li(cp, Operand(context_address));
- __ ld(cp, MemOperand(cp));
+ __ Ld(cp, MemOperand(cp));
// Push the function and the receiver onto the stack.
__ Push(a1, a2);
@@ -852,9 +916,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ nop(); // Branch delay slot nop.
// a6 points past last arg.
__ bind(&loop);
- __ ld(a4, MemOperand(s0)); // Read next parameter.
+ __ Ld(a4, MemOperand(s0)); // Read next parameter.
__ daddiu(s0, s0, kPointerSize);
- __ ld(a4, MemOperand(a4)); // Dereference handle.
+ __ Ld(a4, MemOperand(a4)); // Dereference handle.
__ push(a4); // Push parameter.
__ bind(&entry);
__ Branch(&loop, ne, s0, Operand(a6));
@@ -893,13 +957,45 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void ReplaceClosureEntryWithOptimizedCode(
+ MacroAssembler* masm, Register optimized_code_entry, Register closure,
+ Register scratch1, Register scratch2, Register scratch3) {
+ Register native_context = scratch1;
+
+ // Store code entry in the closure.
+ __ Daddu(optimized_code_entry, optimized_code_entry,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Sd(optimized_code_entry,
+ FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+
+ // Link the closure into the optimized function list.
+ __ Ld(native_context, NativeContextMemOperand());
+ __ Ld(scratch2,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ Sd(scratch2,
+ FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
+ scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ Sd(closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ // Save closure before the write barrier.
+ __ mov(scratch2, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure,
+ scratch3, kRAHasNotBeenSaved, kDontSaveFPRegs);
+ __ mov(closure, scratch2);
+}
+
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
Register args_count = scratch;
// Get the arguments + receiver count.
- __ ld(args_count,
+ __ Ld(args_count,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ lw(t0, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+ __ Lw(t0, FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
// Leave the frame (also dropping the register file).
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
@@ -932,15 +1028,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(a1);
+ // First check if there is optimized code in the feedback vector which we
+ // could call instead.
+ Label switch_to_optimized_code;
+ Register optimized_code_entry = a4;
+ __ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+ __ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
+ __ Ld(optimized_code_entry,
+ FieldMemOperand(a0, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
+ __ Ld(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
- __ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
Label load_debug_bytecode_array, bytecode_array_loaded;
Register debug_info = kInterpreterBytecodeArrayRegister;
DCHECK(!debug_info.is(a0));
- __ ld(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
+ __ Ld(debug_info, FieldMemOperand(a0, SharedFunctionInfo::kDebugInfoOffset));
__ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
- __ ld(kInterpreterBytecodeArrayRegister,
+ __ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(a0, SharedFunctionInfo::kFunctionDataOffset));
__ bind(&bytecode_array_loaded);
@@ -948,18 +1057,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// TODO(rmcilroy) Remove self healing once liveedit only has to deal with
// Ignition bytecode.
Label switch_to_different_code_kind;
- __ ld(a0, FieldMemOperand(a0, SharedFunctionInfo::kCodeOffset));
+ __ Ld(a0, FieldMemOperand(a0, SharedFunctionInfo::kCodeOffset));
__ Branch(&switch_to_different_code_kind, ne, a0,
Operand(masm->CodeObject())); // Self-reference to this code.
// Increment invocation count for the function.
- __ ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
- __ ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
- __ ld(a4, FieldMemOperand(
+ __ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+ __ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
+ __ Ld(a4, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
- __ sd(a4, FieldMemOperand(
+ __ Sd(a4, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
@@ -989,7 +1098,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Allocate the local and temporary register file on the stack.
{
// Load frame size (word) from the BytecodeArray object.
- __ lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ __ Lw(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kFrameSizeOffset));
// Do a stack check to ensure we don't go over the limit.
@@ -1023,9 +1132,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Dispatch to the first bytecode handler for the function.
__ Daddu(a0, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ lbu(a0, MemOperand(a0));
+ __ Lbu(a0, MemOperand(a0));
__ Dlsa(at, kInterpreterDispatchTableRegister, a0, kPointerSizeLog2);
- __ ld(at, MemOperand(at));
+ __ Ld(at, MemOperand(at));
__ Call(at);
masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
@@ -1035,7 +1144,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load debug copy of the bytecode array.
__ bind(&load_debug_bytecode_array);
- __ ld(kInterpreterBytecodeArrayRegister,
+ __ Ld(kInterpreterBytecodeArrayRegister,
FieldMemOperand(debug_info, DebugInfo::kDebugBytecodeArrayIndex));
__ Branch(&bytecode_array_loaded);
@@ -1044,12 +1153,35 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// closure by switching the code entry field over to the new code as well.
__ bind(&switch_to_different_code_kind);
__ LeaveFrame(StackFrame::JAVA_SCRIPT);
- __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kCodeOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kCodeOffset));
__ Daddu(a4, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sd(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Sd(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(a1, a4, a5);
__ Jump(a4);
+
+ // If there is optimized code in the feedback vector, check whether it is
+ // still valid; if so, self-heal the closure and call the optimized code.
+ __ bind(&switch_to_optimized_code);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ Label gotta_call_runtime;
+
+ // Check if the optimized code is marked for deopt.
+ __ Lw(a5,
+ FieldMemOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset));
+ __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
+
+ // Optimized code is good: get it into the closure, link the closure into
+ // the optimized functions list, then tail-call the optimized code.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, a1, t3, a5,
+ t0);
+ __ Jump(optimized_code_entry);
+
+ // Optimized code is marked for deopt; bail out to the CompileLazy runtime
+ // function, which will clear the feedback vector's optimized code slot.
+ __ bind(&gotta_call_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
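
The new tail of the trampoline gates the optimized-code jump on a single flag bit. A minimal C++ sketch of that test, with an assumed bit position (the real one is defined by Code::kMarkedForDeoptimizationBit in the flags word loaded from Code::kKindSpecificFlags1Offset):

#include <cstdint>
#include <iostream>

// Illustrative only: the actual bit position is V8-internal.
constexpr uint32_t kMarkedForDeoptimizationBit = 3;

bool MarkedForDeoptimization(uint32_t kind_specific_flags) {
  // Mirrors the Lw / And / Branch-ne-zero sequence above.
  return (kind_specific_flags & (1u << kMarkedForDeoptimizationBit)) != 0;
}

int main() {
  std::cout << MarkedForDeoptimization(1u << kMarkedForDeoptimizationBit)
            << "\n";                                // 1: bail out to runtime
  std::cout << MarkedForDeoptimization(0) << "\n";  // 0: jump to the code
}
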
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1080,7 +1212,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
Label loop_header, loop_check;
__ Branch(&loop_check);
__ bind(&loop_header);
- __ ld(scratch, MemOperand(index));
+ __ Ld(scratch, MemOperand(index));
__ Daddu(index, index, Operand(-kPointerSize));
__ push(scratch);
__ bind(&loop_check);
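
The push loop above walks the argument area from the highest slot downward, one pointer per step. A rough C++ model of the traversal (the loop_check comparison is assumed from context, since the hunk cuts off before the closing branch):

#include <cstdint>
#include <vector>

// index starts at the highest argument slot; last_arg marks the slot just
// below the final argument to push. Both pointers stand in for registers.
void PushArgs(const uint64_t* index, const uint64_t* last_arg,
              std::vector<uint64_t>* stack) {
  while (index > last_arg) {   // loop_check: branch back while above last_arg
    stack->push_back(*index);  // Ld(scratch, MemOperand(index)); push(scratch)
    --index;                   // Daddu(index, index, Operand(-kPointerSize))
  }
}
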
@@ -1161,8 +1293,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
- __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
__ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
@@ -1199,10 +1331,10 @@ void Builtins::Generate_InterpreterPushArgsThenConstructArray(
// Push a slot for the receiver.
__ push(zero_reg);
- Generate_StackOverflowCheck(masm, a4, a5, a6, &stack_overflow);
+ Generate_StackOverflowCheck(masm, a0, a5, a6, &stack_overflow);
// This function modifies a3, a5 and a6.
- Generate_InterpreterPushArgs(masm, a4, a3, a5, a6);
+ Generate_InterpreterPushArgs(masm, a0, a3, a5, a6);
// ArrayConstructor stub expects constructor in a3. Set it here.
__ mov(a3, a1);
@@ -1234,7 +1366,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
masm->isolate())));
// Get the bytecode array pointer from the frame.
- __ ld(kInterpreterBytecodeArrayRegister,
+ __ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
if (FLAG_debug_code) {
@@ -1248,16 +1380,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
}
// Get the target bytecode offset from the frame.
- __ lw(
+ __ Lw(
kInterpreterBytecodeOffsetRegister,
UntagSmiMemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
// Dispatch to the target bytecode.
__ Daddu(a1, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister);
- __ lbu(a1, MemOperand(a1));
+ __ Lbu(a1, MemOperand(a1));
__ Dlsa(a1, kInterpreterDispatchTableRegister, a1, kPointerSizeLog2);
- __ ld(a1, MemOperand(a1));
+ __ Ld(a1, MemOperand(a1));
__ Jump(a1);
}
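
Both dispatch sites follow the same pattern: fetch one bytecode, scale it to a table slot, load the handler address, and jump. A self-contained sketch with a hypothetical two-entry handler table:

#include <cstdint>
#include <cstdio>

using Handler = void (*)();
void HandleLdar() { std::puts("Ldar handler"); }
void HandleStar() { std::puts("Star handler"); }

int main() {
  Handler dispatch_table[2] = {HandleLdar, HandleStar};  // hypothetical table
  const uint8_t bytecode_array[] = {1, 0};
  int offset = 0;
  uint8_t bytecode = bytecode_array[offset];  // Lbu(a1, MemOperand(a1))
  dispatch_table[bytecode]();  // Dlsa + Ld(a1, MemOperand(a1)); Jump(a1)
}
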
@@ -1265,9 +1397,9 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
// Advance the current bytecode offset stored within the given interpreter
// stack frame. This simulates what all bytecode handlers do upon completion
// of the underlying operation.
- __ ld(a1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
- __ ld(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Ld(a1, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ld(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ Ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(kInterpreterAccumulatorRegister, a1, a2);
@@ -1275,7 +1407,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
__ mov(a2, v0); // Result is the new bytecode offset.
__ Pop(kInterpreterAccumulatorRegister);
}
- __ sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
}
@@ -1291,123 +1423,57 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// -- a1 : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
- Label gotta_call_runtime, gotta_call_runtime_no_stack;
+ Label gotta_call_runtime;
Label try_shared;
- Label loop_top, loop_bottom;
- Register argument_count = a0;
Register closure = a1;
- Register new_target = a3;
- Register map = a0;
Register index = a2;
// Do we have a valid feedback vector?
- __ ld(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
- __ ld(index, FieldMemOperand(index, Cell::kValueOffset));
- __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
- &gotta_call_runtime_no_stack);
-
- __ push(argument_count);
- __ push(new_target);
- __ push(closure);
-
- __ ld(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ ld(map, FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ ld(index, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ Branch(&try_shared, lt, index, Operand(Smi::FromInt(2)));
-
- // a3 : native context
- // a2 : length / index
- // a0 : optimized code map
- // stack[0] : new target
- // stack[4] : closure
- Register native_context = a3;
- __ ld(native_context, NativeContextMemOperand());
-
- __ bind(&loop_top);
- Register temp = a1;
- Register array_pointer = a5;
-
- // Does the native context match?
- __ SmiScale(at, index, kPointerSizeLog2);
- __ Daddu(array_pointer, map, Operand(at));
- __ ld(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousContext));
- __ ld(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ Branch(&loop_bottom, ne, temp, Operand(native_context));
-
- // Code available?
+ __ Ld(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
+ __ Ld(index, FieldMemOperand(index, Cell::kValueOffset));
+ __ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
+
+ // Is optimized code available in the feedback vector?
Register entry = a4;
- __ ld(entry,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousCachedCode));
- __ ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
+ __ Ld(entry, FieldMemOperand(
+ index, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
+ __ Ld(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found code. Get it into the closure and return.
- __ pop(closure);
- // Store code entry in the closure.
- __ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
- __ RecordWriteCodeEntryField(closure, entry, a5);
+ // Found code. Check if it is marked for deopt; if so, call into the runtime
+ // to clear the optimized code slot.
+ __ Lw(a5, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
+ __ And(a5, a5, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
- // Link the closure into the optimized function list.
- // a4 : code entry
- // a3 : native context
- // a1 : closure
- __ ld(a5,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ sd(a5, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset));
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, a5, a0,
- kRAHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ sd(closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- // Save closure before the write barrier.
- __ mov(a5, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure, a0,
- kRAHasNotBeenSaved, kDontSaveFPRegs);
- __ mov(closure, a5);
- __ pop(new_target);
- __ pop(argument_count);
+ // Code is good: get it into the closure and tail-call it.
+ ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, t3, a5, t0);
__ Jump(entry);
- __ bind(&loop_bottom);
- __ Dsubu(index, index,
- Operand(Smi::FromInt(SharedFunctionInfo::kEntryLength)));
- __ Branch(&loop_top, gt, index, Operand(Smi::FromInt(1)));
-
- // We found no code.
+ // We found no optimized code.
__ bind(&try_shared);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
- __ ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(entry, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
- __ lbu(a5, FieldMemOperand(entry,
+ __ Lbu(a5, FieldMemOperand(entry,
SharedFunctionInfo::kMarkedForTierUpByteOffset));
__ And(a5, a5,
Operand(1 << SharedFunctionInfo::kMarkedForTierUpBitWithinByte));
- __ Branch(&gotta_call_runtime_no_stack, ne, a5, Operand(zero_reg));
+ __ Branch(&gotta_call_runtime, ne, a5, Operand(zero_reg));
// If SFI points to anything other than CompileLazy, install that.
- __ ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
+ __ Ld(entry, FieldMemOperand(entry, SharedFunctionInfo::kCodeOffset));
__ Move(t1, masm->CodeObject());
- __ Branch(&gotta_call_runtime_no_stack, eq, entry, Operand(t1));
+ __ Branch(&gotta_call_runtime, eq, entry, Operand(t1));
// Install the SFI's code entry.
__ Daddu(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
+ __ Sd(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, a5);
__ Jump(entry);
__ bind(&gotta_call_runtime);
- __ pop(closure);
- __ pop(new_target);
- __ pop(argument_count);
- __ bind(&gotta_call_runtime_no_stack);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
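
The rewritten CompileLazy drops the per-context optimized-code-map walk (and all the push/pop register juggling) in favor of a single slot in the feedback vector. A high-level C++ model of the resulting decision tree; the types and flags are stand-ins for the loads in this hunk:

#include <cstddef>

struct Code { bool marked_for_deoptimization; };
struct FeedbackVector {
  Code* optimized_code;  // one slot; a cleared WeakCell reads as a Smi here
};

enum class Action { kJumpToOptimized, kInstallSharedCode, kCallRuntime };

Action CompileLazyFastPath(const FeedbackVector* vector,
                           bool marked_for_tier_up,
                           bool sfi_code_is_compile_lazy) {
  if (vector == nullptr) return Action::kCallRuntime;  // no feedback vector
  if (vector->optimized_code != nullptr) {         // JumpIfSmi -> try_shared
    if (vector->optimized_code->marked_for_deoptimization)
      return Action::kCallRuntime;                 // gotta_call_runtime
    return Action::kJumpToOptimized;  // ReplaceClosureEntryWithOptimizedCode
  }
  if (marked_for_tier_up) return Action::kCallRuntime;
  if (!sfi_code_is_compile_lazy) return Action::kInstallSharedCode;
  return Action::kCallRuntime;  // Runtime::kCompileLazy
}
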
@@ -1443,7 +1509,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ Branch(&over, ne, t2, Operand(j));
}
for (int i = j - 1; i >= 0; --i) {
- __ ld(t2, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
+ __ Ld(t2, MemOperand(fp, StandardFrameConstants::kCallerSPOffset +
i * kPointerSize));
__ push(t2);
}
@@ -1592,7 +1658,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
}
// Get the full codegen state from the stack and untag it -> a6.
- __ lw(a6, UntagSmiMemOperand(sp, 0 * kPointerSize));
+ __ Lw(a6, UntagSmiMemOperand(sp, 0 * kPointerSize));
// Switch on the state.
Label with_tos_register, unknown_state;
__ Branch(
@@ -1604,7 +1670,7 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
__ bind(&with_tos_register);
DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
- __ ld(v0, MemOperand(sp, 1 * kPointerSize));
+ __ Ld(v0, MemOperand(sp, 1 * kPointerSize));
__ Branch(
&unknown_state, ne, a6,
Operand(static_cast<int64_t>(Deoptimizer::BailoutState::TOS_REGISTER)));
@@ -1633,10 +1699,10 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
bool has_handler_frame) {
// Lookup the function in the JavaScript frame.
if (has_handler_frame) {
- __ ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ld(a0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(a0, MemOperand(a0, JavaScriptFrameConstants::kFunctionOffset));
} else {
- __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
{
@@ -1657,11 +1723,11 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ lw(a1,
+ __ Lw(a1,
UntagSmiMemOperand(a1, FixedArray::OffsetOfElementAt(
DeoptimizationInputData::kOsrPcOffsetIndex) -
kHeapObjectTag));
@@ -1715,8 +1781,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Movz(this_arg, undefined_value, scratch); // if argc == 0
__ Dsubu(scratch, scratch, Operand(1));
__ Movz(arg_array, undefined_value, scratch); // if argc == 1
- __ ld(receiver, MemOperand(sp));
- __ sd(this_arg, MemOperand(sp));
+ __ Ld(receiver, MemOperand(sp));
+ __ Sd(this_arg, MemOperand(sp));
}
// ----------- S t a t e -------------
@@ -1729,8 +1795,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 2. Make sure the receiver is actually callable.
Label receiver_not_callable;
__ JumpIfSmi(receiver, &receiver_not_callable);
- __ ld(a4, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+ __ Ld(a4, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
__ And(a4, a4, Operand(1 << Map::kIsCallable));
__ Branch(&receiver_not_callable, eq, a4, Operand(zero_reg));
@@ -1756,7 +1822,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// 4c. The receiver is not callable, throw an appropriate TypeError.
__ bind(&receiver_not_callable);
{
- __ sd(receiver, MemOperand(sp));
+ __ Sd(receiver, MemOperand(sp));
__ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
}
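
The Movz pairs above default missing arguments without branching: a conditional move writes undefined exactly when the remaining-argument counter reaches zero. A scalar C++ model of that selection (Value is a stand-in type):

#include <cstdint>

struct Value { int payload; };
const Value kUndefined{-1};  // stand-in for the undefined root

// MIPS movz semantics: dst = src when cond == 0, else dst is unchanged.
void Movz(Value& dst, const Value& src, int64_t cond) {
  if (cond == 0) dst = src;
}

void DefaultApplyArgs(int64_t argc, Value& this_arg, Value& arg_array) {
  int64_t scratch = argc;
  Movz(this_arg, kUndefined, scratch);   // if argc == 0
  scratch -= 1;
  Movz(arg_array, kUndefined, scratch);  // if argc == 1
}
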
@@ -1776,7 +1842,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 2. Get the function to call (passed as receiver) from the stack.
// a0: actual number of arguments
__ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ ld(a1, MemOperand(at));
+ __ Ld(a1, MemOperand(at));
// 3. Shift arguments and return address one slot down on the stack
// (overwriting the original receiver). Adjust argument count to make
@@ -1789,8 +1855,8 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Dlsa(a2, sp, a0, kPointerSizeLog2);
__ bind(&loop);
- __ ld(at, MemOperand(a2, -kPointerSize));
- __ sd(at, MemOperand(a2));
+ __ Ld(at, MemOperand(a2, -kPointerSize));
+ __ Sd(at, MemOperand(a2));
__ Dsubu(a2, a2, Operand(kPointerSize));
__ Branch(&loop, ne, a2, Operand(sp));
// Adjust the actual number of arguments and remove the top element
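
The loop above slides every argument one slot toward higher addresses, overwriting the receiver slot (whose value, the callee, was already loaded into a1), after which the now-duplicated top slot is dropped. A vector-based model, with index 0 playing the role of sp:

#include <cstdint>
#include <vector>

void ShiftArgsDown(std::vector<uint64_t>& stack, int64_t argc) {
  // stack[0] is the slot at sp; stack[argc] held the original receiver.
  for (int64_t i = argc; i >= 1; --i) {
    stack[i] = stack[i - 1];  // Ld(at, MemOperand(a2, -8)); Sd(at, MemOperand(a2))
  }
  stack.erase(stack.begin());  // drop the top element; caller decrements argc
}
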
@@ -1840,7 +1906,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Dsubu(scratch, scratch, Operand(1));
__ Movz(arguments_list, undefined_value, scratch); // if argc == 2
- __ sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver
+ __ Sd(this_argument, MemOperand(sp, 0)); // Overwrite receiver
}
// ----------- S t a t e -------------
@@ -1853,8 +1919,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// 2. Make sure the target is actually callable.
Label target_not_callable;
__ JumpIfSmi(target, &target_not_callable);
- __ ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
- __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+ __ Ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
+ __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
__ And(a4, a4, Operand(1 << Map::kIsCallable));
__ Branch(&target_not_callable, eq, a4, Operand(zero_reg));
@@ -1866,7 +1932,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// 3b. The target is not callable, throw an appropriate TypeError.
__ bind(&target_not_callable);
{
- __ sd(target, MemOperand(sp));
+ __ Sd(target, MemOperand(sp));
__ TailCallRuntime(Runtime::kThrowApplyNonFunction);
}
}
@@ -1907,7 +1973,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Dsubu(scratch, scratch, Operand(1));
__ Movz(new_target, target, scratch); // if argc == 2
- __ sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
+ __ Sd(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
}
// ----------- S t a t e -------------
@@ -1920,16 +1986,16 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// 2. Make sure the target is actually a constructor.
Label target_not_constructor;
__ JumpIfSmi(target, &target_not_constructor);
- __ ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
- __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+ __ Ld(a4, FieldMemOperand(target, HeapObject::kMapOffset));
+ __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
__ And(a4, a4, Operand(1 << Map::kIsConstructor));
__ Branch(&target_not_constructor, eq, a4, Operand(zero_reg));
// 3. Make sure the new.target is actually a constructor.
Label new_target_not_constructor;
__ JumpIfSmi(new_target, &new_target_not_constructor);
- __ ld(a4, FieldMemOperand(new_target, HeapObject::kMapOffset));
- __ lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
+ __ Ld(a4, FieldMemOperand(new_target, HeapObject::kMapOffset));
+ __ Lbu(a4, FieldMemOperand(a4, Map::kBitFieldOffset));
__ And(a4, a4, Operand(1 << Map::kIsConstructor));
__ Branch(&new_target_not_constructor, eq, a4, Operand(zero_reg));
@@ -1939,14 +2005,14 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
// 4b. The target is not a constructor, throw an appropriate TypeError.
__ bind(&target_not_constructor);
{
- __ sd(target, MemOperand(sp));
+ __ Sd(target, MemOperand(sp));
__ TailCallRuntime(Runtime::kThrowNotConstructor);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ bind(&new_target_not_constructor);
{
- __ sd(new_target, MemOperand(sp));
+ __ Sd(new_target, MemOperand(sp));
__ TailCallRuntime(Runtime::kThrowNotConstructor);
}
}
@@ -1966,7 +2032,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// -----------------------------------
// Get the number of arguments passed (as a smi), tear down the frame and
// then tear down the parameters.
- __ ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+ __ Ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize)));
__ mov(sp, fp);
__ MultiPop(fp.bit() | ra.bit());
@@ -2000,23 +2066,23 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// Load the map of argumentsList into a2.
Register arguments_list_map = a2;
- __ ld(arguments_list_map,
+ __ Ld(arguments_list_map,
FieldMemOperand(arguments_list, HeapObject::kMapOffset));
// Load native context into a4.
Register native_context = a4;
- __ ld(native_context, NativeContextMemOperand());
+ __ Ld(native_context, NativeContextMemOperand());
// Check if argumentsList is an (unmodified) arguments object.
- __ ld(at, ContextMemOperand(native_context,
+ __ Ld(at, ContextMemOperand(native_context,
Context::SLOPPY_ARGUMENTS_MAP_INDEX));
__ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
- __ ld(at, ContextMemOperand(native_context,
+ __ Ld(at, ContextMemOperand(native_context,
Context::STRICT_ARGUMENTS_MAP_INDEX));
__ Branch(&create_arguments, eq, arguments_list_map, Operand(at));
// Check if argumentsList is a fast JSArray.
- __ lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+ __ Lbu(v0, FieldMemOperand(a2, Map::kInstanceTypeOffset));
__ Branch(&create_array, eq, v0, Operand(JS_ARRAY_TYPE));
// Ask the runtime to create the list (actually a FixedArray).
@@ -2027,16 +2093,16 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ CallRuntime(Runtime::kCreateListFromArrayLike);
__ mov(arguments_list, v0);
__ Pop(target, new_target);
- __ lw(len, UntagSmiFieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ Lw(len, UntagSmiFieldMemOperand(v0, FixedArray::kLengthOffset));
}
__ Branch(&done_create);
// Try to create the list from an arguments object.
__ bind(&create_arguments);
- __ lw(len, UntagSmiFieldMemOperand(arguments_list,
+ __ Lw(len, UntagSmiFieldMemOperand(arguments_list,
JSArgumentsObject::kLengthOffset));
- __ ld(a4, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
- __ lw(at, UntagSmiFieldMemOperand(a4, FixedArray::kLengthOffset));
+ __ Ld(a4, FieldMemOperand(arguments_list, JSObject::kElementsOffset));
+ __ Lw(at, UntagSmiFieldMemOperand(a4, FixedArray::kLengthOffset));
__ Branch(&create_runtime, ne, len, Operand(at));
__ mov(args, a4);
@@ -2045,21 +2111,21 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
// For holey JSArrays we need to check that the array prototype chain
// protector is intact and our prototype is the Array.prototype actually.
__ bind(&create_holey_array);
- __ ld(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
- __ ld(at, ContextMemOperand(native_context,
+ __ Ld(a2, FieldMemOperand(a2, Map::kPrototypeOffset));
+ __ Ld(at, ContextMemOperand(native_context,
Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ Branch(&create_runtime, ne, a2, Operand(at));
__ LoadRoot(at, Heap::kArrayProtectorRootIndex);
- __ lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
+ __ Lw(a2, FieldMemOperand(at, PropertyCell::kValueOffset));
__ Branch(&create_runtime, ne, a2,
Operand(Smi::FromInt(Isolate::kProtectorValid)));
- __ lw(a2, UntagSmiFieldMemOperand(a0, JSArray::kLengthOffset));
- __ ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
+ __ Lw(a2, UntagSmiFieldMemOperand(a0, JSArray::kLengthOffset));
+ __ Ld(a0, FieldMemOperand(a0, JSArray::kElementsOffset));
__ Branch(&done_create);
// Try to create the list from a JSArray object.
__ bind(&create_array);
- __ lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
+ __ Lbu(t1, FieldMemOperand(a2, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(t1);
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_ELEMENTS == 2);
@@ -2067,8 +2133,8 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_SMI_ELEMENTS));
__ Branch(&create_holey_array, eq, t1, Operand(FAST_HOLEY_ELEMENTS));
__ Branch(&create_runtime, hi, t1, Operand(FAST_ELEMENTS));
- __ lw(a2, UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
- __ ld(a0, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
+ __ Lw(a2, UntagSmiFieldMemOperand(arguments_list, JSArray::kLengthOffset));
+ __ Ld(a0, FieldMemOperand(arguments_list, JSArray::kElementsOffset));
__ bind(&done_create);
}
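
Both the holey-array path here and the spread fast path later rely on protector cells: a cell holds kProtectorValid until some prototype mutation invalidates the fast path, at which point the builtin must fall back to the runtime. A conceptual C++ model of the gate (structure and values are assumptions for illustration):

constexpr int kProtectorValid = 1;  // mirrors Isolate::kProtectorValid

struct PropertyCell { int value; };

bool CanUseHoleyFastPath(const PropertyCell& array_protector,
                         bool prototype_is_initial_array_prototype) {
  // Mirrors: compare the map's prototype against the initial
  // Array.prototype, then check the ArrayProtector cell's value.
  return prototype_is_initial_array_prototype &&
         array_protector.value == kProtectorValid;
}
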
@@ -2110,7 +2176,7 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
__ Dsubu(scratch, sp, Operand(scratch));
__ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
__ bind(&loop);
- __ ld(a5, MemOperand(src));
+ __ Ld(a5, MemOperand(src));
__ Branch(&push, ne, a5, Operand(t1));
__ LoadRoot(a5, Heap::kUndefinedValueRootIndex);
__ bind(&push);
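
When pushing from a holey backing store, each hole read out of the elements array is substituted with undefined before landing on the stack as a call argument. A short model with sentinel stand-ins:

#include <cstdint>
#include <vector>

constexpr uint64_t kTheHole = ~0ull;  // stand-in for the hole sentinel
constexpr uint64_t kUndefined = 0;    // stand-in for undefined

void PushElements(const std::vector<uint64_t>& elements,
                  std::vector<uint64_t>& stack) {
  for (uint64_t value : elements) {
    // Branch(&push, ne, a5, t1); LoadRoot(a5, kUndefinedValueRootIndex)
    stack.push_back(value == kTheHole ? kUndefined : value);
  }
}
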
@@ -2143,68 +2209,64 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- a1 : the target to call (can be any Object)
- // -- a2 : start index (to support rest parameters)
- // -- ra : return address.
- // -- sp[0] : thisArgument
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a3 : the new.target (for [[Construct]] calls)
+ // -- a1 : the target to call (can be any Object)
+ // -- a2 : start index (to support rest parameters)
// -----------------------------------
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
- __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(a0, MemOperand(a3, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ Branch(&arguments_adaptor, eq, a0,
+ __ Ld(a6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(a7, MemOperand(a6, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ Branch(&arguments_adaptor, eq, a7,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
{
- __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ld(a0, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a0,
- FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mov(a3, fp);
+ __ Ld(a7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ld(a7, FieldMemOperand(a7, JSFunction::kSharedFunctionInfoOffset));
+ __ Lw(a7,
+ FieldMemOperand(a7, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(a6, fp);
}
__ Branch(&arguments_done);
__ bind(&arguments_adaptor);
{
// Just get the length from the ArgumentsAdaptorFrame.
- __ lw(a0, UntagSmiMemOperand(
- a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ Lw(a7, UntagSmiMemOperand(
+ a6, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
- Label stack_empty, stack_done, stack_overflow;
- __ Subu(a0, a0, a2);
- __ Branch(&stack_empty, le, a0, Operand(zero_reg));
+ Label stack_done, stack_overflow;
+ __ Subu(a7, a7, a2);
+ __ Branch(&stack_done, le, a7, Operand(zero_reg));
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, a0, a4, a5, &stack_overflow);
+ Generate_StackOverflowCheck(masm, a7, a4, a5, &stack_overflow);
// Forward the arguments from the caller frame.
{
Label loop;
- __ mov(a2, a0);
+ __ Daddu(a0, a0, a7);
__ bind(&loop);
{
- __ Dlsa(at, a3, a2, kPointerSizeLog2);
- __ ld(at, MemOperand(at, 1 * kPointerSize));
+ __ Dlsa(at, a6, a7, kPointerSizeLog2);
+ __ Ld(at, MemOperand(at, 1 * kPointerSize));
__ push(at);
- __ Subu(a2, a2, Operand(1));
- __ Branch(&loop, ne, a2, Operand(zero_reg));
+ __ Subu(a7, a7, Operand(1));
+ __ Branch(&loop, ne, a7, Operand(zero_reg));
}
}
}
__ Branch(&stack_done);
__ bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&stack_empty);
- {
- // We just pass the receiver, which is already on the stack.
- __ mov(a0, zero_reg);
- }
__ bind(&stack_done);
+ // Tail-call to the {code} handler.
__ Jump(code, RelocInfo::CODE_TARGET);
}
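
The renamed Generate_ForwardVarargs also changes the count handling: instead of computing a fresh a0 (and special-casing an empty stack), the forwarded count — caller arguments minus the rest-parameter start index — is added to the argument count already set up by the caller of this builtin. In outline:

#include <algorithm>
#include <cstdint>

int64_t TotalArgCount(int64_t caller_arg_count,   // a7: frame or formal count
                      int64_t start_index,        // a2: rest parameter start
                      int64_t callee_arg_count) { // a0 on entry
  int64_t forwarded = std::max<int64_t>(caller_arg_count - start_index, 0);
  return callee_arg_count + forwarded;  // Daddu(a0, a0, a7)
}
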
@@ -2249,42 +2311,42 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
ExternalReference::is_tail_call_elimination_enabled_address(
masm->isolate());
__ li(at, Operand(is_tail_call_elimination_enabled));
- __ lb(scratch1, MemOperand(at));
+ __ Lb(scratch1, MemOperand(at));
__ Branch(&done, eq, scratch1, Operand(zero_reg));
// Drop possible interpreter handler/stub frame.
{
Label no_interpreter_frame;
- __ ld(scratch3,
+ __ Ld(scratch3,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&no_interpreter_frame, ne, scratch3,
Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
- __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ bind(&no_interpreter_frame);
}
// Check if next frame is an arguments adaptor frame.
Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(scratch3,
+ __ Ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(scratch3,
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Branch(&no_arguments_adaptor, ne, scratch3,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
- __ lw(caller_args_count_reg,
+ __ Lw(caller_args_count_reg,
UntagSmiMemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ Branch(&formal_parameter_count_loaded);
__ bind(&no_arguments_adaptor);
// Load caller's formal parameter count
- __ ld(scratch1,
+ __ Ld(scratch1,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kFunctionOffset));
- __ ld(scratch1,
+ __ Ld(scratch1,
FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(caller_args_count_reg,
+ __ Lw(caller_args_count_reg,
FieldMemOperand(scratch1,
SharedFunctionInfo::kFormalParameterCountOffset));
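
PrepareForTailCall has to know how many caller arguments to drop; the count comes from the adaptor frame's length slot when one is present, and from the caller's formal parameter count otherwise. A compact model of that selection (types are illustrative):

#include <cstdint>

struct FrameBelow { bool is_arguments_adaptor; int64_t adaptor_length; };
struct Caller { int64_t formal_parameter_count; };

int64_t CallerArgsCount(const FrameBelow& frame, const Caller& caller) {
  return frame.is_arguments_adaptor ? frame.adaptor_length
                                    : caller.formal_parameter_count;
}
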
@@ -2310,8 +2372,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
// Check that function is not a "classConstructor".
Label class_constructor;
- __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFunctionKindByteOffset));
__ And(at, a3, Operand(SharedFunctionInfo::kClassConstructorBitsWithinByte));
__ Branch(&class_constructor, ne, at, Operand(zero_reg));
@@ -2320,10 +2382,10 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// context in case of conversion.
STATIC_ASSERT(SharedFunctionInfo::kNativeByteOffset ==
SharedFunctionInfo::kStrictModeByteOffset);
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// We need to convert the receiver for non-native sloppy mode functions.
Label done_convert;
- __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
+ __ Lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
__ And(at, a3, Operand((1 << SharedFunctionInfo::kNativeBitWithinByte) |
(1 << SharedFunctionInfo::kStrictModeBitWithinByte)));
__ Branch(&done_convert, ne, at, Operand(zero_reg));
@@ -2341,7 +2403,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
} else {
Label convert_to_object, convert_receiver;
__ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ ld(a3, MemOperand(at));
+ __ Ld(a3, MemOperand(at));
__ JumpIfSmi(a3, &convert_to_object);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
__ GetObjectType(a3, a4, a4);
@@ -2375,11 +2437,11 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Pop(a0, a1);
__ SmiUntag(a0);
}
- __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ bind(&convert_receiver);
}
__ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ sd(a3, MemOperand(at));
+ __ Sd(a3, MemOperand(at));
}
__ bind(&done_convert);
@@ -2394,7 +2456,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
PrepareForTailCall(masm, a0, t0, t1, t2);
}
- __ lw(a2,
+ __ Lw(a2,
FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(a0);
ParameterCount expected(a2);
@@ -2425,14 +2487,14 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
// Patch the receiver to [[BoundThis]].
{
- __ ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ Ld(at, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
__ Dlsa(a4, sp, a0, kPointerSizeLog2);
- __ sd(at, MemOperand(a4));
+ __ Sd(at, MemOperand(a4));
}
// Load [[BoundArguments]] into a2 and length of that into a4.
- __ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
- __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2467,9 +2529,9 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
__ bind(&loop);
__ Branch(&done_loop, gt, a5, Operand(a0));
__ Dlsa(a6, sp, a4, kPointerSizeLog2);
- __ ld(at, MemOperand(a6));
+ __ Ld(at, MemOperand(a6));
__ Dlsa(a6, sp, a5, kPointerSizeLog2);
- __ sd(at, MemOperand(a6));
+ __ Sd(at, MemOperand(a6));
__ Daddu(a4, a4, Operand(1));
__ Daddu(a5, a5, Operand(1));
__ Branch(&loop);
@@ -2479,25 +2541,25 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm,
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop, done_loop;
- __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
__ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
__ Branch(&done_loop, lt, a4, Operand(zero_reg));
__ Dlsa(a5, a2, a4, kPointerSizeLog2);
- __ ld(at, MemOperand(a5));
+ __ Ld(at, MemOperand(a5));
__ Dlsa(a5, sp, a0, kPointerSizeLog2);
- __ sd(at, MemOperand(a5));
+ __ Sd(at, MemOperand(a5));
__ Daddu(a0, a0, Operand(1));
__ Branch(&loop);
__ bind(&done_loop);
}
// Call the [[BoundTargetFunction]] via the Call builtin.
- __ ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ li(at, Operand(ExternalReference(Builtins::kCall_ReceiverIsAny,
masm->isolate())));
- __ ld(at, MemOperand(at));
+ __ Ld(at, MemOperand(at));
__ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
}
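
The two loops above implement [[BoundArguments]] splicing: the existing arguments are shifted up to open a gap, then the bound arguments are copied into it, so the target function sees the bound arguments first. A semantic model using a vector in receiver-first order:

#include <cstdint>
#include <vector>

std::vector<uint64_t> SpliceBoundArgs(std::vector<uint64_t> args,
                                      const std::vector<uint64_t>& bound) {
  // args[0] is the receiver (already patched to [[BoundThis]] above);
  // bound arguments precede the call-site arguments.
  args.insert(args.begin() + 1, bound.begin(), bound.end());
  return args;  // a0 grows by bound.size()
}
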
@@ -2520,7 +2582,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
// Check if target has a [[Call]] internal method.
- __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ Lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
__ And(t1, t1, Operand(1 << Map::kIsCallable));
__ Branch(&non_callable, eq, t1, Operand(zero_reg));
@@ -2545,7 +2607,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
__ bind(&non_function);
// Overwrite the original receiver with the (original) target.
__ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ sd(a1, MemOperand(at));
+ __ Sd(a1, MemOperand(at));
// Let the "call_as_function_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_FUNCTION_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(
@@ -2577,34 +2639,34 @@ static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
Register native_context = a5;
Label runtime_call, push_args;
- __ ld(spread, MemOperand(sp, 0));
+ __ Ld(spread, MemOperand(sp, 0));
__ JumpIfSmi(spread, &runtime_call);
- __ ld(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
- __ ld(native_context, NativeContextMemOperand());
+ __ Ld(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
+ __ Ld(native_context, NativeContextMemOperand());
// Check that the spread is an array.
- __ lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
+ __ Lbu(scratch, FieldMemOperand(spread_map, Map::kInstanceTypeOffset));
__ Branch(&runtime_call, ne, scratch, Operand(JS_ARRAY_TYPE));
// Check that we have the original ArrayPrototype.
- __ ld(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
- __ ld(scratch2, ContextMemOperand(native_context,
+ __ Ld(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
+ __ Ld(scratch2, ContextMemOperand(native_context,
Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ Branch(&runtime_call, ne, scratch, Operand(scratch2));
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
__ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
- __ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ Branch(&runtime_call, ne, scratch,
Operand(Smi::FromInt(Isolate::kProtectorValid)));
// Check that the map of the initial array iterator hasn't changed.
- __ ld(scratch,
+ __ Ld(scratch,
ContextMemOperand(native_context,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
- __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
- __ ld(scratch2,
+ __ Ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ __ Ld(scratch2,
ContextMemOperand(native_context,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
__ Branch(&runtime_call, ne, scratch, Operand(scratch2));
@@ -2612,7 +2674,7 @@ static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
Label no_protector_check;
- __ lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
+ __ Lbu(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(scratch);
__ Branch(&runtime_call, hi, scratch, Operand(FAST_HOLEY_ELEMENTS));
// For non-FastHoley kinds, we can skip the protector check.
@@ -2620,14 +2682,14 @@ static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
__ Branch(&no_protector_check, eq, scratch, Operand(FAST_ELEMENTS));
// Check the ArrayProtector cell.
__ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
- __ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ Branch(&runtime_call, ne, scratch,
Operand(Smi::FromInt(Isolate::kProtectorValid)));
__ bind(&no_protector_check);
// Load the FixedArray backing store, but use the length from the array.
- __ lw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
- __ ld(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
+ __ Lw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
+ __ Ld(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
__ Branch(&push_args);
__ bind(&runtime_call);
@@ -2644,7 +2706,7 @@ static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
{
// Calculate the new nargs including the result of the spread.
- __ lw(spread_len,
+ __ Lw(spread_len,
UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
__ bind(&push_args);
@@ -2679,7 +2741,7 @@ static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
__ bind(&loop);
__ Branch(&done, eq, scratch, Operand(spread_len));
__ Dlsa(scratch2, spread, scratch, kPointerSizeLog2);
- __ ld(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
+ __ Ld(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
__ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
__ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
__ bind(&push);
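
The fast path above is an all-or-nothing gate: any failed check sends the spread to the runtime, which materializes the list via the full iterator protocol. Condensed into one predicate, with fields standing in for the individual map and protector loads:

struct SpreadChecks {
  bool is_js_array;
  bool has_initial_array_prototype;
  bool array_iterator_protector_valid;
  bool iterator_prototype_map_unchanged;
  bool fast_elements_kind;  // kind <= FAST_HOLEY_ELEMENTS
  bool holey_kind;          // FAST_HOLEY_SMI_ELEMENTS or FAST_HOLEY_ELEMENTS
  bool array_protector_valid;
};

bool CanSpreadFast(const SpreadChecks& s) {
  if (!s.is_js_array || !s.has_initial_array_prototype) return false;
  if (!s.array_iterator_protector_valid) return false;
  if (!s.iterator_prototype_map_unchanged) return false;
  if (!s.fast_elements_kind) return false;
  // Holey kinds additionally require the ArrayProtector cell to be intact.
  return !s.holey_kind || s.array_protector_valid;
}
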
@@ -2719,8 +2781,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
- __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
__ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
}
@@ -2735,8 +2797,8 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ AssertBoundFunction(a1);
// Load [[BoundArguments]] into a2 and length of that into a4.
- __ ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
- __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Ld(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
// ----------- S t a t e -------------
// -- a0 : the number of arguments (not including the receiver)
@@ -2772,9 +2834,9 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
__ bind(&loop);
__ Branch(&done_loop, ge, a5, Operand(a0));
__ Dlsa(a6, sp, a4, kPointerSizeLog2);
- __ ld(at, MemOperand(a6));
+ __ Ld(at, MemOperand(a6));
__ Dlsa(a6, sp, a5, kPointerSizeLog2);
- __ sd(at, MemOperand(a6));
+ __ Sd(at, MemOperand(a6));
__ Daddu(a4, a4, Operand(1));
__ Daddu(a5, a5, Operand(1));
__ Branch(&loop);
@@ -2784,15 +2846,15 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// Copy [[BoundArguments]] to the stack (below the arguments).
{
Label loop, done_loop;
- __ lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Lw(a4, UntagSmiFieldMemOperand(a2, FixedArray::kLengthOffset));
__ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ bind(&loop);
__ Dsubu(a4, a4, Operand(1));
__ Branch(&done_loop, lt, a4, Operand(zero_reg));
__ Dlsa(a5, a2, a4, kPointerSizeLog2);
- __ ld(at, MemOperand(a5));
+ __ Ld(at, MemOperand(a5));
__ Dlsa(a5, sp, a0, kPointerSizeLog2);
- __ sd(at, MemOperand(a5));
+ __ Sd(at, MemOperand(a5));
__ Daddu(a0, a0, Operand(1));
__ Branch(&loop);
__ bind(&done_loop);
@@ -2802,14 +2864,14 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
{
Label skip_load;
__ Branch(&skip_load, ne, a1, Operand(a3));
- __ ld(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Ld(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ bind(&skip_load);
}
// Construct the [[BoundTargetFunction]] via the Construct builtin.
- __ ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Ld(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
__ li(at, Operand(ExternalReference(Builtins::kConstruct, masm->isolate())));
- __ ld(at, MemOperand(at));
+ __ Ld(at, MemOperand(at));
__ Daddu(at, at, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
}
@@ -2846,13 +2908,13 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
__ JumpIfSmi(a1, &non_constructor);
// Dispatch based on instance type.
- __ ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
- __ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
+ __ Ld(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset));
__ Jump(masm->isolate()->builtins()->ConstructFunction(),
RelocInfo::CODE_TARGET, eq, t2, Operand(JS_FUNCTION_TYPE));
// Check if target has a [[Construct]] internal method.
- __ lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ Lbu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
__ And(t3, t3, Operand(1 << Map::kIsConstructor));
__ Branch(&non_constructor, eq, t3, Operand(zero_reg));
@@ -2869,7 +2931,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
{
// Overwrite the original receiver with the (original) target.
__ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ sd(a1, MemOperand(at));
+ __ Sd(a1, MemOperand(at));
// Let the "call_as_constructor_delegate" take care of the rest.
__ LoadNativeContextSlot(Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, a1);
__ Jump(masm->isolate()->builtins()->CallFunction(),
@@ -2976,7 +3038,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label copy;
__ bind(&copy);
- __ ld(a5, MemOperand(a0));
+ __ Ld(a5, MemOperand(a0));
__ push(a5);
__ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a4));
__ daddiu(a0, a0, -kPointerSize); // In delay slot.
@@ -3009,11 +3071,11 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a7: copy end address
Label copy;
__ bind(&copy);
- __ ld(a4, MemOperand(a0)); // Adjusted above for return addr and receiver.
+ __ Ld(a4, MemOperand(a0)); // Adjusted above for return addr and receiver.
__ Dsubu(sp, sp, kPointerSize);
__ Dsubu(a0, a0, kPointerSize);
__ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a7));
- __ sd(a4, MemOperand(sp)); // In the delay slot.
+ __ Sd(a4, MemOperand(sp)); // In the delay slot.
// Fill the remaining expected arguments with undefined.
// a1: function
@@ -3030,7 +3092,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ bind(&fill);
__ Dsubu(sp, sp, kPointerSize);
__ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a4));
- __ sd(a5, MemOperand(sp));
+ __ Sd(a5, MemOperand(sp));
}
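
The copy and fill loops above lean on MIPS branch delay slots: the instruction after a Branch(USE_DELAY_SLOT, ...) executes on every iteration whether or not the branch is taken, so the store rides behind the loop branch at no extra cost. Stripped of the delay-slot scheduling, the fill loop is just:

#include <cstdint>
#include <vector>

void FillWithUndefined(std::vector<uint64_t>& stack, int64_t missing_slots,
                       uint64_t undefined_value) {
  while (missing_slots-- > 0) {
    // Dsubu(sp, sp, kPointerSize); Sd(a5, MemOperand(sp)) in the delay slot
    stack.push_back(undefined_value);
  }
}
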
// Call the entry point.
@@ -3039,7 +3101,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// a0 : expected number of arguments
// a1 : function (passed through to callee)
// a3: new target (passed through to callee)
- __ ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Call(a4);
// Store offset of return address for deoptimizer.
@@ -3053,7 +3115,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
- __ ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
__ Jump(a4);
__ bind(&stack_overflow);
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index d76b637fd8..dc2221e10b 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -446,9 +446,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
namespace {
-void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Label post_instantiation_deopt_entry;
// ----------- S t a t e -------------
// -- r3 : number of arguments
@@ -459,76 +457,179 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// -- sp[...]: constructor arguments
// -----------------------------------
- Isolate* isolate = masm->isolate();
-
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
- if (!create_implicit_receiver) {
- __ SmiTag(r7, r3, SetRC);
- __ Push(cp, r7);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- } else {
- __ SmiTag(r3);
- __ Push(cp, r3);
-
- // Allocate the new receiver object.
- __ Push(r4, r6);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ mr(r7, r3);
- __ Pop(r4, r6);
+ __ SmiTag(r3);
+ __ Push(cp, r3);
+ __ SmiUntag(r3, SetRC);
+ // The receiver for the builtin/api call.
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
+ // Set up pointer to last argument.
+ __ addi(r7, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- // ----------- S t a t e -------------
- // -- r4: constructor function
- // -- r6: new target
- // -- r7: newly allocated object
- // -----------------------------------
+ // Copy arguments and receiver to the expression stack.
- // Retrieve smi-tagged arguments count from the stack.
- __ LoadP(r3, MemOperand(sp));
- __ SmiUntag(r3, SetRC);
+ Label loop, no_args;
+ // ----------- S t a t e -------------
+ // -- r3: number of arguments (untagged)
+ // -- r4: constructor function
+ // -- r6: new target
+ // -- r7: pointer to last argument
+ // -- cr0: condition indicating whether r3 is zero
+ // -- sp[0*kPointerSize]: the hole (receiver)
+ // -- sp[1*kPointerSize]: number of arguments (tagged)
+ // -- sp[2*kPointerSize]: context
+ // -----------------------------------
+ __ beq(&no_args, cr0);
+ __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+ __ sub(sp, sp, ip);
+ __ mtctr(r3);
+ __ bind(&loop);
+ __ subi(ip, ip, Operand(kPointerSize));
+ __ LoadPX(r0, MemOperand(r7, ip));
+ __ StorePX(r0, MemOperand(sp, ip));
+ __ bdnz(&loop);
+ __ bind(&no_args);
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(r7, r7);
+ // Call the function.
+ // r3: number of arguments (untagged)
+ // r4: constructor function
+ // r6: new target
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ ParameterCount actual(r3);
+ __ InvokeFunction(r4, r6, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
}
- // Deoptimizer re-enters stub code here.
+ // Restore context from the frame.
+ __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ // Restore smi-tagged arguments count from the frame.
+ __ LoadP(r4, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+
+ // Leave construct frame.
+ }
+ // Remove caller arguments from the stack and return.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+
+ __ SmiToPtrArrayOffset(r4, r4);
+ __ add(sp, sp, r4);
+ __ addi(sp, sp, Operand(kPointerSize));
+ __ blr();
+}
+
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Generate_JSConstructStubGeneric(MacroAssembler* masm,
+ bool restrict_constructor_return) {
+ // ----------- S t a t e -------------
+ // -- r3: number of arguments (untagged)
+ // -- r4: constructor function
+ // -- r6: new target
+ // -- cp: context
+ // -- lr: return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(r3);
+ __ Push(cp, r3, r4, r6);
+
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- r4 and sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments (tagged)
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+ __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBitMask(r7,
+ FunctionKind::kDerivedConstructor
+ << SharedFunctionInfo::kFunctionKindShift,
+ r0);
+ __ bne(&not_create_implicit_receiver, cr0);
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
+ r7, r8);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
+ __ b(&post_instantiation_deopt_entry);
+
+ // Else: use TheHoleValue as the receiver for the constructor call.
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+
+ // ----------- S t a t e -------------
+ // -- r3: receiver
+ // -- Slot 3 / sp[0*kPointerSize]: new target
+ // -- Slot 2 / sp[1*kPointerSize]: constructor function
+ // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[3*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
__ bind(&post_instantiation_deopt_entry);
+ // Restore new target.
+ __ Pop(r6);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
+ __ Push(r3, r3);
+
+ // ----------- S t a t e -------------
+ // -- r6: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+
+ // Restore constructor function and argument count.
+ __ LoadP(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(r3, SetRC);
+
// Set up pointer to last argument.
- __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ addi(r7, fp, Operand(StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
- // r3: number of arguments
- // r4: constructor function
- // r5: address of last argument (caller sp)
- // r6: new target
- // cr0: condition indicating whether r3 is zero
- // sp[0]: receiver
- // sp[1]: receiver
- // sp[2]: number of arguments (smi-tagged)
Label loop, no_args;
+ // ----------- S t a t e -------------
+ // -- r3: number of arguments (untagged)
+ // -- r6: new target
+ // -- r7: pointer to last argument
+ // -- cr0: condition indicating whether r3 is zero
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- r4 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
__ beq(&no_args, cr0);
__ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
__ sub(sp, sp, ip);
__ mtctr(r3);
__ bind(&loop);
__ subi(ip, ip, Operand(kPointerSize));
- __ LoadPX(r0, MemOperand(r5, ip));
+ __ LoadPX(r0, MemOperand(r7, ip));
__ StorePX(r0, MemOperand(sp, ip));
__ bdnz(&loop);
__ bind(&no_args);
// Call the function.
- // r3: number of arguments
- // r4: constructor function
- // r6: new target
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
ParameterCount actual(r3);
@@ -536,125 +637,100 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
CheckDebugStepCallWrapper());
}
+ // ----------- S t a t e -------------
+ // -- r0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
// Store offset of return address for deoptimizer.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
- }
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
- // Restore context from the frame.
- // r3: result
- // sp[0]: receiver
- // sp[1]: number of arguments (smi-tagged)
+ // Restore the context from the frame.
__ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
- if (create_implicit_receiver) {
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r3: result
- // sp[0]: receiver
- // sp[1]: number of arguments (smi-tagged)
- __ JumpIfSmi(r3, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r3, r4, r6, FIRST_JS_RECEIVER_TYPE);
- __ bge(&exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ LoadP(r3, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r3: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: number of arguments (smi-tagged)
- __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
- } else {
- __ LoadP(r4, MemOperand(sp));
- }
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, other_result, leave_frame;
- // Leave construct frame.
- }
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfRoot(r3, Heap::kUndefinedValueRootIndex, &use_receiver);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(r3, &other_result);
- // ES6 9.2.2. Step 13+
- // Check that the result is not a Smi, indicating that the constructor result
- // from a derived class is neither undefined nor an Object.
- if (check_derived_construct) {
- Label do_throw, dont_throw;
- __ JumpIfSmi(r3, &do_throw);
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(r3, r6, r6, FIRST_JS_RECEIVER_TYPE);
- __ bge(&dont_throw);
+ __ CompareObjectType(r3, r7, r7, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&leave_frame);
+
+ __ bind(&other_result);
+ // The result is now neither undefined nor an object.
+ if (restrict_constructor_return) {
+ // Throw if the constructor function is a class constructor.
+ __ LoadP(r7, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+ __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBitMask(r7,
+ FunctionKind::kClassConstructor
+ << SharedFunctionInfo::kFunctionKindShift,
+ r0);
+ __ beq(&use_receiver, cr0);
+
+ } else {
+ __ b(&use_receiver);
+ }
+
__ bind(&do_throw);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
}
- __ bind(&dont_throw);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ LoadP(r3, MemOperand(sp));
+ __ JumpIfRoot(r3, Heap::kTheHoleValueRootIndex, &do_throw);
+
+ __ bind(&leave_frame);
+ // Restore smi-tagged arguments count from the frame.
+ __ LoadP(r4, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
}
+ // Remove caller arguments from the stack and return.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+
__ SmiToPtrArrayOffset(r4, r4);
__ add(sp, sp, r4);
__ addi(sp, sp, Operand(kPointerSize));
- if (create_implicit_receiver) {
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5);
- }
__ blr();
- // Store offset of trampoline address for deoptimizer. This is the bailout
- // point after the receiver instantiation but before the function invocation.
- // We need to restore some registers in order to continue the above code.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
- masm->pc_offset());
-
- // ----------- S t a t e -------------
- // -- r3 : newly allocated object
- // -- sp[0] : constructor function
- // -----------------------------------
-
- __ pop(r4);
- __ Push(r3, r3);
-
- // Retrieve smi-tagged arguments count from the stack.
- __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
- __ SmiUntag(r3);
-
- // Retrieve the new target value from the stack. This was placed into the
- // frame description in place of the receiver by the optimizing compiler.
- __ addi(r6, fp, Operand(StandardFrameConstants::kCallerSPOffset));
- __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
- __ LoadPX(r6, MemOperand(r6, ip));
-
- // Continue with constructor function invocation.
- __ b(&post_instantiation_deopt_entry);
- }
}
} // namespace
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
+void Builtins::Generate_JSConstructStubGenericRestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, true);
+}
+void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStubForDerived(
- MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, true);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
// static
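
All three ports in this diff replace the old four-flag Generate_JSConstructStubHelper with the pair above: a slim Generate_JSBuiltinsConstructStubHelper for builtin/API calls, and a Generate_JSConstructStubGeneric parameterized only by restrict_constructor_return. As a reading aid, here is a minimal C++ sketch of the return-value policy that the generic stub's labels (use_receiver, other_result, do_throw, leave_frame) implement; Value and the helper names are hypothetical stand-ins, not V8's tagged-value API:

#include <stdexcept>

// Stand-in for a tagged JS value; the real stub works on raw registers
// and stack slots, not on this enum.
enum class Value { kUndefined, kTheHole, kSmi, kJSReceiver, kOtherPrimitive };

static Value UseReceiver(Value receiver) {
  // use_receiver: fall back to the implicit receiver kept on the stack.
  // A derived constructor that never called super() leaves the hole there,
  // which is how do_throw is reached from this path.
  if (receiver == Value::kTheHole)
    throw std::runtime_error("ThrowConstructorReturnedNonObject");
  return receiver;
}

static Value CompleteConstruct(Value result, Value receiver,
                               bool restrict_constructor_return,
                               bool is_class_constructor) {
  // Undefined falls back to the implicit receiver.
  if (result == Value::kUndefined) return UseReceiver(receiver);
  // Any JSReceiver result is returned as-is (leave_frame).
  if (result == Value::kJSReceiver) return result;
  // other_result: the result is neither undefined nor an object.
  if (restrict_constructor_return && is_class_constructor)
    throw std::runtime_error("ThrowConstructorReturnedNonObject");  // do_throw
  return UseReceiver(receiver);
}

The Restricted/Unrestricted entry points above pin restrict_constructor_return to true and false respectively, which is why the specific Runtime::kThrowDerivedConstructorReturnedNonObject is replaced by the more general Runtime::kThrowConstructorReturnedNonObject.
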
@@ -918,6 +994,41 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void ReplaceClosureEntryWithOptimizedCode(
+ MacroAssembler* masm, Register optimized_code_entry, Register closure,
+ Register scratch1, Register scratch2, Register scratch3) {
+ Register native_context = scratch1;
+ // Store code entry in the closure.
+ __ addi(optimized_code_entry, optimized_code_entry,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ StoreP(optimized_code_entry,
+ FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+
+ // Link the closure into the optimized function list.
+ // r7 : code entry
+ // r10: native context
+ // r4 : closure
+ __ LoadP(native_context, NativeContextMemOperand());
+ __ LoadP(scratch2, ContextMemOperand(native_context,
+ Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ StoreP(scratch2,
+ FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset), r0);
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
+ scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ StoreP(
+ closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
+ // Save closure before the write barrier.
+ __ mr(scratch2, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure,
+ scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ mr(closure, scratch2);
+}
+
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
Register args_count = scratch;
@@ -958,6 +1069,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(r4);
+ // First check if there is optimized code in the feedback vector which we
+ // could call instead.
+ Label switch_to_optimized_code;
+
+ Register optimized_code_entry = r7;
+ __ LoadP(r3, FieldMemOperand(r4, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(r3, FieldMemOperand(r3, Cell::kValueOffset));
+ __ LoadP(
+ optimized_code_entry,
+ FieldMemOperand(r3, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ LoadP(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
@@ -1078,6 +1204,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ StoreP(r7, FieldMemOperand(r4, JSFunction::kCodeEntryOffset), r0);
__ RecordWriteCodeEntryField(r4, r7, r8);
__ JumpToJSEntry(r7);
+
+  // If there is optimized code on the type feedback vector, check if it is
+  // good to run, and if so, self-heal the closure and call the optimized code.
+ __ bind(&switch_to_optimized_code);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ Label gotta_call_runtime;
+
+ // Check if the optimized code is marked for deopt.
+ __ lwz(r8, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
+ __ bne(&gotta_call_runtime, cr0);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r4, r9, r8,
+ r5);
+ __ JumpToJSEntry(optimized_code_entry);
+
+  // Optimized code is marked for deopt; bail out to the CompileLazy runtime
+  // function, which will clear the feedback vector's optimized code slot.
+ __ bind(&gotta_call_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
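
The fast path added to Generate_InterpreterEntryTrampoline in the hunk above is a three-way dispatch on the feedback vector's optimized-code slot. A hedged sketch of that decision in plain C++, using stand-in structs rather than V8's real object layouts (a cleared WeakCell is modeled as nullptr where the stub checks for a Smi):

struct Code { bool marked_for_deoptimization = false; };
struct WeakCell { Code* value = nullptr; };  // nullptr plays the Smi role
struct FeedbackVector { WeakCell optimized_code_cell; };
struct JSFunction {
  FeedbackVector* feedback_vector = nullptr;
  Code* code_entry = nullptr;
};

enum class Dispatch { kOptimizedCode, kBytecode, kEvictAndCompileLazy };

Dispatch ChooseEntry(JSFunction* closure) {
  Code* optimized = closure->feedback_vector->optimized_code_cell.value;
  if (optimized == nullptr)  // no optimized code: run the bytecode
    return Dispatch::kBytecode;
  if (optimized->marked_for_deoptimization)  // stale: evict via the runtime
    return Dispatch::kEvictAndCompileLazy;   // Runtime::kEvictOptimizedCodeSlot
  closure->code_entry = optimized;           // self-heal the closure
  return Dispatch::kOptimizedCode;           // tail call the optimized code
}

The same check (Code::kMarkedForDeoptimizationBit in kKindSpecificFlags1Offset) recurs in Generate_CompileLazy below.
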
@@ -1321,10 +1470,8 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
- Label loop_top, loop_bottom;
Register closure = r4;
- Register map = r9;
Register index = r5;
// Do we have a valid feedback vector?
@@ -1332,80 +1479,25 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
- __ LoadP(map,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(map,
- FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
- __ blt(&try_shared);
-
- // r10 : native context
- // r5 : length / index
- // r9 : optimized code map
- // r6 : new target
- // r4 : closure
- Register native_context = r10;
- __ LoadP(native_context, NativeContextMemOperand());
-
- __ bind(&loop_top);
- Register temp = r11;
- Register array_pointer = r8;
-
- // Does the native context match?
- __ SmiToPtrArrayOffset(array_pointer, index);
- __ add(array_pointer, map, array_pointer);
- __ LoadP(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousContext));
- __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ cmp(temp, native_context);
- __ bne(&loop_bottom);
-
- // Code available?
+ // Is optimized code available in the feedback vector?
Register entry = r7;
- __ LoadP(entry,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
+ kPointerSize +
+ FeedbackVector::kHeaderSize));
__ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found code. Get it into the closure and return.
- // Store code entry in the closure.
- __ addi(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
- __ RecordWriteCodeEntryField(closure, entry, r8);
+  // Found code; check if it is marked for deopt and, if so, call into the
+  // runtime to clear the optimized code slot.
+ __ lwz(r8, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
+ __ TestBit(r8, Code::kMarkedForDeoptimizationBit, r0);
+ __ bne(&gotta_call_runtime, cr0);
- // Link the closure into the optimized function list.
- // r7 : code entry
- // r10: native context
- // r4 : closure
- __ LoadP(
- r8, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ StoreP(r8, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
- r0);
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r8, temp,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ StoreP(
- closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
- // Save closure before the write barrier.
- __ mr(r8, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, r8, temp,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
+ // Code is good, get it into the closure and tail call.
+ ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r9, r8, r5);
__ JumpToJSEntry(entry);
- __ bind(&loop_bottom);
- __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
- r0);
- __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
- __ bgt(&loop_top);
-
- // We found no code.
- __ b(&gotta_call_runtime);
-
+ // We found no optimized code.
__ bind(&try_shared);
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
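
The Generate_CompileLazy hunk above deletes the loop_top/loop_bottom walk over the SharedFunctionInfo's per-context optimized code map and replaces it with a single load from the feedback vector. A before/after sketch in C++ with stand-in types (not the real V8 layouts):

#include <utility>
#include <vector>

struct Context {};
struct Code { bool marked_for_deoptimization = false; };

// Old shape: SharedFunctionInfo carried a FixedArray of
// (native context, code) entries that had to be walked.
struct OldSharedInfo {
  std::vector<std::pair<Context*, Code*>> optimized_code_map;
};

Code* OldLookup(const OldSharedInfo& sfi, Context* native_context) {
  for (const auto& entry : sfi.optimized_code_map)  // loop_top..loop_bottom
    if (entry.first == native_context) return entry.second;
  return nullptr;  // fall through to try_shared
}

// New shape: the closure's feedback vector holds one weak optimized-code
// slot (FeedbackVector::kOptimizedCodeIndex), so the lookup is one load.
struct FeedbackVector { Code* optimized_code = nullptr; };

Code* NewLookup(const FeedbackVector& vector) {
  Code* code = vector.optimized_code;
  if (code == nullptr) return nullptr;  // cleared cell: try_shared
  // In the stub this case branches to gotta_call_runtime instead,
  // so the runtime can clear the slot before recompiling.
  if (code->marked_for_deoptimization) return nullptr;
  return code;
}

Since the feedback vector belongs to the closure, and closures are per-native-context, the context matching the old loop performed is now implicit.
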
@@ -2155,63 +2247,63 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r4 : the target to call (can be any Object)
- // -- r5 : start index (to support rest parameters)
- // -- lr : return address.
- // -- sp[0] : thisArgument
+ // -- r3 : the number of arguments (not including the receiver)
+ // -- r6 : the new.target (for [[Construct]] calls)
+ // -- r4 : the target to call (can be any Object)
+ // -- r5 : start index (to support rest parameters)
// -----------------------------------
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
- __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(ip, MemOperand(r7, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmpi(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ beq(&arguments_adaptor);
{
- __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r8, FieldMemOperand(r8, JSFunction::kSharedFunctionInfoOffset));
__ LoadWordArith(
- r3,
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ mr(r6, fp);
+ r8,
+ FieldMemOperand(r8, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mr(r7, fp);
}
__ b(&arguments_done);
__ bind(&arguments_adaptor);
{
// Load the length from the ArgumentsAdaptorFrame.
- __ LoadP(r3, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset));
#if V8_TARGET_ARCH_PPC64
- __ SmiUntag(r3);
+ __ SmiUntag(r8);
#endif
}
__ bind(&arguments_done);
- Label stack_empty, stack_done, stack_overflow;
+ Label stack_done, stack_overflow;
#if !V8_TARGET_ARCH_PPC64
- __ SmiUntag(r3);
+ __ SmiUntag(r8);
#endif
- __ sub(r3, r3, r5);
- __ cmpi(r3, Operand::Zero());
- __ ble(&stack_empty);
+ __ sub(r8, r8, r5);
+ __ cmpi(r8, Operand::Zero());
+ __ ble(&stack_done);
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, r3, r5, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r8, r5, &stack_overflow);
// Forward the arguments from the caller frame.
{
Label loop;
- __ addi(r6, r6, Operand(kPointerSize));
- __ mr(r5, r3);
+ __ addi(r7, r7, Operand(kPointerSize));
+ __ add(r3, r3, r8);
__ bind(&loop);
{
- __ ShiftLeftImm(ip, r5, Operand(kPointerSizeLog2));
- __ LoadPX(ip, MemOperand(r6, ip));
+ __ ShiftLeftImm(ip, r8, Operand(kPointerSizeLog2));
+ __ LoadPX(ip, MemOperand(r7, ip));
__ push(ip);
- __ subi(r5, r5, Operand(1));
- __ cmpi(r5, Operand::Zero());
+ __ subi(r8, r8, Operand(1));
+ __ cmpi(r8, Operand::Zero());
__ bne(&loop);
}
}
@@ -2219,13 +2311,9 @@ void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
__ b(&stack_done);
__ bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&stack_empty);
- {
- // We just pass the receiver, which is already on the stack.
- __ mov(r3, Operand::Zero());
- }
__ bind(&stack_done);
+ // Tail-call to the {code} handler.
__ Jump(code, RelocInfo::CODE_TARGET);
}
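
Generate_CallForwardVarargs becomes Generate_ForwardVarargs: the stub now receives the running argument count in r3 (r2 on s390, rax on x64) and adds the forwarded arguments to it instead of synthesizing a fresh count, which is why the stack_empty path disappears. A minimal sketch of the forwarding logic, modeling the caller frame as a plain array (the real stub reads the ArgumentsAdaptorFrame length, or the callee's formal parameter count, and copies stack slots):

#include <cstddef>
#include <vector>

// 'caller_args' models the caller frame's arguments; 'arg_count' plays the
// role of the argument-count register, which the renamed builtin increments
// by the number of forwarded arguments. An empty forwarding range simply
// falls through to stack_done with the count unchanged.
std::vector<int> ForwardVarargs(const std::vector<int>& caller_args,
                                std::size_t start_index, int& arg_count,
                                std::vector<int> stack) {
  if (caller_args.size() > start_index) {
    arg_count += static_cast<int>(caller_args.size() - start_index);
    // Push from the top of the caller frame downwards, as the stub's loop
    // does, so the arguments end up in order on the growing stack.
    for (std::size_t i = caller_args.size(); i > start_index; --i)
      stack.push_back(caller_args[i - 1]);
  }
  // stack_done: tail-call the {code} handler with the updated count.
  return stack;
}
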
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 59d53f9439..2148f11105 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -442,9 +442,7 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
namespace {
-void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
Label post_instantiation_deopt_entry;
// ----------- S t a t e -------------
// -- r2 : number of arguments
@@ -455,52 +453,18 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// -- sp[...]: constructor arguments
// -----------------------------------
- Isolate* isolate = masm->isolate();
-
// Enter a construct frame.
{
FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
// Preserve the incoming parameters on the stack.
-
- if (!create_implicit_receiver) {
- __ SmiTag(r6, r2);
- __ LoadAndTestP(r6, r6);
- __ Push(cp, r6);
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- } else {
- __ SmiTag(r2);
- __ Push(cp, r2);
-
- // Allocate the new receiver object.
- __ Push(r3, r5);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ LoadRR(r6, r2);
- __ Pop(r3, r5);
-
- // ----------- S t a t e -------------
- // -- r3: constructor function
- // -- r5: new target
- // -- r6: newly allocated object
- // -----------------------------------
-
- // Retrieve smi-tagged arguments count from the stack.
- __ LoadP(r2, MemOperand(sp));
- __ SmiUntag(r2);
- __ LoadAndTestP(r2, r2);
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(r6, r6);
- }
-
- // Deoptimizer re-enters stub code here.
- __ bind(&post_instantiation_deopt_entry);
-
+ __ SmiTag(r2);
+ __ Push(cp, r2);
+ __ SmiUntag(r2);
+ // The receiver for the builtin/api call.
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
// Set up pointer to last argument.
- __ la(r4, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
+ __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
// Copy arguments and receiver to the expression stack.
// r2: number of arguments
@@ -518,7 +482,7 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ LoadRR(r1, r2);
__ bind(&loop);
__ lay(ip, MemOperand(ip, -kPointerSize));
- __ LoadP(r0, MemOperand(ip, r4));
+ __ LoadP(r0, MemOperand(ip, r6));
__ StoreP(r0, MemOperand(ip, sp));
__ BranchOnCount(r1, &loop);
__ bind(&no_args);
@@ -532,127 +496,228 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ InvokeFunction(r3, r5, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
- // Store offset of return address for deoptimizer.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
- }
-
// Restore context from the frame.
- // r2: result
- // sp[0]: receiver
- // sp[1]: number of arguments (smi-tagged)
__ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
-
- if (create_implicit_receiver) {
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
-
- // If the result is a smi, it is *not* an object in the ECMA sense.
- // r2: result
- // sp[0]: receiver
- // sp[1]: new.target
- // sp[2]: number of arguments (smi-tagged)
- __ JumpIfSmi(r2, &use_receiver);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r2, r3, r5, FIRST_JS_RECEIVER_TYPE);
- __ bge(&exit);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ LoadP(r2, MemOperand(sp));
-
- // Remove receiver from the stack, remove caller arguments, and
- // return.
- __ bind(&exit);
- // r2: result
- // sp[0]: receiver (newly allocated object)
- // sp[1]: number of arguments (smi-tagged)
- __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
- } else {
- __ LoadP(r3, MemOperand(sp));
- }
+ // Restore smi-tagged arguments count from the frame.
+ __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
-
- // ES6 9.2.2. Step 13+
- // Check that the result is not a Smi, indicating that the constructor result
- // from a derived class is neither undefined nor an Object.
- if (check_derived_construct) {
- Label do_throw, dont_throw;
- __ JumpIfSmi(r2, &do_throw);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(r2, r5, r5, FIRST_JS_RECEIVER_TYPE);
- __ bge(&dont_throw);
- __ bind(&do_throw);
- {
- FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
- }
- __ bind(&dont_throw);
- }
+ // Remove caller arguments from the stack and return.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
__ SmiToPtrArrayOffset(r3, r3);
__ AddP(sp, sp, r3);
__ AddP(sp, sp, Operand(kPointerSize));
- if (create_implicit_receiver) {
- __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r3, r4);
- }
__ Ret();
+}
- // Store offset of trampoline address for deoptimizer. This is the bailout
- // point after the receiver instantiation but before the function invocation.
- // We need to restore some registers in order to continue the above code.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
- masm->pc_offset());
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Generate_JSConstructStubGeneric(MacroAssembler* masm,
+ bool restrict_constructor_return) {
+ // ----------- S t a t e -------------
+ // -- r2: number of arguments (untagged)
+ // -- r3: constructor function
+ // -- r5: new target
+ // -- cp: context
+ // -- lr: return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(r2);
+ __ Push(cp, r2, r3, r5);
+
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- r3 and sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments (tagged)
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadlW(r6,
+ FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBitMask(r6,
+ FunctionKind::kDerivedConstructor
+ << SharedFunctionInfo::kFunctionKindShift,
+ r0);
+ __ bne(&not_create_implicit_receiver);
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1,
+ r6, r7);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
+ __ b(&post_instantiation_deopt_entry);
+
+    // Else: use TheHoleValue as the receiver for the constructor call.
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
// ----------- S t a t e -------------
- // -- r2 : newly allocated object
- // -- sp[0] : constructor function
+ // -- r2: receiver
+ // -- Slot 3 / sp[0*kPointerSize]: new target
+ // -- Slot 2 / sp[1*kPointerSize]: constructor function
+ // -- Slot 1 / sp[2*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[3*kPointerSize]: context
// -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
- __ pop(r3);
+ // Restore new target.
+ __ Pop(r5);
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
__ Push(r2, r2);
- // Retrieve smi-tagged arguments count from the stack.
+ // ----------- S t a t e -------------
+ // -- r5: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+
+ // Restore constructor function and argument count.
+ __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ LoadP(r2, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(r2);
- // Retrieve the new target value from the stack. This was placed into the
- // frame description in place of the receiver by the optimizing compiler.
- __ la(r5, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
+ // Set up pointer to last argument.
+ __ la(r6, MemOperand(fp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, no_args;
+ // ----------- S t a t e -------------
+ // -- r2: number of arguments (untagged)
+ // -- r5: new target
+ // -- r6: pointer to last argument
+ // -- cr0: condition indicating whether r2 is zero
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- r3 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+
+ __ beq(&no_args);
__ ShiftLeftP(ip, r2, Operand(kPointerSizeLog2));
- __ LoadP(r5, MemOperand(r5, ip));
+ __ SubP(sp, sp, ip);
+ __ LoadRR(r1, r2);
+ __ bind(&loop);
+ __ lay(ip, MemOperand(ip, -kPointerSize));
+ __ LoadP(r0, MemOperand(ip, r6));
+ __ StoreP(r0, MemOperand(ip, sp));
+ __ BranchOnCount(r1, &loop);
+ __ bind(&no_args);
- // Continue with constructor function invocation.
- __ b(&post_instantiation_deopt_entry);
+ // Call the function.
+ ParameterCount actual(r2);
+ __ InvokeFunction(r3, r5, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // ----------- S t a t e -------------
+ // -- r0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: number of arguments
+ // -- sp[3*kPointerSize]: context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // Restore the context from the frame.
+ __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, other_result, leave_frame;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfRoot(r2, Heap::kUndefinedValueRootIndex, &use_receiver);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(r2, &other_result);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CompareObjectType(r2, r6, r6, FIRST_JS_RECEIVER_TYPE);
+ __ bge(&leave_frame);
+
+ __ bind(&other_result);
+ // The result is now neither undefined nor an object.
+ if (restrict_constructor_return) {
+      // Throw if the constructor function is a class constructor.
+ __ LoadP(r6, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ LoadP(r6, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadlW(r6,
+ FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
+ __ TestBitMask(r6,
+ FunctionKind::kClassConstructor
+ << SharedFunctionInfo::kFunctionKindShift,
+ r0);
+ __ beq(&use_receiver);
+ } else {
+ __ b(&use_receiver);
+ }
+ __ bind(&do_throw);
+ {
+ FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ }
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ LoadP(r2, MemOperand(sp));
+ __ JumpIfRoot(r2, Heap::kTheHoleValueRootIndex, &do_throw);
+
+ __ bind(&leave_frame);
+ // Restore smi-tagged arguments count from the frame.
+ __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
}
-}
-} // namespace
+ // Remove caller arguments from the stack and return.
+ STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
+ __ SmiToPtrArrayOffset(r3, r3);
+ __ AddP(sp, sp, r3);
+ __ AddP(sp, sp, Operand(kPointerSize));
+ __ Ret();
}
+} // namespace
+void Builtins::Generate_JSConstructStubGenericRestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, true);
+}
+void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
+ MacroAssembler* masm) {
+ Generate_JSConstructStubGeneric(masm, false);
+}
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStubForDerived(
- MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, true);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
// static
@@ -670,10 +735,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Store input value into generator object.
Label async_await, done_store_input;
- __ AndP(r5, r5,
- Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ CmpP(r5, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
- __ beq(&async_await);
+ __ tmll(r5, Operand(static_cast<int>(SuspendFlags::kAsyncGeneratorAwait)));
+ __ b(Condition(1), &async_await);
__ StoreP(r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset),
r0);
@@ -923,6 +986,41 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
Generate_JSEntryTrampolineHelper(masm, true);
}
+static void ReplaceClosureEntryWithOptimizedCode(
+ MacroAssembler* masm, Register optimized_code_entry, Register closure,
+ Register scratch1, Register scratch2, Register scratch3) {
+ Register native_context = scratch1;
+ // Store code entry in the closure.
+ __ AddP(optimized_code_entry, optimized_code_entry,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ StoreP(optimized_code_entry,
+ FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
+ __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+
+ // Link the closure into the optimized function list.
+ // r6 : code entry
+ // r9: native context
+ // r3 : closure
+ __ LoadP(native_context, NativeContextMemOperand());
+ __ LoadP(scratch2, ContextMemOperand(native_context,
+ Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ StoreP(scratch2,
+ FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset), r0);
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch2,
+ scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ StoreP(
+ closure,
+ ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
+ // Save closure before the write barrier.
+ __ LoadRR(scratch2, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure,
+ scratch3, kLRHasNotBeenSaved, kDontSaveFPRegs);
+ __ LoadRR(closure, scratch2);
+}
+
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
Register args_count = scratch;
@@ -963,6 +1061,21 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(r3);
+ // First check if there is optimized code in the feedback vector which we
+ // could call instead.
+ Label switch_to_optimized_code;
+
+ Register optimized_code_entry = r6;
+ __ LoadP(r2, FieldMemOperand(r3, JSFunction::kFeedbackVectorOffset));
+ __ LoadP(r2, FieldMemOperand(r2, Cell::kValueOffset));
+ __ LoadP(
+ optimized_code_entry,
+ FieldMemOperand(r2, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
+ __ LoadP(optimized_code_entry,
+ FieldMemOperand(optimized_code_entry, WeakCell::kValueOffset));
+ __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ LoadP(r2, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
@@ -1082,6 +1195,29 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ StoreP(r6, FieldMemOperand(r3, JSFunction::kCodeEntryOffset), r0);
__ RecordWriteCodeEntryField(r3, r6, r7);
__ JumpToJSEntry(r6);
+
+  // If there is optimized code on the type feedback vector, check if it is
+  // good to run, and if so, self-heal the closure and call the optimized code.
+ __ bind(&switch_to_optimized_code);
+ __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+ Label gotta_call_runtime;
+
+ // Check if the optimized code is marked for deopt.
+ __ LoadlW(r7, FieldMemOperand(optimized_code_entry,
+ Code::kKindSpecificFlags1Offset));
+ __ And(r0, r7, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ bne(&gotta_call_runtime);
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, r3, r8, r7,
+ r4);
+ __ JumpToJSEntry(optimized_code_entry);
+
+  // Optimized code is marked for deopt; bail out to the CompileLazy runtime
+  // function, which will clear the feedback vector's optimized code slot.
+ __ bind(&gotta_call_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
static void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
@@ -1324,10 +1460,8 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
- Label loop_top, loop_bottom;
Register closure = r3;
- Register map = r8;
Register index = r4;
// Do we have a valid feedback vector?
@@ -1335,88 +1469,32 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ LoadP(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
- __ LoadP(map,
- FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(map,
- FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ LoadP(index, FieldMemOperand(map, FixedArray::kLengthOffset));
- __ CmpSmiLiteral(index, Smi::FromInt(2), r0);
- __ blt(&try_shared);
-
- // Find literals.
- // r9 : native context
- // r4 : length / index
- // r8 : optimized code map
- // r5 : new target
- // r3 : closure
- Register native_context = r9;
- __ LoadP(native_context, NativeContextMemOperand());
-
- __ bind(&loop_top);
- Register temp = r1;
- Register array_pointer = r7;
-
- // Does the native context match?
- __ SmiToPtrArrayOffset(array_pointer, index);
- __ AddP(array_pointer, map, array_pointer);
- __ LoadP(temp, FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousContext));
- __ LoadP(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
- __ CmpP(temp, native_context);
- __ bne(&loop_bottom, Label::kNear);
-
- // Code available?
+ // Is optimized code available in the feedback vector?
Register entry = r6;
- __ LoadP(entry,
- FieldMemOperand(array_pointer,
- SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ LoadP(entry, FieldMemOperand(index, FeedbackVector::kOptimizedCodeIndex *
+ kPointerSize +
+ FeedbackVector::kHeaderSize));
__ LoadP(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found code. Get it into the closure and return.
- // Store code entry in the closure.
- __ AddP(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ StoreP(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset), r0);
- __ RecordWriteCodeEntryField(closure, entry, r7);
+  // Found code; check if it is marked for deopt and, if so, call into the
+  // runtime to clear the optimized code slot.
+ __ LoadlW(r7, FieldMemOperand(entry, Code::kKindSpecificFlags1Offset));
+ __ And(r0, r7, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ bne(&gotta_call_runtime);
- // Link the closure into the optimized function list.
- // r6 : code entry
- // r9: native context
- // r3 : closure
- __ LoadP(
- r7, ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ StoreP(r7, FieldMemOperand(closure, JSFunction::kNextFunctionLinkOffset),
- r0);
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, r7, temp,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ StoreP(
- closure,
- ContextMemOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST), r0);
- // Save closure before the write barrier.
- __ LoadRR(r7, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, r7, temp,
- kLRHasNotBeenSaved, kDontSaveFPRegs);
+ // Code is good, get it into the closure and tail call.
+ ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r8, r7, r4);
__ JumpToJSEntry(entry);
- __ bind(&loop_bottom);
- __ SubSmiLiteral(index, index, Smi::FromInt(SharedFunctionInfo::kEntryLength),
- r0);
- __ CmpSmiLiteral(index, Smi::FromInt(1), r0);
- __ bgt(&loop_top);
-
- // We found no code.
- __ b(&gotta_call_runtime);
-
+ // We found no optimized code.
__ bind(&try_shared);
__ LoadP(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
- __ LoadlB(temp, FieldMemOperand(
- entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
- __ TestBit(temp, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
+ __ LoadlB(r7, FieldMemOperand(
+ entry, SharedFunctionInfo::kMarkedForTierUpByteOffset));
+ __ TestBit(r7, SharedFunctionInfo::kMarkedForTierUpBitWithinByte, r0);
__ bne(&gotta_call_runtime);
// If SFI points to anything other than CompileLazy, install that.
@@ -2166,62 +2244,62 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r3 : the target to call (can be any Object)
- // -- r4 : start index (to support rest parameters)
- // -- lr : return address.
- // -- sp[0] : thisArgument
+ // -- r2 : the number of arguments (not including the receiver)
+ // -- r5 : the new.target (for [[Construct]] calls)
+ // -- r3 : the target to call (can be any Object)
+ // -- r4 : start index (to support rest parameters)
// -----------------------------------
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
- __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ LoadP(ip, MemOperand(r5, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(ip, MemOperand(r6, CommonFrameConstants::kContextOrFrameTypeOffset));
__ CmpP(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ beq(&arguments_adaptor);
{
- __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadP(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
- __ LoadW(r2, FieldMemOperand(
- r2, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRR(r5, fp);
+ __ LoadP(r7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r7, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadW(r7, FieldMemOperand(
+ r7, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRR(r6, fp);
}
__ b(&arguments_done);
__ bind(&arguments_adaptor);
{
// Load the length from the ArgumentsAdaptorFrame.
- __ LoadP(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ LoadP(r7, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
#if V8_TARGET_ARCH_S390X
- __ SmiUntag(r2);
+ __ SmiUntag(r7);
#endif
}
__ bind(&arguments_done);
- Label stack_empty, stack_done, stack_overflow;
+ Label stack_done, stack_overflow;
#if !V8_TARGET_ARCH_S390X
- __ SmiUntag(r2);
+ __ SmiUntag(r7);
#endif
- __ SubP(r2, r2, r4);
- __ CmpP(r2, Operand::Zero());
- __ ble(&stack_empty);
+ __ SubP(r7, r7, r4);
+ __ CmpP(r7, Operand::Zero());
+ __ ble(&stack_done);
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, r2, r4, &stack_overflow);
+ Generate_StackOverflowCheck(masm, r7, r4, &stack_overflow);
// Forward the arguments from the caller frame.
{
Label loop;
- __ AddP(r5, r5, Operand(kPointerSize));
- __ LoadRR(r4, r2);
+ __ AddP(r6, r6, Operand(kPointerSize));
+ __ AddP(r2, r2, r7);
__ bind(&loop);
{
- __ ShiftLeftP(ip, r4, Operand(kPointerSizeLog2));
- __ LoadP(ip, MemOperand(r5, ip));
+ __ ShiftLeftP(ip, r7, Operand(kPointerSizeLog2));
+ __ LoadP(ip, MemOperand(r6, ip));
__ push(ip);
- __ SubP(r4, r4, Operand(1));
- __ CmpP(r4, Operand::Zero());
+ __ SubP(r7, r7, Operand(1));
+ __ CmpP(r7, Operand::Zero());
__ bne(&loop);
}
}
@@ -2229,13 +2307,9 @@ void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
__ b(&stack_done);
__ bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&stack_empty);
- {
- // We just pass the receiver, which is already on the stack.
- __ mov(r2, Operand::Zero());
- }
__ bind(&stack_done);
+ // Tail-call to the {code} handler.
__ Jump(code, RelocInfo::CODE_TARGET);
}
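
Each port defines the same ReplaceClosureEntryWithOptimizedCode helper for the self-healing step shared by the interpreter entry trampoline and CompileLazy. Stripped of the write barriers (RecordWriteCodeEntryField, RecordWriteField, RecordWriteContextSlot) that the real helper must emit for the GC, the bookkeeping is a code-pointer store plus a push onto the context's linked list; a sketch with stand-in structs:

struct Code {};
struct JSFunction {
  Code* code_entry = nullptr;
  JSFunction* next_function_link = nullptr;
};
struct NativeContext { JSFunction* optimized_functions_list = nullptr; };

// Hypothetical model of the helper; the real code operates on tagged heap
// objects and must notify the GC about every pointer store it performs.
void ReplaceClosureEntryWithOptimizedCode(Code* optimized_code,
                                          JSFunction* closure,
                                          NativeContext* native_context) {
  // Store the code entry in the closure so the next call skips CompileLazy.
  closure->code_entry = optimized_code;
  // Link the closure into the context's OPTIMIZED_FUNCTIONS_LIST.
  closure->next_function_link = native_context->optimized_functions_list;
  native_context->optimized_functions_list = closure;
}
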
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index f4e298f571..d4fb131afc 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -116,16 +116,12 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
namespace {
-void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
- bool create_implicit_receiver,
- bool check_derived_construct) {
- Label post_instantiation_deopt_entry;
-
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: number of arguments
- // -- rsi: context
// -- rdi: constructor function
// -- rdx: new target
+ // -- rsi: context
// -----------------------------------
// Enter a construct frame.
@@ -137,36 +133,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ Push(rsi);
__ Push(rcx);
- if (create_implicit_receiver) {
- // Allocate the new receiver object.
- __ Push(rdi);
- __ Push(rdx);
- __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
- RelocInfo::CODE_TARGET);
- __ movp(rbx, rax);
- __ Pop(rdx);
- __ Pop(rdi);
-
- // ----------- S t a t e -------------
- // -- rdi: constructor function
- // -- rbx: newly allocated object
- // -- rdx: new target
- // -----------------------------------
-
- // Retrieve smi-tagged arguments count from the stack.
- __ SmiToInteger32(rax, Operand(rsp, 0 * kPointerSize));
-
- // Push the allocated receiver to the stack. We need two copies
- // because we may have to return the original one and the calling
- // conventions dictate that the called function pops the receiver.
- __ Push(rbx);
- __ Push(rbx);
- } else {
- __ PushRoot(Heap::kTheHoleValueRootIndex);
- }
-
- // Deoptimizer re-enters stub code here.
- __ bind(&post_instantiation_deopt_entry);
+ // The receiver for the builtin/api call.
+ __ PushRoot(Heap::kTheHoleValueRootIndex);
// Set up pointer to last argument.
__ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
@@ -174,6 +142,16 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Copy arguments and receiver to the expression stack.
Label loop, entry;
__ movp(rcx, rax);
+ // ----------- S t a t e -------------
+ // -- rax: number of arguments (untagged)
+ // -- rdi: constructor function
+ // -- rdx: new target
+ // -- rbx: pointer to last argument
+ // -- rcx: counter
+ // -- sp[0*kPointerSize]: the hole (receiver)
+ // -- sp[1*kPointerSize]: number of arguments (tagged)
+ // -- sp[2*kPointerSize]: context
+ // -----------------------------------
__ jmp(&entry);
__ bind(&loop);
__ Push(Operand(rbx, rcx, times_pointer_size, 0));
@@ -182,124 +160,223 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ j(greater_equal, &loop);
// Call the function.
+ // rax: number of arguments (untagged)
+ // rdi: constructor function
+ // rdx: new target
ParameterCount actual(rax);
__ InvokeFunction(rdi, rdx, actual, CALL_FUNCTION,
CheckDebugStepCallWrapper());
- // Store offset of return address for deoptimizer.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
- masm->pc_offset());
- }
-
// Restore context from the frame.
__ movp(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
-
- if (create_implicit_receiver) {
- // If the result is an object (in the ECMA sense), we should get rid
- // of the receiver and use the result; see ECMA-262 section 13.2.2-7
- // on page 74.
- Label use_receiver, exit;
- // If the result is a smi, it is *not* an object in the ECMA sense.
- __ JumpIfSmi(rax, &use_receiver, Label::kNear);
-
- // If the type of the result (stored in its map) is less than
- // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
- __ j(above_equal, &exit, Label::kNear);
-
- // Throw away the result of the constructor invocation and use the
- // on-stack receiver as the result.
- __ bind(&use_receiver);
- __ movp(rax, Operand(rsp, 0));
-
- // Restore the arguments count and leave the construct frame. The
- // arguments count is stored below the receiver.
- __ bind(&exit);
- __ movp(rbx, Operand(rsp, 1 * kPointerSize));
- } else {
- __ movp(rbx, Operand(rsp, 0));
- }
+ // Restore smi-tagged arguments count from the frame.
+ __ movp(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
- // ES6 9.2.2. Step 13+
- // For derived class constructors, throw a TypeError here if the result
- // is not a JSReceiver.
- if (check_derived_construct) {
- Label do_throw, dont_throw;
- __ JumpIfSmi(rax, &do_throw, Label::kNear);
- STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
- __ j(above_equal, &dont_throw, Label::kNear);
- __ bind(&do_throw);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- __ CallRuntime(Runtime::kThrowDerivedConstructorReturnedNonObject);
- }
- __ bind(&dont_throw);
- }
-
// Remove caller arguments from the stack and return.
__ PopReturnAddressTo(rcx);
SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
__ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
__ PushReturnAddressFrom(rcx);
- if (create_implicit_receiver) {
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->constructed_objects(), 1);
- }
+
__ ret(0);
+}
- // Store offset of trampoline address for deoptimizer. This is the bailout
- // point after the receiver instantiation but before the function invocation.
- // We need to restore some registers in order to continue the above code.
- if (create_implicit_receiver && !is_api_function) {
- masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
- masm->pc_offset());
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Generate_JSConstructStubGeneric(MacroAssembler* masm,
+ bool restrict_constructor_return) {
+ // ----------- S t a t e -------------
+ // -- rax: number of arguments (untagged)
+ // -- rdi: constructor function
+ // -- rdx: new target
+ // -- rsi: context
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+
+ // Preserve the incoming parameters on the stack.
+ __ Integer32ToSmi(rcx, rax);
+ __ Push(rsi);
+ __ Push(rcx);
+ __ Push(rdi);
+ __ Push(rdx);
// ----------- S t a t e -------------
- // -- rax : newly allocated object
- // -- rsp[0] : constructor function
+ // -- sp[0*kPointerSize]: new target
+ // -- rdi and sp[1*kPointerSize]: constructor function
+ // -- sp[2*kPointerSize]: argument count
+ // -- sp[3*kPointerSize]: context
// -----------------------------------
- __ Pop(rdi);
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rbx, SharedFunctionInfo::kFunctionKindByteOffset),
+ Immediate(SharedFunctionInfo::kDerivedConstructorBitsWithinByte));
+ __ j(not_zero, &not_create_implicit_receiver);
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1);
+ __ Call(CodeFactory::FastNewObject(masm->isolate()).code(),
+ RelocInfo::CODE_TARGET);
+ __ jmp(&post_instantiation_deopt_entry, Label::kNear);
+
+    // Else: use TheHoleValue as the receiver for the constructor call.
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
+
+ // ----------- S t a t e -------------
+ // -- rax implicit receiver
+ // -- Slot 3 / sp[0*kPointerSize] new target
+ // -- Slot 2 / sp[1*kPointerSize] constructor function
+ // -- Slot 1 / sp[2*kPointerSize] number of arguments (tagged)
+ // -- Slot 0 / sp[3*kPointerSize] context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(rdx);
+
+ // Push the allocated receiver to the stack. We need two copies
+ // because we may have to return the original one and the calling
+ // conventions dictate that the called function pops the receiver.
__ Push(rax);
__ Push(rax);
- // Retrieve smi-tagged arguments count from the stack.
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize] implicit receiver
+ // -- sp[1*kPointerSize] implicit receiver
+ // -- sp[2*kPointerSize] constructor function
+ // -- sp[3*kPointerSize] number of arguments (tagged)
+ // -- sp[4*kPointerSize] context
+ // -----------------------------------
+
+ // Restore constructor function and argument count.
+ __ movp(rdi, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
__ SmiToInteger32(rax,
Operand(rbp, ConstructFrameConstants::kLengthOffset));
- // Retrieve the new target value from the stack. This was placed into the
- // frame description in place of the receiver by the optimizing compiler.
- __ movp(rdx, Operand(rbp, rax, times_pointer_size,
- StandardFrameConstants::kCallerSPOffset));
+ // Set up pointer to last argument.
+ __ leap(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset));
+
+ // Copy arguments and receiver to the expression stack.
+ Label loop, entry;
+ __ movp(rcx, rax);
+ // ----------- S t a t e -------------
+ // -- rax: number of arguments (untagged)
+ // -- rdx: new target
+ // -- rbx: pointer to last argument
+ // -- rcx: counter (tagged)
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- rdi and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+ __ jmp(&entry, Label::kNear);
+ __ bind(&loop);
+ __ Push(Operand(rbx, rcx, times_pointer_size, 0));
+ __ bind(&entry);
+ __ decp(rcx);
+ __ j(greater_equal, &loop);
- // Continue with constructor function invocation.
- __ jmp(&post_instantiation_deopt_entry);
+ // Call the function.
+ ParameterCount actual(rax);
+ __ InvokeFunction(rdi, rdx, actual, CALL_FUNCTION,
+ CheckDebugStepCallWrapper());
+
+ // ----------- S t a t e -------------
+ // -- rax constructor result
+ // -- sp[0*kPointerSize] implicit receiver
+ // -- sp[1*kPointerSize] constructor function
+ // -- sp[2*kPointerSize] number of arguments
+ // -- sp[3*kPointerSize] context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // Restore context from the frame.
+ __ movp(rsi, Operand(rbp, ConstructFrameConstants::kContextOffset));
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, other_result, leave_frame;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfRoot(rax, Heap::kUndefinedValueRootIndex, &use_receiver,
+ Label::kNear);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // If the result is a smi, it is *not* an object in the ECMA sense.
+ __ JumpIfSmi(rax, &other_result, Label::kNear);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ CmpObjectType(rax, FIRST_JS_RECEIVER_TYPE, rcx);
+ __ j(above_equal, &leave_frame, Label::kNear);
+
+ __ bind(&other_result);
+ // The result is now neither undefined nor an object.
+ if (restrict_constructor_return) {
+      // Throw if the constructor function is a class constructor.
+ __ movp(rbx, Operand(rbp, ConstructFrameConstants::kConstructorOffset));
+ __ movp(rbx, FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
+ __ testb(FieldOperand(rbx, SharedFunctionInfo::kFunctionKindByteOffset),
+ Immediate(SharedFunctionInfo::kClassConstructorBitsWithinByte));
+ __ j(Condition::zero, &use_receiver, Label::kNear);
+ } else {
+ __ jmp(&use_receiver, Label::kNear);
+ }
+
+ __ bind(&do_throw);
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ movp(rax, Operand(rsp, 0 * kPointerSize));
+ __ JumpIfRoot(rax, Heap::kTheHoleValueRootIndex, &do_throw);
+
+ __ bind(&leave_frame);
+ // Restore the arguments count.
+ __ movp(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
}
+ // Remove caller arguments from the stack and return.
+ __ PopReturnAddressTo(rcx);
+ SmiIndex index = masm->SmiToIndex(rbx, rbx, kPointerSizeLog2);
+ __ leap(rsp, Operand(rsp, index.reg, index.scale, 1 * kPointerSize));
+ __ PushReturnAddressFrom(rcx);
+ __ ret(0);
}
-
} // namespace
-void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, true, false);
+void Builtins::Generate_JSConstructStubGenericRestrictedReturn(
+ MacroAssembler* masm) {
+ return Generate_JSConstructStubGeneric(masm, true);
+}
+void Builtins::Generate_JSConstructStubGenericUnrestrictedReturn(
+ MacroAssembler* masm) {
+ return Generate_JSConstructStubGeneric(masm, false);
}
-
void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, true, false, false);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
-
void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, false);
-}
-
-void Builtins::Generate_JSBuiltinsConstructStubForDerived(
- MacroAssembler* masm) {
- Generate_JSConstructStubHelper(masm, false, false, true);
+ Generate_JSBuiltinsConstructStubHelper(masm);
}
void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -619,6 +696,37 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(&stepping_prepared);
}
+static void ReplaceClosureEntryWithOptimizedCode(
+ MacroAssembler* masm, Register optimized_code_entry, Register closure,
+ Register scratch1, Register scratch2, Register scratch3) {
+ Register native_context = scratch1;
+
+ // Store the optimized code in the closure.
+ __ leap(optimized_code_entry,
+ FieldOperand(optimized_code_entry, Code::kHeaderSize));
+ __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset),
+ optimized_code_entry);
+ __ RecordWriteCodeEntryField(closure, optimized_code_entry, scratch2);
+
+ // Link the closure into the optimized function list.
+ __ movp(native_context, NativeContextOperand());
+ __ movp(scratch3,
+ ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
+ __ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), scratch3);
+ __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, scratch3,
+ scratch2, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ const int function_list_offset =
+ Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
+ __ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
+ closure);
+ // Save closure before the write barrier.
+ __ movp(scratch3, closure);
+ __ RecordWriteContextSlot(native_context, function_list_offset, closure,
+ scratch2, kDontSaveFPRegs);
+ __ movp(closure, scratch3);
+}
+
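At the object level, the new helper performs the stores plus their write barriers: it installs the optimized code entry in the closure, then splices the closure at the head of the native context's optimized-function list. A hedged C++ sketch with hypothetical stand-in structs; none of these types mirror the real heap layout:

  #include <cstdio>

  struct Code { int id; };
  struct JSFunction;
  struct NativeContext { JSFunction* optimized_functions_list = nullptr; };
  struct JSFunction {
    Code* code_entry = nullptr;
    JSFunction* next_function_link = nullptr;
  };

  // Mirrors the stores above: install the code entry, then splice the
  // closure at the head of the context's optimized-function list.
  void ReplaceClosureEntry(NativeContext* ctx, JSFunction* closure, Code* optimized) {
    closure->code_entry = optimized;                              // kCodeEntryOffset store + barrier
    closure->next_function_link = ctx->optimized_functions_list;  // kNextFunctionLinkOffset store
    ctx->optimized_functions_list = closure;                      // context slot store + barrier
  }

  int main() {
    NativeContext ctx;
    JSFunction f;
    Code c{1};
    ReplaceClosureEntry(&ctx, &f, &c);
    std::printf("%d\n", ctx.optimized_functions_list->code_entry->id);  // prints 1
    return 0;
  }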
static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Register scratch2) {
Register args_count = scratch1;
@@ -666,6 +774,18 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(rdi); // Callee's JS function.
__ Push(rdx); // Callee's new target.
+ // First check if there is optimized code in the feedback vector which we
+ // could call instead.
+ Label switch_to_optimized_code;
+ Register optimized_code_entry = rcx;
+ __ movp(rbx, FieldOperand(rdi, JSFunction::kFeedbackVectorOffset));
+ __ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
+ __ movp(rbx,
+ FieldOperand(rbx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
+ __ movp(optimized_code_entry, FieldOperand(rbx, WeakCell::kValueOffset));
+ __ JumpIfNotSmi(optimized_code_entry, &switch_to_optimized_code);
+
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
__ movp(rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
@@ -780,6 +900,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ movp(FieldOperand(rdi, JSFunction::kCodeEntryOffset), rcx);
__ RecordWriteCodeEntryField(rdi, rcx, r15);
__ jmp(rcx);
+
+ // If there is optimized code in the feedback vector, check whether it is
+ // still valid to run; if so, self-heal the closure and call the optimized code.
+ __ bind(&switch_to_optimized_code);
+ __ leave();
+ Label gotta_call_runtime;
+
+ // Check if the optimized code is marked for deopt.
+ __ testl(FieldOperand(optimized_code_entry, Code::kKindSpecificFlags1Offset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ __ j(not_zero, &gotta_call_runtime);
+
+ // The optimized code is good; get it into the closure, link the closure into
+ // the optimized functions list, then tail-call the optimized code.
+ ReplaceClosureEntryWithOptimizedCode(masm, optimized_code_entry, rdi, r14,
+ r15, rbx);
+ __ jmp(optimized_code_entry);
+
+ // The optimized code is marked for deopt; bail out to the CompileLazy runtime
+ // function, which will clear the feedback vector's optimized code slot.
+ __ bind(&gotta_call_runtime);
+ GenerateTailCallToReturnedCode(masm, Runtime::kEvictOptimizedCodeSlot);
}
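The deopt check above is a single bit test against Code::kKindSpecificFlags1Offset. A standalone sketch of the predicate; the bit position is an assumption for illustration:

  #include <cassert>
  #include <cstdint>

  int main() {
    const uint32_t kMarkedForDeoptimizationBit = 6;  // hypothetical bit position
    uint32_t kind_specific_flags1 = 1u << kMarkedForDeoptimizationBit;
    // testl(FieldOperand(...), Immediate(1 << bit)); j(not_zero, &gotta_call_runtime):
    bool marked = (kind_specific_flags1 & (1u << kMarkedForDeoptimizationBit)) != 0;
    assert(marked);  // this code object would be evicted via kEvictOptimizedCodeSlot
    return 0;
  }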
static void Generate_StackOverflowCheck(
@@ -1060,79 +1202,33 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Label try_shared;
- Label loop_top, loop_bottom;
Register closure = rdi;
- Register map = r8;
- Register index = r9;
// Do we have a valid feedback vector?
__ movp(rbx, FieldOperand(closure, JSFunction::kFeedbackVectorOffset));
__ movp(rbx, FieldOperand(rbx, Cell::kValueOffset));
__ JumpIfRoot(rbx, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
- __ movp(map, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
- __ movp(map, FieldOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
- __ SmiToInteger32(index, FieldOperand(map, FixedArray::kLengthOffset));
- __ cmpl(index, Immediate(2));
- __ j(less, &try_shared);
-
- // r14 : native context
- // r9 : length / index
- // r8 : optimized code map
- // rdx : new target
- // rdi : closure
- Register native_context = r14;
- __ movp(native_context, NativeContextOperand());
-
- __ bind(&loop_top);
- // Native context match?
- Register temp = r11;
- __ movp(temp, FieldOperand(map, index, times_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousContext));
- __ movp(temp, FieldOperand(temp, WeakCell::kValueOffset));
- __ cmpp(temp, native_context);
- __ j(not_equal, &loop_bottom);
-
- // Code available?
+ // Is optimized code available in the feedback vector?
Register entry = rcx;
- __ movp(entry, FieldOperand(map, index, times_pointer_size,
- SharedFunctionInfo::kOffsetToPreviousCachedCode));
+ __ movp(entry,
+ FieldOperand(rbx, FeedbackVector::kOptimizedCodeIndex * kPointerSize +
+ FeedbackVector::kHeaderSize));
__ movp(entry, FieldOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
- // Found code. Get it into the closure and return.
- __ leap(entry, FieldOperand(entry, Code::kHeaderSize));
- __ movp(FieldOperand(closure, JSFunction::kCodeEntryOffset), entry);
- __ RecordWriteCodeEntryField(closure, entry, r15);
+ // Found code; check whether it is marked for deopt, and if so, call into the
+ // runtime to clear the optimized code slot.
+ __ testl(FieldOperand(entry, Code::kKindSpecificFlags1Offset),
+ Immediate(1 << Code::kMarkedForDeoptimizationBit));
+ __ j(not_zero, &gotta_call_runtime);
- // Link the closure into the optimized function list.
- // rcx : code entry (entry)
- // r14 : native context
- // rdx : new target
- // rdi : closure
- __ movp(rbx,
- ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST));
- __ movp(FieldOperand(closure, JSFunction::kNextFunctionLinkOffset), rbx);
- __ RecordWriteField(closure, JSFunction::kNextFunctionLinkOffset, rbx, r15,
- kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- const int function_list_offset =
- Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST);
- __ movp(ContextOperand(native_context, Context::OPTIMIZED_FUNCTIONS_LIST),
- closure);
- // Save closure before the write barrier.
- __ movp(rbx, closure);
- __ RecordWriteContextSlot(native_context, function_list_offset, closure, r15,
- kDontSaveFPRegs);
- __ movp(closure, rbx);
+ // The code is good; get it into the closure and tail-call it.
+ ReplaceClosureEntryWithOptimizedCode(masm, entry, closure, r14, r15, rbx);
__ jmp(entry);
- __ bind(&loop_bottom);
- __ subl(index, Immediate(SharedFunctionInfo::kEntryLength));
- __ cmpl(index, Immediate(1));
- __ j(greater, &loop_top);
-
- // We found no code.
+ // We found no optimized code.
__ bind(&try_shared);
__ movp(entry, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
// Is the shared function marked for tier up?
@@ -2320,13 +2416,13 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
// static
-void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
- Handle<Code> code) {
+void Builtins::Generate_ForwardVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
// ----------- S t a t e -------------
- // -- rdi : the target to call (can be any Object)
- // -- rcx : start index (to support rest parameters)
- // -- rsp[0] : return address.
- // -- rsp[8] : thisArgument
+ // -- rax : the number of arguments (not including the receiver)
+ // -- rdx : the new target (for [[Construct]] calls)
+ // -- rdi : the target to call (can be any Object)
+ // -- rcx : start index (to support rest parameters)
// -----------------------------------
// Check if we have an arguments adaptor frame below the function frame.
@@ -2336,52 +2432,48 @@ void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(equal, &arguments_adaptor, Label::kNear);
{
- __ movp(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movp(rax, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
+ __ movp(r8, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movp(r8, FieldOperand(r8, JSFunction::kSharedFunctionInfoOffset));
__ LoadSharedFunctionInfoSpecialField(
- rax, rax, SharedFunctionInfo::kFormalParameterCountOffset);
+ r8, r8, SharedFunctionInfo::kFormalParameterCountOffset);
__ movp(rbx, rbp);
}
__ jmp(&arguments_done, Label::kNear);
__ bind(&arguments_adaptor);
{
__ SmiToInteger32(
- rax, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ r8, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
- Label stack_empty, stack_done, stack_overflow;
- __ subl(rax, rcx);
- __ j(less_equal, &stack_empty);
+ Label stack_done, stack_overflow;
+ __ subl(r8, rcx);
+ __ j(less_equal, &stack_done);
{
// Check for stack overflow.
- Generate_StackOverflowCheck(masm, rax, rcx, &stack_overflow, Label::kNear);
+ Generate_StackOverflowCheck(masm, r8, rcx, &stack_overflow, Label::kNear);
// Forward the arguments from the caller frame.
{
Label loop;
- __ movl(rcx, rax);
- __ Pop(r8);
+ __ addl(rax, r8);
+ __ PopReturnAddressTo(rcx);
__ bind(&loop);
{
- StackArgumentsAccessor args(rbx, rcx, ARGUMENTS_DONT_CONTAIN_RECEIVER);
+ StackArgumentsAccessor args(rbx, r8, ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ Push(args.GetArgumentOperand(0));
- __ decl(rcx);
+ __ decl(r8);
__ j(not_zero, &loop);
}
- __ Push(r8);
+ __ PushReturnAddressFrom(rcx);
}
}
__ jmp(&stack_done, Label::kNear);
__ bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
- __ bind(&stack_empty);
- {
- // We just pass the receiver, which is already on the stack.
- __ Set(rax, 0);
- }
__ bind(&stack_done);
+ // Tail-call to the {code} handler.
__ Jump(code, RelocInfo::CODE_TARGET);
}
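Conceptually, Generate_ForwardVarargs counts how many caller-frame arguments lie at or past the start index in rcx, then pushes them beneath the return address before tail-calling the handler. A rough standalone sketch of the count-and-copy step; container types and names are illustrative only:

  #include <cstdio>
  #include <vector>

  int main() {
    std::vector<int> caller_args = {10, 20, 30, 40};  // what r8 counts in the caller frame
    int start_index = 1;                              // rcx, e.g. rest-parameter start
    int to_forward = static_cast<int>(caller_args.size()) - start_index;  // subl r8, rcx
    std::vector<int> pushed;                          // stack slots built for the callee
    if (to_forward > 0) {                             // otherwise j(less_equal, &stack_done)
      for (int i = 0; i < to_forward; i++)            // the Push/decl loop
        pushed.push_back(caller_args[start_index + i]);
    }
    std::printf("forwarded %zu args\n", pushed.size());  // forwarded 3 args
    return 0;
  }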
diff --git a/deps/v8/src/char-predicates.cc b/deps/v8/src/char-predicates.cc
index 66508375a1..dc9865b558 100644
--- a/deps/v8/src/char-predicates.cc
+++ b/deps/v8/src/char-predicates.cc
@@ -4,17 +4,17 @@
#include "src/char-predicates.h"
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
#include "unicode/uchar.h"
#include "unicode/urename.h"
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
namespace v8 {
namespace internal {
bool SupplementaryPlanes::IsIDStart(uc32 c) {
DCHECK(c > 0xFFFF);
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
// This only works for code points in the SMPs, since ICU does not exclude
// code points with properties 'Pattern_Syntax' or 'Pattern_White_Space'.
// Code points in the SMP do not have those properties.
@@ -22,13 +22,13 @@ bool SupplementaryPlanes::IsIDStart(uc32 c) {
#else
// This is incorrect, but if we don't have ICU, use this as fallback.
return false;
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
}
bool SupplementaryPlanes::IsIDPart(uc32 c) {
DCHECK(c > 0xFFFF);
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
// This only works for code points in the SMPs, since ICU does not exclude
// code points with properties 'Pattern_Syntax' or 'Pattern_White_Space'.
// Code points in the SMP do not have those properties.
@@ -36,7 +36,7 @@ bool SupplementaryPlanes::IsIDPart(uc32 c) {
#else
// This is incorrect, but if we don't have ICU, use this as fallback.
return false;
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/code-factory.cc b/deps/v8/src/code-factory.cc
index 71fbb1dbab..5252b438be 100644
--- a/deps/v8/src/code-factory.cc
+++ b/deps/v8/src/code-factory.cc
@@ -232,12 +232,6 @@ Callable CodeFactory::NumberToString(Isolate* isolate) {
}
// static
-Callable CodeFactory::RegExpExec(Isolate* isolate) {
- RegExpExecStub stub(isolate);
- return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
-}
-
-// static
Callable CodeFactory::StringFromCharCode(Isolate* isolate) {
Handle<Code> code(isolate->builtins()->StringFromCharCode());
return Callable(code, BuiltinDescriptor(isolate));
@@ -377,8 +371,8 @@ Callable CodeFactory::FastCloneShallowArray(
}
// static
-Callable CodeFactory::FastCloneShallowObject(Isolate* isolate, int length) {
- return Callable(isolate->builtins()->NewCloneShallowObject(length),
+Callable CodeFactory::FastCloneShallowObject(Isolate* isolate) {
+ return Callable(isolate->builtins()->FastCloneShallowObject(),
FastCloneShallowObjectDescriptor(isolate));
}
@@ -464,6 +458,18 @@ Callable CodeFactory::ConstructFunction(Isolate* isolate) {
}
// static
+Callable CodeFactory::ConstructForwardVarargs(Isolate* isolate) {
+ return Callable(isolate->builtins()->ConstructForwardVarargs(),
+ ConstructForwardVarargsDescriptor(isolate));
+}
+
+// static
+Callable CodeFactory::ConstructFunctionForwardVarargs(Isolate* isolate) {
+ return Callable(isolate->builtins()->ConstructFunctionForwardVarargs(),
+ ConstructForwardVarargsDescriptor(isolate));
+}
+
+// static
Callable CodeFactory::InterpreterPushArgsThenCall(
Isolate* isolate, ConvertReceiverMode receiver_mode,
TailCallMode tail_call_mode, InterpreterPushArgsMode mode) {
@@ -506,50 +512,19 @@ Callable CodeFactory::ArrayConstructor(Isolate* isolate) {
}
// static
-Callable CodeFactory::ArrayPush(Isolate* isolate) {
- return Callable(isolate->builtins()->ArrayPush(), BuiltinDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ArrayFilterLoopContinuation(Isolate* isolate) {
- return Callable(isolate->builtins()->ArrayFilterLoopContinuation(),
- IteratingArrayBuiltinLoopContinuationDescriptor(isolate));
+Callable CodeFactory::ArrayPop(Isolate* isolate) {
+ return Callable(isolate->builtins()->ArrayPop(), BuiltinDescriptor(isolate));
}
// static
-Callable CodeFactory::ArrayMapLoopContinuation(Isolate* isolate) {
- return Callable(isolate->builtins()->ArrayMapLoopContinuation(),
- IteratingArrayBuiltinLoopContinuationDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ArrayForEachLoopContinuation(Isolate* isolate) {
- return Callable(isolate->builtins()->ArrayForEachLoopContinuation(),
- IteratingArrayBuiltinLoopContinuationDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ArraySomeLoopContinuation(Isolate* isolate) {
- return Callable(isolate->builtins()->ArraySomeLoopContinuation(),
- IteratingArrayBuiltinLoopContinuationDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ArrayEveryLoopContinuation(Isolate* isolate) {
- return Callable(isolate->builtins()->ArrayEveryLoopContinuation(),
- IteratingArrayBuiltinLoopContinuationDescriptor(isolate));
-}
-
-// static
-Callable CodeFactory::ArrayReduceLoopContinuation(Isolate* isolate) {
- return Callable(isolate->builtins()->ArrayReduceLoopContinuation(),
- IteratingArrayBuiltinLoopContinuationDescriptor(isolate));
+Callable CodeFactory::ArrayShift(Isolate* isolate) {
+ return Callable(isolate->builtins()->ArrayShift(),
+ BuiltinDescriptor(isolate));
}
// static
-Callable CodeFactory::ArrayReduceRightLoopContinuation(Isolate* isolate) {
- return Callable(isolate->builtins()->ArrayReduceRightLoopContinuation(),
- IteratingArrayBuiltinLoopContinuationDescriptor(isolate));
+Callable CodeFactory::ArrayPush(Isolate* isolate) {
+ return Callable(isolate->builtins()->ArrayPush(), BuiltinDescriptor(isolate));
}
// static
diff --git a/deps/v8/src/code-factory.h b/deps/v8/src/code-factory.h
index 669f3798aa..c0cc549523 100644
--- a/deps/v8/src/code-factory.h
+++ b/deps/v8/src/code-factory.h
@@ -91,8 +91,6 @@ class V8_EXPORT_PRIVATE CodeFactory final {
OrdinaryToPrimitiveHint hint);
static Callable NumberToString(Isolate* isolate);
- static Callable RegExpExec(Isolate* isolate);
-
static Callable Add(Isolate* isolate);
static Callable Subtract(Isolate* isolate);
static Callable Multiply(Isolate* isolate);
@@ -135,7 +133,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable FastCloneRegExp(Isolate* isolate);
static Callable FastCloneShallowArray(Isolate* isolate,
AllocationSiteMode allocation_mode);
- static Callable FastCloneShallowObject(Isolate* isolate, int length);
+ static Callable FastCloneShallowObject(Isolate* isolate);
static Callable FastNewFunctionContext(Isolate* isolate,
ScopeType scope_type);
@@ -169,6 +167,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable Construct(Isolate* isolate);
static Callable ConstructWithSpread(Isolate* isolate);
static Callable ConstructFunction(Isolate* isolate);
+ static Callable ConstructForwardVarargs(Isolate* isolate);
+ static Callable ConstructFunctionForwardVarargs(Isolate* isolate);
static Callable CreateIterResultObject(Isolate* isolate);
static Callable HasProperty(Isolate* isolate);
static Callable ForInFilter(Isolate* isolate);
@@ -184,14 +184,9 @@ class V8_EXPORT_PRIVATE CodeFactory final {
static Callable InterpreterOnStackReplacement(Isolate* isolate);
static Callable ArrayConstructor(Isolate* isolate);
+ static Callable ArrayPop(Isolate* isolate);
static Callable ArrayPush(Isolate* isolate);
- static Callable ArrayFilterLoopContinuation(Isolate* isolate);
- static Callable ArrayMapLoopContinuation(Isolate* isolate);
- static Callable ArrayForEachLoopContinuation(Isolate* isolate);
- static Callable ArraySomeLoopContinuation(Isolate* isolate);
- static Callable ArrayEveryLoopContinuation(Isolate* isolate);
- static Callable ArrayReduceLoopContinuation(Isolate* isolate);
- static Callable ArrayReduceRightLoopContinuation(Isolate* isolate);
+ static Callable ArrayShift(Isolate* isolate);
static Callable FunctionPrototypeBind(Isolate* isolate);
static Callable PromiseHandleReject(Isolate* isolate);
diff --git a/deps/v8/src/code-stub-assembler.cc b/deps/v8/src/code-stub-assembler.cc
index 7b062b6bf6..edfe2de86c 100644
--- a/deps/v8/src/code-stub-assembler.cc
+++ b/deps/v8/src/code-stub-assembler.cc
@@ -43,41 +43,45 @@ void CodeStubAssembler::HandleBreakOnNode() {
BreakOnNode(node_id);
}
-void CodeStubAssembler::Assert(const NodeGenerator& codition_body,
+void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
const char* message, const char* file,
int line) {
#if defined(DEBUG)
if (FLAG_debug_code) {
- Label ok(this);
- Label not_ok(this, Label::kDeferred);
- if (message != nullptr && FLAG_code_comments) {
- Comment("[ Assert: %s", message);
+ Check(condition_body, message, file, line);
+ }
+#endif
+}
+
+void CodeStubAssembler::Check(const NodeGenerator& condition_body,
+ const char* message, const char* file, int line) {
+ Label ok(this);
+ Label not_ok(this, Label::kDeferred);
+ if (message != nullptr && FLAG_code_comments) {
+ Comment("[ Assert: %s", message);
+ } else {
+ Comment("[ Assert");
+ }
+ Node* condition = condition_body();
+ DCHECK_NOT_NULL(condition);
+ Branch(condition, &ok, &not_ok);
+ BIND(&not_ok);
+ if (message != nullptr) {
+ char chars[1024];
+ Vector<char> buffer(chars);
+ if (file != nullptr) {
+ SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
} else {
- Comment("[ Assert");
+ SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
}
- Node* condition = codition_body();
- DCHECK_NOT_NULL(condition);
- Branch(condition, &ok, &not_ok);
- BIND(&not_ok);
- if (message != nullptr) {
- char chars[1024];
- Vector<char> buffer(chars);
- if (file != nullptr) {
- SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file,
- line);
- } else {
- SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
- }
- CallRuntime(
- Runtime::kGlobalPrint, SmiConstant(Smi::kZero),
- HeapConstant(factory()->NewStringFromAsciiChecked(&(buffer[0]))));
- }
- DebugBreak();
- Goto(&ok);
- BIND(&ok);
- Comment("] Assert");
+ CallRuntime(
+ Runtime::kGlobalPrint, SmiConstant(Smi::kZero),
+ HeapConstant(factory()->NewStringFromAsciiChecked(&(buffer[0]))));
}
-#endif
+ DebugBreak();
+ Goto(&ok);
+ BIND(&ok);
+ Comment("] Assert");
}
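The refactoring above splits the old Assert body out into Check: Assert stays compiled in only for DEBUG builds behind FLAG_debug_code, while Check always emits the print-and-break path. A minimal sketch of the resulting control flow, with the CSA machinery replaced by plain C++:

  #define DEBUG
  #include <cstdio>
  #include <cstdlib>

  static bool FLAG_debug_code = true;

  // Check always emits the failure path, even in release builds.
  void Check(bool condition, const char* message) {
    if (condition) return;                                     // Branch(condition, &ok, &not_ok)
    std::fprintf(stderr, "CSA_ASSERT failed: %s\n", message);  // Runtime::kGlobalPrint
    std::abort();                                              // DebugBreak()
  }

  // Assert only forwards to Check in debug builds with --debug-code.
  void Assert(bool condition, const char* message) {
  #if defined(DEBUG)
    if (FLAG_debug_code) Check(condition, message);
  #endif
  }

  int main() {
    Assert(1 + 1 == 2, "debug-only check");
    Check(true, "always-on check");
    return 0;
  }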
Node* CodeStubAssembler::Select(Node* condition, const NodeGenerator& true_body,
@@ -593,12 +597,12 @@ Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
Branch(WordEqual(dividend, SmiConstant(0)), &dividend_is_zero,
&dividend_is_not_zero);
- Bind(&dividend_is_zero);
+ BIND(&dividend_is_zero);
{
GotoIf(SmiLessThan(divisor, SmiConstant(0)), bailout);
Goto(&dividend_is_not_zero);
}
- Bind(&dividend_is_not_zero);
+ BIND(&dividend_is_not_zero);
Node* untagged_divisor = SmiToWord32(divisor);
Node* untagged_dividend = SmiToWord32(dividend);
@@ -609,7 +613,7 @@ Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
Branch(Word32Equal(untagged_divisor, Int32Constant(-1)),
&divisor_is_minus_one, &divisor_is_not_minus_one);
- Bind(&divisor_is_minus_one);
+ BIND(&divisor_is_minus_one);
{
GotoIf(Word32Equal(
untagged_dividend,
@@ -617,7 +621,7 @@ Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
bailout);
Goto(&divisor_is_not_minus_one);
}
- Bind(&divisor_is_not_minus_one);
+ BIND(&divisor_is_not_minus_one);
Node* untagged_result = Int32Div(untagged_dividend, untagged_divisor);
Node* truncated = Int32Mul(untagged_result, untagged_divisor);
@@ -654,8 +658,16 @@ Node* CodeStubAssembler::TaggedIsPositiveSmi(Node* a) {
Node* CodeStubAssembler::WordIsWordAligned(Node* word) {
return WordEqual(IntPtrConstant(0),
- WordAnd(word, IntPtrConstant((1 << kPointerSizeLog2) - 1)));
+ WordAnd(word, IntPtrConstant(kPointerSize - 1)));
+}
+
+#if DEBUG
+void CodeStubAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) {
+ CodeAssembler::Bind(label, debug_info);
}
+#else
+void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); }
+#endif // DEBUG
void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
Node* receiver_map, Label* definitely_no_elements,
@@ -1238,6 +1250,16 @@ Node* CodeStubAssembler::LoadStringLength(Node* object) {
return LoadObjectField(object, String::kLengthOffset);
}
+Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) {
+ CSA_ASSERT(this, IsString(seq_string));
+ CSA_ASSERT(this,
+ IsSequentialStringInstanceType(LoadInstanceType(seq_string)));
+ STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ return IntPtrAdd(
+ BitcastTaggedToWord(seq_string),
+ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+}
+
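The pointer math in PointerToSeqStringData relies on one-byte and two-byte sequential strings sharing a header size (the STATIC_ASSERT above), so character data always starts at the same untagged offset. A tiny worked example of the tag adjustment; the header size here is a made-up stand-in:

  #include <cassert>
  #include <cstdint>

  int main() {
    const intptr_t kHeapObjectTag = 1;   // V8 tags heap pointers with 1
    const intptr_t kHeaderSize = 24;     // made-up stand-in for SeqString header size
    intptr_t tagged_string = 0x1000 + kHeapObjectTag;  // a tagged heap pointer
    // BitcastTaggedToWord(seq_string) + (kHeaderSize - kHeapObjectTag):
    intptr_t data = tagged_string + (kHeaderSize - kHeapObjectTag);
    assert(data == 0x1000 + kHeaderSize);  // untagged address of the first character
    return 0;
  }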
Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
CSA_ASSERT(this, IsJSValue(object));
return LoadObjectField(object, JSValue::kValueOffset);
@@ -1393,7 +1415,7 @@ Node* CodeStubAssembler::LoadContextElement(Node* context, int slot_index) {
Node* CodeStubAssembler::LoadContextElement(Node* context, Node* slot_index) {
Node* offset =
- IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ IntPtrAdd(TimesPointerSize(slot_index),
IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
return Load(MachineType::AnyTagged(), context, offset);
}
@@ -1407,7 +1429,7 @@ Node* CodeStubAssembler::StoreContextElement(Node* context, int slot_index,
Node* CodeStubAssembler::StoreContextElement(Node* context, Node* slot_index,
Node* value) {
Node* offset =
- IntPtrAdd(WordShl(slot_index, kPointerSizeLog2),
+ IntPtrAdd(TimesPointerSize(slot_index),
IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
return Store(context, offset, value);
}
@@ -1430,6 +1452,27 @@ Node* CodeStubAssembler::LoadJSArrayElementsMap(ElementsKind kind,
return LoadContextElement(native_context, Context::ArrayMapIndex(kind));
}
+Node* CodeStubAssembler::LoadJSFunctionPrototype(Node* function,
+ Label* if_bailout) {
+ CSA_ASSERT(this, TaggedIsNotSmi(function));
+ CSA_ASSERT(this, IsJSFunction(function));
+ CSA_ASSERT(this, IsClearWord32(LoadMapBitField(LoadMap(function)),
+ 1 << Map::kHasNonInstancePrototype));
+ Node* proto_or_map =
+ LoadObjectField(function, JSFunction::kPrototypeOrInitialMapOffset);
+ GotoIf(IsTheHole(proto_or_map), if_bailout);
+
+ VARIABLE(var_result, MachineRepresentation::kTagged, proto_or_map);
+ Label done(this, &var_result);
+ GotoIfNot(IsMap(proto_or_map), &done);
+
+ var_result.Bind(LoadMapPrototype(proto_or_map));
+ Goto(&done);
+
+ BIND(&done);
+ return var_result.value();
+}
+
Node* CodeStubAssembler::StoreHeapNumberValue(Node* object, Node* value) {
return StoreObjectFieldNoWriteBarrier(object, HeapNumber::kValueOffset, value,
MachineRepresentation::kFloat64);
@@ -1523,8 +1566,57 @@ Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
return StoreNoWriteBarrier(rep, object, offset, value);
}
-Node* CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* context,
- Node* array,
+void CodeStubAssembler::EnsureArrayLengthWritable(Node* map, Label* bailout) {
+ // Check whether the length property is writable. The length property is the
+ // only default named property on arrays. It's nonconfigurable, hence is
+ // guaranteed to stay the first property.
+ Node* descriptors = LoadMapDescriptors(map);
+ Node* details =
+ LoadFixedArrayElement(descriptors, DescriptorArray::ToDetailsIndex(0));
+ GotoIf(IsSetSmi(details, PropertyDetails::kAttributesReadOnlyMask), bailout);
+}
+
+Node* CodeStubAssembler::EnsureArrayPushable(Node* receiver, Label* bailout) {
+ // Disallow pushing onto prototypes. It might be the JSArray prototype.
+ // Disallow pushing onto non-extensible objects.
+ Comment("Disallow pushing onto prototypes");
+ Node* map = LoadMap(receiver);
+ Node* bit_field2 = LoadMapBitField2(map);
+ int mask = static_cast<int>(Map::IsPrototypeMapBits::kMask) |
+ (1 << Map::kIsExtensible);
+ Node* test = Word32And(bit_field2, Int32Constant(mask));
+ GotoIf(Word32NotEqual(test, Int32Constant(1 << Map::kIsExtensible)), bailout);
+
+ // Disallow pushing onto arrays in dictionary named property mode. We need
+ // to figure out whether the length property is still writable.
+ Comment("Disallow pushing onto arrays in dictionary named property mode");
+ GotoIf(IsDictionaryMap(map), bailout);
+
+ EnsureArrayLengthWritable(map, bailout);
+
+ Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
+ return kind;
+}
+
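The masked compare in EnsureArrayPushable accepts a map only when the extensible bit is set and the prototype-map bit is clear, in one Word32And plus one compare. A standalone sketch of that predicate with hypothetical bit positions:

  #include <cassert>
  #include <cstdint>

  int main() {
    // Hypothetical bit positions within Map::bit_field2, illustration only.
    const uint32_t kIsExtensible = 0;
    const uint32_t kIsPrototypeMap = 1;
    auto pushable = [&](uint32_t bit_field2) {
      uint32_t mask = (1u << kIsPrototypeMap) | (1u << kIsExtensible);
      // GotoIf(Word32NotEqual(test, Int32Constant(1 << kIsExtensible)), bailout):
      return (bit_field2 & mask) == (1u << kIsExtensible);
    };
    assert(pushable(1u << kIsExtensible));                               // extensible, not a prototype
    assert(!pushable(0));                                                // non-extensible
    assert(!pushable((1u << kIsExtensible) | (1u << kIsPrototypeMap)));  // prototype map
    return 0;
  }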
+void CodeStubAssembler::PossiblyGrowElementsCapacity(
+ ParameterMode mode, ElementsKind kind, Node* array, Node* length,
+ Variable* var_elements, Node* growth, Label* bailout) {
+ Label fits(this, var_elements);
+ Node* capacity =
+ TaggedToParameter(LoadFixedArrayBaseLength(var_elements->value()), mode);
+ // The length and growth nodes are already in a representation appropriate to
+ // the ParameterMode.
+ Node* new_length = IntPtrOrSmiAdd(growth, length, mode);
+ GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits);
+ Node* new_capacity = CalculateNewElementsCapacity(new_length, mode);
+ var_elements->Bind(GrowElementsCapacity(array, var_elements->value(), kind,
+ kind, capacity, new_capacity, mode,
+ bailout));
+ Goto(&fits);
+ BIND(&fits);
+}
+
+Node* CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
CodeStubArguments& args,
Variable& arg_index,
Label* bailout) {
@@ -1536,46 +1628,22 @@ Node* CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* context,
VARIABLE(var_length, OptimalParameterRepresentation(),
TaggedToParameter(LoadJSArrayLength(array), mode));
VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array));
- Node* capacity =
- TaggedToParameter(LoadFixedArrayBaseLength(var_elements.value()), mode);
// Resize the capacity of the fixed array if it doesn't fit.
- Label fits(this, &var_elements);
Node* first = arg_index.value();
- Node* growth = IntPtrSub(args.GetLength(), first);
- Node* new_length =
- IntPtrOrSmiAdd(WordToParameter(growth, mode), var_length.value(), mode);
- GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits);
- Node* new_capacity = CalculateNewElementsCapacity(new_length, mode);
- var_elements.Bind(GrowElementsCapacity(array, var_elements.value(), kind,
- kind, capacity, new_capacity, mode,
- &pre_bailout));
- Goto(&fits);
- BIND(&fits);
- Node* elements = var_elements.value();
+ Node* growth = WordToParameter(IntPtrSub(args.GetLength(), first), mode);
+ PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
+ &var_elements, growth, &pre_bailout);
// Push each argument onto the end of the array now that there is enough
// capacity.
CodeStubAssembler::VariableList push_vars({&var_length}, zone());
+ Node* elements = var_elements.value();
args.ForEach(
push_vars,
[this, kind, mode, elements, &var_length, &pre_bailout](Node* arg) {
- if (IsFastSmiElementsKind(kind)) {
- GotoIf(TaggedIsNotSmi(arg), &pre_bailout);
- } else if (IsFastDoubleElementsKind(kind)) {
- GotoIfNotNumber(arg, &pre_bailout);
- }
- if (IsFastDoubleElementsKind(kind)) {
- Node* double_value = ChangeNumberToFloat64(arg);
- StoreFixedDoubleArrayElement(elements, var_length.value(),
- Float64SilenceNaN(double_value), mode);
- } else {
- WriteBarrierMode barrier_mode = IsFastSmiElementsKind(kind)
- ? SKIP_WRITE_BARRIER
- : UPDATE_WRITE_BARRIER;
- StoreFixedArrayElement(elements, var_length.value(), arg,
- barrier_mode, 0, mode);
- }
+ TryStoreArrayElement(kind, mode, &pre_bailout, elements,
+ var_length.value(), arg);
Increment(var_length, 1, mode);
},
first, nullptr);
@@ -1600,6 +1668,49 @@ Node* CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* context,
return var_tagged_length.value();
}
+void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind,
+ ParameterMode mode, Label* bailout,
+ Node* elements, Node* index,
+ Node* value) {
+ if (IsFastSmiElementsKind(kind)) {
+ GotoIf(TaggedIsNotSmi(value), bailout);
+ } else if (IsFastDoubleElementsKind(kind)) {
+ GotoIfNotNumber(value, bailout);
+ }
+ if (IsFastDoubleElementsKind(kind)) {
+ Node* double_value = ChangeNumberToFloat64(value);
+ StoreFixedDoubleArrayElement(elements, index,
+ Float64SilenceNaN(double_value), mode);
+ } else {
+ WriteBarrierMode barrier_mode =
+ IsFastSmiElementsKind(kind) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+ StoreFixedArrayElement(elements, index, value, barrier_mode, 0, mode);
+ }
+}
+
+void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
+ Node* value, Label* bailout) {
+ Comment("BuildAppendJSArray: %s", ElementsKindToString(kind));
+ ParameterMode mode = OptimalParameterMode();
+ VARIABLE(var_length, OptimalParameterRepresentation(),
+ TaggedToParameter(LoadJSArrayLength(array), mode));
+ VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array));
+
+ // Resize the capacity of the fixed array if it doesn't fit.
+ Node* growth = IntPtrOrSmiConstant(1, mode);
+ PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
+ &var_elements, growth, bailout);
+
+ // Store the value at the end of the array now that there is enough capacity.
+ TryStoreArrayElement(kind, mode, bailout, var_elements.value(),
+ var_length.value(), value);
+ Increment(var_length, 1, mode);
+
+ Node* length = ParameterToTagged(var_length.value(), mode);
+ StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
+}
+
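The single-value BuildAppendJSArray overload follows the same grow, store, increment shape as the multi-argument version. A loose sketch of the fast path using a std::vector as a stand-in for the elements FixedArray; the real growth policy is CalculateNewElementsCapacity, not the doubling shown here:

  #include <cassert>
  #include <vector>

  int main() {
    std::vector<int> elements(4, 0);  // backing-store stand-in, length 4
    std::size_t length = elements.size();
    std::size_t growth = 1;                       // IntPtrOrSmiConstant(1, mode)
    if (length + growth > elements.capacity())    // PossiblyGrowElementsCapacity
      elements.reserve(2 * elements.capacity());  // stand-in growth policy
    elements.resize(length + growth);
    elements[length] = 42;                        // TryStoreArrayElement
    length += growth;                             // Increment(var_length, 1, mode)
    assert(length == 5 && elements[4] == 42);     // JSArray::kLengthOffset store
    return 0;
  }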
Node* CodeStubAssembler::AllocateHeapNumber(MutableMode mode) {
Node* result = Allocate(HeapNumber::kSize, kNone);
Heap::RootListIndex heap_map_index =
@@ -1939,14 +2050,15 @@ Node* CodeStubAssembler::AllocateNameDictionary(Node* at_least_space_for) {
CSA_ASSERT(this, UintPtrLessThanOrEqual(
at_least_space_for,
IntPtrConstant(NameDictionary::kMaxCapacity)));
-
Node* capacity = HashTableComputeCapacity(at_least_space_for);
- CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
+ return AllocateNameDictionaryWithCapacity(capacity);
+}
+Node* CodeStubAssembler::AllocateNameDictionaryWithCapacity(Node* capacity) {
+ CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
Node* length = EntryToIndex<NameDictionary>(capacity);
- Node* store_size =
- IntPtrAdd(WordShl(length, IntPtrConstant(kPointerSizeLog2)),
- IntPtrConstant(NameDictionary::kHeaderSize));
+ Node* store_size = IntPtrAdd(TimesPointerSize(length),
+ IntPtrConstant(NameDictionary::kHeaderSize));
Node* result = AllocateInNewSpace(store_size);
Comment("Initialize NameDictionary");
@@ -1983,12 +2095,27 @@ Node* CodeStubAssembler::AllocateNameDictionary(Node* at_least_space_for) {
return result;
}
+Node* CodeStubAssembler::CopyNameDictionary(Node* dictionary,
+ Label* large_object_fallback) {
+ CSA_ASSERT(this, IsHashTable(dictionary));
+ Comment("Copy boilerplate property dict");
+ Node* capacity = SmiUntag(GetCapacity<NameDictionary>(dictionary));
+ CSA_ASSERT(this, IntPtrGreaterThanOrEqual(capacity, IntPtrConstant(0)));
+ GotoIf(UintPtrGreaterThan(
+ capacity, IntPtrConstant(NameDictionary::kMaxRegularCapacity)),
+ large_object_fallback);
+ Node* properties = AllocateNameDictionaryWithCapacity(capacity);
+ Node* length = SmiUntag(LoadFixedArrayBaseLength(dictionary));
+ CopyFixedArrayElements(FAST_ELEMENTS, dictionary, properties, length,
+ SKIP_WRITE_BARRIER, INTPTR_PARAMETERS);
+ return properties;
+}
+
Node* CodeStubAssembler::AllocateJSObjectFromMap(Node* map, Node* properties,
Node* elements,
AllocationFlags flags) {
CSA_ASSERT(this, IsMap(map));
- Node* size =
- IntPtrMul(LoadMapInstanceSize(map), IntPtrConstant(kPointerSize));
+ Node* size = TimesPointerSize(LoadMapInstanceSize(map));
Node* object = AllocateInNewSpace(size, flags);
StoreMapNoWriteBarrier(object, map);
InitializeJSObjectFromMap(object, map, size, properties, elements);
@@ -2839,6 +2966,10 @@ Node* CodeStubAssembler::ChangeNumberToIntPtr(Node* value) {
return result.value();
}
+Node* CodeStubAssembler::TimesPointerSize(Node* value) {
+ return WordShl(value, IntPtrConstant(kPointerSizeLog2));
+}
+
Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
PrimitiveType primitive_type,
char const* method_name) {
@@ -2900,10 +3031,27 @@ Node* CodeStubAssembler::ToThisValue(Node* context, Node* value,
BIND(&done_throw);
{
+ const char* primitive_name = nullptr;
+ switch (primitive_type) {
+ case PrimitiveType::kBoolean:
+ primitive_name = "Boolean";
+ break;
+ case PrimitiveType::kNumber:
+ primitive_name = "Number";
+ break;
+ case PrimitiveType::kString:
+ primitive_name = "String";
+ break;
+ case PrimitiveType::kSymbol:
+ primitive_name = "Symbol";
+ break;
+ }
+ CHECK_NOT_NULL(primitive_name);
+
// The {value} is not a compatible receiver for this method.
- CallRuntime(Runtime::kThrowNotGeneric, context,
- HeapConstant(factory()->NewStringFromAsciiChecked(method_name,
- TENURED)));
+ CallRuntime(Runtime::kThrowTypeError, context,
+ SmiConstant(MessageTemplate::kNotGeneric),
+ CStringConstant(method_name), CStringConstant(primitive_name));
Unreachable();
}
@@ -3009,6 +3157,20 @@ Node* CodeStubAssembler::IsSequentialStringInstanceType(Node* instance_type) {
Int32Constant(kSeqStringTag));
}
+Node* CodeStubAssembler::IsConsStringInstanceType(Node* instance_type) {
+ CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ return Word32Equal(
+ Word32And(instance_type, Int32Constant(kStringRepresentationMask)),
+ Int32Constant(kConsStringTag));
+}
+
+Node* CodeStubAssembler::IsIndirectStringInstanceType(Node* instance_type) {
+ CSA_ASSERT(this, IsStringInstanceType(instance_type));
+ STATIC_ASSERT(kIsIndirectStringMask == 0x1);
+ STATIC_ASSERT(kIsIndirectStringTag == 0x1);
+ return Word32And(instance_type, Int32Constant(kIsIndirectStringMask));
+}
+
Node* CodeStubAssembler::IsExternalStringInstanceType(Node* instance_type) {
CSA_ASSERT(this, IsStringInstanceType(instance_type));
return Word32Equal(
@@ -3031,20 +3193,23 @@ Node* CodeStubAssembler::IsJSReceiverInstanceType(Node* instance_type) {
Int32Constant(FIRST_JS_RECEIVER_TYPE));
}
+Node* CodeStubAssembler::IsJSReceiverMap(Node* map) {
+ return IsJSReceiverInstanceType(LoadMapInstanceType(map));
+}
+
Node* CodeStubAssembler::IsJSReceiver(Node* object) {
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- return IsJSReceiverInstanceType(LoadInstanceType(object));
+ return IsJSReceiverMap(LoadMap(object));
}
-Node* CodeStubAssembler::IsJSReceiverMap(Node* map) {
+Node* CodeStubAssembler::IsJSObjectMap(Node* map) {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- return IsJSReceiverInstanceType(LoadMapInstanceType(map));
+ CSA_ASSERT(this, IsMap(map));
+ return Int32GreaterThanOrEqual(LoadMapInstanceType(map),
+ Int32Constant(FIRST_JS_OBJECT_TYPE));
}
Node* CodeStubAssembler::IsJSObject(Node* object) {
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
- return Int32GreaterThanOrEqual(LoadInstanceType(object),
- Int32Constant(FIRST_JS_RECEIVER_TYPE));
+ return IsJSObjectMap(LoadMap(object));
}
Node* CodeStubAssembler::IsJSGlobalProxy(Node* object) {
@@ -3056,12 +3221,28 @@ Node* CodeStubAssembler::IsMap(Node* map) {
return HasInstanceType(map, MAP_TYPE);
}
-Node* CodeStubAssembler::IsJSValue(Node* map) {
- return HasInstanceType(map, JS_VALUE_TYPE);
+Node* CodeStubAssembler::IsJSValueInstanceType(Node* instance_type) {
+ return Word32Equal(instance_type, Int32Constant(JS_VALUE_TYPE));
+}
+
+Node* CodeStubAssembler::IsJSValue(Node* object) {
+ return IsJSValueMap(LoadMap(object));
+}
+
+Node* CodeStubAssembler::IsJSValueMap(Node* map) {
+ return IsJSValueInstanceType(LoadMapInstanceType(map));
+}
+
+Node* CodeStubAssembler::IsJSArrayInstanceType(Node* instance_type) {
+ return Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE));
}
Node* CodeStubAssembler::IsJSArray(Node* object) {
- return HasInstanceType(object, JS_ARRAY_TYPE);
+ return IsJSArrayMap(LoadMap(object));
+}
+
+Node* CodeStubAssembler::IsJSArrayMap(Node* map) {
+ return IsJSArrayInstanceType(LoadMapInstanceType(map));
}
Node* CodeStubAssembler::IsWeakCell(Node* object) {
@@ -3076,6 +3257,10 @@ Node* CodeStubAssembler::IsPropertyCell(Node* object) {
return IsPropertyCellMap(LoadMap(object));
}
+Node* CodeStubAssembler::IsAccessorInfo(Node* object) {
+ return IsAccessorInfoMap(LoadMap(object));
+}
+
Node* CodeStubAssembler::IsAccessorPair(Node* object) {
return IsAccessorPairMap(LoadMap(object));
}
@@ -3084,6 +3269,10 @@ Node* CodeStubAssembler::IsHeapNumber(Node* object) {
return IsHeapNumberMap(LoadMap(object));
}
+Node* CodeStubAssembler::IsFeedbackVector(Node* object) {
+ return IsFeedbackVectorMap(LoadMap(object));
+}
+
Node* CodeStubAssembler::IsName(Node* object) {
return Int32LessThanOrEqual(LoadInstanceType(object),
Int32Constant(LAST_NAME_TYPE));
@@ -3131,14 +3320,26 @@ Node* CodeStubAssembler::IsUnseededNumberDictionary(Node* object) {
LoadRoot(Heap::kUnseededNumberDictionaryMapRootIndex));
}
+Node* CodeStubAssembler::IsJSFunctionInstanceType(Node* instance_type) {
+ return Word32Equal(instance_type, Int32Constant(JS_FUNCTION_TYPE));
+}
+
Node* CodeStubAssembler::IsJSFunction(Node* object) {
- return HasInstanceType(object, JS_FUNCTION_TYPE);
+ return IsJSFunctionMap(LoadMap(object));
+}
+
+Node* CodeStubAssembler::IsJSFunctionMap(Node* map) {
+ return IsJSFunctionInstanceType(LoadMapInstanceType(map));
}
Node* CodeStubAssembler::IsJSTypedArray(Node* object) {
return HasInstanceType(object, JS_TYPED_ARRAY_TYPE);
}
+Node* CodeStubAssembler::IsJSArrayBuffer(Node* object) {
+ return HasInstanceType(object, JS_ARRAY_BUFFER_TYPE);
+}
+
Node* CodeStubAssembler::IsFixedTypedArray(Node* object) {
Node* instance_type = LoadInstanceType(object);
return Word32And(
@@ -3181,6 +3382,18 @@ Node* CodeStubAssembler::IsNumberNormalized(Node* number) {
return var_result.value();
}
+Node* CodeStubAssembler::IsNumberPositive(Node* number) {
+ CSA_ASSERT(this, IsNumber(number));
+ Node* const float_zero = Float64Constant(0.);
+ return Select(TaggedIsSmi(number),
+ [=] { return TaggedIsPositiveSmi(number); },
+ [=] {
+ Node* v = LoadHeapNumberValue(number);
+ return Float64GreaterThanOrEqual(v, float_zero);
+ },
+ MachineRepresentation::kWord32);
+}
+
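One subtlety in IsNumberPositive: the heap-number branch uses Float64GreaterThanOrEqual against 0.0, and under IEEE-754 minus zero compares equal to plus zero, so -0 (which is only representable as a heap number) reports positive. A two-line confirmation:

  #include <cassert>

  int main() {
    double minus_zero = -0.0;
    assert(minus_zero >= 0.0);  // IEEE-754: -0 == +0, so the >= check passes for -0
    return 0;
  }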
Node* CodeStubAssembler::StringCharCodeAt(Node* string, Node* index,
ParameterMode parameter_mode) {
if (parameter_mode == SMI_PARAMETERS) CSA_ASSERT(this, TaggedIsSmi(index));
@@ -3298,64 +3511,59 @@ Node* CodeStubAssembler::StringFromCharCode(Node* code) {
return var_result.value();
}
-namespace {
-
// A wrapper around CopyStringCharacters which determines the correct string
// encoding, allocates a corresponding sequential string, and then copies the
// given character range using CopyStringCharacters.
// |from_string| must be a sequential string. |from_index| and
// |character_count| must be Smis s.t.
// 0 <= |from_index| <= |from_index| + |character_count| < from_string.length.
-Node* AllocAndCopyStringCharacters(CodeStubAssembler* a, Node* context,
- Node* from, Node* from_instance_type,
- Node* from_index, Node* character_count) {
- typedef CodeStubAssembler::Label Label;
- typedef CodeStubAssembler::Variable Variable;
-
- Label end(a), one_byte_sequential(a), two_byte_sequential(a);
- Variable var_result(a, MachineRepresentation::kTagged);
+Node* CodeStubAssembler::AllocAndCopyStringCharacters(Node* context, Node* from,
+ Node* from_instance_type,
+ Node* from_index,
+ Node* character_count) {
+ Label end(this), one_byte_sequential(this), two_byte_sequential(this);
+ Variable var_result(this, MachineRepresentation::kTagged);
- Node* const smi_zero = a->SmiConstant(Smi::kZero);
+ Node* const smi_zero = SmiConstant(Smi::kZero);
- a->Branch(a->IsOneByteStringInstanceType(from_instance_type),
- &one_byte_sequential, &two_byte_sequential);
+ Branch(IsOneByteStringInstanceType(from_instance_type), &one_byte_sequential,
+ &two_byte_sequential);
// The subject string is a sequential one-byte string.
- a->BIND(&one_byte_sequential);
+ BIND(&one_byte_sequential);
{
Node* result =
- a->AllocateSeqOneByteString(context, a->SmiToWord(character_count));
- a->CopyStringCharacters(from, result, from_index, smi_zero, character_count,
- String::ONE_BYTE_ENCODING,
- String::ONE_BYTE_ENCODING,
- CodeStubAssembler::SMI_PARAMETERS);
+ AllocateSeqOneByteString(context, SmiToWord(character_count));
+ CopyStringCharacters(from, result, from_index, smi_zero, character_count,
+ String::ONE_BYTE_ENCODING, String::ONE_BYTE_ENCODING,
+ CodeStubAssembler::SMI_PARAMETERS);
var_result.Bind(result);
- a->Goto(&end);
+ Goto(&end);
}
// The subject string is a sequential two-byte string.
- a->BIND(&two_byte_sequential);
+ BIND(&two_byte_sequential);
{
Node* result =
- a->AllocateSeqTwoByteString(context, a->SmiToWord(character_count));
- a->CopyStringCharacters(from, result, from_index, smi_zero, character_count,
- String::TWO_BYTE_ENCODING,
- String::TWO_BYTE_ENCODING,
- CodeStubAssembler::SMI_PARAMETERS);
+ AllocateSeqTwoByteString(context, SmiToWord(character_count));
+ CopyStringCharacters(from, result, from_index, smi_zero, character_count,
+ String::TWO_BYTE_ENCODING, String::TWO_BYTE_ENCODING,
+ CodeStubAssembler::SMI_PARAMETERS);
var_result.Bind(result);
- a->Goto(&end);
+ Goto(&end);
}
- a->BIND(&end);
+ BIND(&end);
return var_result.value();
}
-} // namespace
Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
- Node* to) {
+ Node* to, SubStringFlags flags) {
+ DCHECK(flags == SubStringFlags::NONE ||
+ flags == SubStringFlags::FROM_TO_ARE_BOUNDED);
VARIABLE(var_result, MachineRepresentation::kTagged);
ToDirectStringAssembler to_direct(state(), string);
Label end(this), runtime(this);
@@ -3366,8 +3574,13 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
// Make sure that both from and to are non-negative smis.
- GotoIfNot(TaggedIsPositiveSmi(from), &runtime);
- GotoIfNot(TaggedIsPositiveSmi(to), &runtime);
+ if (flags == SubStringFlags::NONE) {
+ GotoIfNot(TaggedIsPositiveSmi(from), &runtime);
+ GotoIfNot(TaggedIsPositiveSmi(to), &runtime);
+ } else {
+ CSA_ASSERT(this, TaggedIsPositiveSmi(from));
+ CSA_ASSERT(this, TaggedIsPositiveSmi(to));
+ }
Node* const substr_length = SmiSub(to, from);
Node* const string_length = LoadStringLength(string);
@@ -3435,7 +3648,7 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
GotoIf(to_direct.is_external(), &external_string);
var_result.Bind(AllocAndCopyStringCharacters(
- this, context, direct_string, instance_type, offset, substr_length));
+ context, direct_string, instance_type, offset, substr_length));
Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
@@ -3448,9 +3661,8 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
{
Node* const fake_sequential_string = to_direct.PointerToString(&runtime);
- var_result.Bind(
- AllocAndCopyStringCharacters(this, context, fake_sequential_string,
- instance_type, offset, substr_length));
+ var_result.Bind(AllocAndCopyStringCharacters(
+ context, fake_sequential_string, instance_type, offset, substr_length));
Counters* counters = isolate()->counters();
IncrementCounter(counters->sub_string_native(), 1);
@@ -3468,8 +3680,14 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
BIND(&original_string_or_invalid_length);
{
- // Longer than original string's length or negative: unsafe arguments.
- GotoIf(SmiAbove(substr_length, string_length), &runtime);
+ if (flags == SubStringFlags::NONE) {
+ // Longer than original string's length or negative: unsafe arguments.
+ GotoIf(SmiAbove(substr_length, string_length), &runtime);
+ } else {
+ // With the flag SubStringFlags::FROM_TO_ARE_BOUNDED, the only way we
+ // can get here is if substr_length equals string_length.
+ CSA_ASSERT(this, SmiEqual(substr_length, string_length));
+ }
// Equal length - check if {from, to} == {0, str.length}.
GotoIf(SmiAbove(from, SmiConstant(Smi::kZero)), &runtime);
@@ -3496,12 +3714,13 @@ Node* CodeStubAssembler::SubString(Node* context, Node* string, Node* from,
}
ToDirectStringAssembler::ToDirectStringAssembler(
- compiler::CodeAssemblerState* state, Node* string)
+ compiler::CodeAssemblerState* state, Node* string, Flags flags)
: CodeStubAssembler(state),
var_string_(this, MachineRepresentation::kTagged, string),
var_instance_type_(this, MachineRepresentation::kWord32),
var_offset_(this, MachineType::PointerRepresentation()),
- var_is_external_(this, MachineRepresentation::kWord32) {
+ var_is_external_(this, MachineRepresentation::kWord32),
+ flags_(flags) {
CSA_ASSERT(this, TaggedIsNotSmi(string));
CSA_ASSERT(this, IsString(string));
@@ -3558,16 +3777,20 @@ Node* ToDirectStringAssembler::TryToDirect(Label* if_bailout) {
// Sliced string. Fetch parent and correct start index by offset.
BIND(&if_issliced);
{
- Node* const string = var_string_.value();
- Node* const sliced_offset =
- LoadAndUntagObjectField(string, SlicedString::kOffsetOffset);
- var_offset_.Bind(IntPtrAdd(var_offset_.value(), sliced_offset));
+ if (flags_ & kDontUnpackSlicedStrings) {
+ Goto(if_bailout);
+ } else {
+ Node* const string = var_string_.value();
+ Node* const sliced_offset =
+ LoadAndUntagObjectField(string, SlicedString::kOffsetOffset);
+ var_offset_.Bind(IntPtrAdd(var_offset_.value(), sliced_offset));
- Node* const parent = LoadObjectField(string, SlicedString::kParentOffset);
- var_string_.Bind(parent);
- var_instance_type_.Bind(LoadInstanceType(parent));
+ Node* const parent = LoadObjectField(string, SlicedString::kParentOffset);
+ var_string_.Bind(parent);
+ var_instance_type_.Bind(LoadInstanceType(parent));
- Goto(&dispatch);
+ Goto(&dispatch);
+ }
}
// Thin string. Fetch the actual string.
@@ -4269,6 +4492,22 @@ Node* CodeStubAssembler::ToString(Node* context, Node* input) {
return result.value();
}
+Node* CodeStubAssembler::ToString_Inline(Node* const context,
+ Node* const input) {
+ VARIABLE(var_result, MachineRepresentation::kTagged, input);
+ Label stub_call(this, Label::kDeferred), out(this);
+
+ GotoIf(TaggedIsSmi(input), &stub_call);
+ Branch(IsString(input), &out, &stub_call);
+
+ BIND(&stub_call);
+ var_result.Bind(CallBuiltin(Builtins::kToString, context, input));
+ Goto(&out);
+
+ BIND(&out);
+ return var_result.value();
+}
+
Node* CodeStubAssembler::JSReceiverToPrimitive(Node* context, Node* input) {
Label if_isreceiver(this, Label::kDeferred), if_isnotreceiver(this);
VARIABLE(result, MachineRepresentation::kTagged);
@@ -4346,6 +4585,15 @@ Node* CodeStubAssembler::ToSmiLength(Node* input, Node* const context,
return result.value();
}
+Node* CodeStubAssembler::ToLength_Inline(Node* const context,
+ Node* const input) {
+ Node* const smi_zero = SmiConstant(0);
+ return Select(
+ TaggedIsSmi(input), [=] { return SmiMax(input, smi_zero); },
+ [=] { return CallBuiltin(Builtins::kToLength, context, input); },
+ MachineRepresentation::kTagged);
+}
+
Node* CodeStubAssembler::ToInteger(Node* context, Node* input,
ToIntegerTruncationMode mode) {
// We might need to loop once for ToNumber conversion.
@@ -4461,7 +4709,8 @@ void CodeStubAssembler::Use(Label* label) {
void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
Variable* var_index, Label* if_keyisunique,
- Variable* var_unique, Label* if_bailout) {
+ Variable* var_unique, Label* if_bailout,
+ Label* if_notinternalized) {
DCHECK_EQ(MachineType::PointerRepresentation(), var_index->rep());
DCHECK_EQ(MachineRepresentation::kTagged, var_unique->rep());
Comment("TryToName");
@@ -4500,7 +4749,8 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
STATIC_ASSERT(kNotInternalizedTag != 0);
Node* not_internalized =
Word32And(key_instance_type, Int32Constant(kIsNotInternalizedMask));
- GotoIf(Word32NotEqual(not_internalized, Int32Constant(0)), if_bailout);
+ GotoIf(Word32NotEqual(not_internalized, Int32Constant(0)),
+ if_notinternalized != nullptr ? if_notinternalized : if_bailout);
Goto(if_keyisunique);
BIND(&if_thinstring);
@@ -4512,6 +4762,30 @@ void CodeStubAssembler::TryToName(Node* key, Label* if_keyisindex,
Goto(if_keyisindex);
}
+void CodeStubAssembler::TryInternalizeString(
+ Node* string, Label* if_index, Variable* var_index, Label* if_internalized,
+ Variable* var_internalized, Label* if_not_internalized, Label* if_bailout) {
+ DCHECK(var_index->rep() == MachineType::PointerRepresentation());
+ DCHECK(var_internalized->rep() == MachineRepresentation::kTagged);
+ Node* function = ExternalConstant(
+ ExternalReference::try_internalize_string_function(isolate()));
+ Node* result = CallCFunction1(MachineType::AnyTagged(),
+ MachineType::AnyTagged(), function, string);
+ Label internalized(this);
+ GotoIf(TaggedIsNotSmi(result), &internalized);
+ Node* word_result = SmiUntag(result);
+ GotoIf(WordEqual(word_result, IntPtrConstant(ResultSentinel::kNotFound)),
+ if_not_internalized);
+ GotoIf(WordEqual(word_result, IntPtrConstant(ResultSentinel::kUnsupported)),
+ if_bailout);
+ var_index->Bind(word_result);
+ Goto(if_index);
+
+ BIND(&internalized);
+ var_internalized->Bind(result);
+ Goto(if_internalized);
+}
+
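TryInternalizeString dispatches on the shape of the C call's result: a non-Smi means the string was internalized, while a Smi is either a non-negative array index or one of two negative sentinels. A sketch of that protocol; the sentinel values are assumptions, not the real ResultSentinel constants:

  #include <cstdint>
  #include <cstdio>

  enum ResultSentinel : intptr_t { kNotFound = -1, kUnsupported = -2 };  // assumed values

  const char* Classify(intptr_t untagged_smi) {
    if (untagged_smi == kNotFound) return "not internalized";  // goto if_not_internalized
    if (untagged_smi == kUnsupported) return "bailout";        // goto if_bailout
    return "array index";                                      // bind var_index, goto if_index
  }

  int main() {
    std::printf("%s / %s / %s\n", Classify(7), Classify(kNotFound), Classify(kUnsupported));
    return 0;
  }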
template <typename Dictionary>
Node* CodeStubAssembler::EntryToIndex(Node* entry, int field_index) {
Node* entry_index = IntPtrMul(entry, IntPtrConstant(Dictionary::kEntrySize));
@@ -4524,9 +4798,10 @@ template Node* CodeStubAssembler::EntryToIndex<GlobalDictionary>(Node*, int);
template Node* CodeStubAssembler::EntryToIndex<SeededNumberDictionary>(Node*,
int);
+// This must be kept in sync with HashTableBase::ComputeCapacity().
Node* CodeStubAssembler::HashTableComputeCapacity(Node* at_least_space_for) {
- Node* capacity = IntPtrRoundUpToPowerOfTwo32(
- WordShl(at_least_space_for, IntPtrConstant(1)));
+ Node* capacity = IntPtrRoundUpToPowerOfTwo32(IntPtrAdd(
+ at_least_space_for, WordShr(at_least_space_for, IntPtrConstant(1))));
return IntPtrMax(capacity, IntPtrConstant(HashTableBase::kMinCapacity));
}
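The capacity formula changes here from rounding 2*n up to a power of two to rounding n + n/2 up, which halves the allocation for many sizes. A worked standalone check; kMinCapacity is assumed to be 4:

  #include <cassert>
  #include <cstdint>

  uint32_t RoundUpToPowerOfTwo32(uint32_t v) {
    v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
    return v + 1;
  }

  int main() {
    const uint32_t kMinCapacity = 4;  // HashTableBase::kMinCapacity (assumption)
    auto capacity = [&](uint32_t n) {
      // New formula: round n + n/2 up to a power of two (was: round 2*n up).
      uint32_t c = RoundUpToPowerOfTwo32(n + (n >> 1));
      return c < kMinCapacity ? kMinCapacity : c;
    };
    assert(capacity(5) == 8);    // 5 + 2 = 7 -> 8 (the old formula gave 16)
    assert(capacity(11) == 16);  // 11 + 5 = 16 -> 16
    assert(capacity(1) == 4);    // clamped to kMinCapacity
    return 0;
  }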
@@ -4541,29 +4816,6 @@ Node* CodeStubAssembler::IntPtrMin(Node* left, Node* right) {
}
template <class Dictionary>
-Node* CodeStubAssembler::GetNumberOfElements(Node* dictionary) {
- return LoadFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex);
-}
-
-template <class Dictionary>
-void CodeStubAssembler::SetNumberOfElements(Node* dictionary,
- Node* num_elements_smi) {
- StoreFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex,
- num_elements_smi, SKIP_WRITE_BARRIER);
-}
-
-template <class Dictionary>
-Node* CodeStubAssembler::GetNumberOfDeletedElements(Node* dictionary) {
- return LoadFixedArrayElement(dictionary,
- Dictionary::kNumberOfDeletedElementsIndex);
-}
-
-template <class Dictionary>
-Node* CodeStubAssembler::GetCapacity(Node* dictionary) {
- return LoadFixedArrayElement(dictionary, Dictionary::kCapacityIndex);
-}
-
-template <class Dictionary>
Node* CodeStubAssembler::GetNextEnumerationIndex(Node* dictionary) {
return LoadFixedArrayElement(dictionary,
Dictionary::kNextEnumerationIndexIndex);
@@ -5115,10 +5367,9 @@ void CodeStubAssembler::LoadPropertyFromFastObject(Node* object, Node* map,
BIND(&if_inobject);
{
Comment("if_inobject");
- Node* field_offset =
- IntPtrMul(IntPtrSub(LoadMapInstanceSize(map),
- IntPtrSub(inobject_properties, field_index)),
- IntPtrConstant(kPointerSize));
+ Node* field_offset = TimesPointerSize(
+ IntPtrAdd(IntPtrSub(LoadMapInstanceSize(map), inobject_properties),
+ field_index));
Label if_double(this), if_tagged(this);
Branch(Word32NotEqual(representation,
@@ -5226,18 +5477,17 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
Node* context, Node* receiver,
Label* if_bailout) {
VARIABLE(var_value, MachineRepresentation::kTagged, value);
- Label done(this);
+ Label done(this), if_accessor_info(this, Label::kDeferred);
Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
GotoIf(Word32Equal(kind, Int32Constant(kData)), &done);
// Accessor case.
+ GotoIfNot(IsAccessorPair(value), &if_accessor_info);
+
+ // AccessorPair case.
{
Node* accessor_pair = value;
- GotoIf(Word32Equal(LoadInstanceType(accessor_pair),
- Int32Constant(ACCESSOR_INFO_TYPE)),
- if_bailout);
- CSA_ASSERT(this, IsAccessorPair(accessor_pair));
Node* getter = LoadObjectField(accessor_pair, AccessorPair::kGetterOffset);
Node* getter_map = LoadMap(getter);
Node* instance_type = LoadMapInstanceType(getter_map);
@@ -5257,6 +5507,62 @@ Node* CodeStubAssembler::CallGetterIfAccessor(Node* value, Node* details,
Goto(&done);
}
+ // AccessorInfo case.
+ BIND(&if_accessor_info);
+ {
+ Node* accessor_info = value;
+ CSA_ASSERT(this, IsAccessorInfo(value));
+ CSA_ASSERT(this, TaggedIsNotSmi(receiver));
+ Label if_array(this), if_function(this), if_value(this);
+
+ // Dispatch based on {receiver} instance type.
+ Node* receiver_map = LoadMap(receiver);
+ Node* receiver_instance_type = LoadMapInstanceType(receiver_map);
+ GotoIf(IsJSArrayInstanceType(receiver_instance_type), &if_array);
+ GotoIf(IsJSFunctionInstanceType(receiver_instance_type), &if_function);
+ Branch(IsJSValueInstanceType(receiver_instance_type), &if_value,
+ if_bailout);
+
+ // JSArray AccessorInfo case.
+ BIND(&if_array);
+ {
+ // We only deal with the "length" accessor on JSArray.
+ GotoIfNot(IsLengthString(
+ LoadObjectField(accessor_info, AccessorInfo::kNameOffset)),
+ if_bailout);
+ var_value.Bind(LoadJSArrayLength(receiver));
+ Goto(&done);
+ }
+
+ // JSFunction AccessorInfo case.
+ BIND(&if_function);
+ {
+ // We only deal with the "prototype" accessor on JSFunction here.
+ GotoIfNot(IsPrototypeString(
+ LoadObjectField(accessor_info, AccessorInfo::kNameOffset)),
+ if_bailout);
+ GotoIf(IsSetWord32(LoadMapBitField(receiver_map),
+ 1 << Map::kHasNonInstancePrototype),
+ if_bailout);
+ var_value.Bind(LoadJSFunctionPrototype(receiver, if_bailout));
+ Goto(&done);
+ }
+
+ // JSValue AccessorInfo case.
+ BIND(&if_value);
+ {
+ // We only deal with the "length" accessor on JSValue string wrappers.
+ GotoIfNot(IsLengthString(
+ LoadObjectField(accessor_info, AccessorInfo::kNameOffset)),
+ if_bailout);
+ Node* receiver_value = LoadJSValueValue(receiver);
+ GotoIfNot(TaggedIsNotSmi(receiver_value), if_bailout);
+ GotoIfNot(IsString(receiver_value), if_bailout);
+ var_value.Bind(LoadStringLength(receiver_value));
+ Goto(&done);
+ }
+ }
+
BIND(&done);
return var_value.value();
}
@@ -5746,6 +6052,26 @@ void CodeStubAssembler::UpdateFeedback(Node* feedback, Node* feedback_vector,
SKIP_WRITE_BARRIER);
}
+void CodeStubAssembler::CombineFeedback(Variable* existing_feedback,
+ Node* feedback) {
+ existing_feedback->Bind(SmiOr(existing_feedback->value(), feedback));
+}
+
+void CodeStubAssembler::CheckForAssociatedProtector(Node* name,
+ Label* if_protector) {
+ // This list must be kept in sync with LookupIterator::UpdateProtector!
+ // TODO(jkummerow): Would it be faster to have a bit in Symbol::flags()?
+ GotoIf(WordEqual(name, LoadRoot(Heap::kconstructor_stringRootIndex)),
+ if_protector);
+ GotoIf(WordEqual(name, LoadRoot(Heap::kiterator_symbolRootIndex)),
+ if_protector);
+ GotoIf(WordEqual(name, LoadRoot(Heap::kspecies_symbolRootIndex)),
+ if_protector);
+ GotoIf(WordEqual(name, LoadRoot(Heap::kis_concat_spreadable_symbolRootIndex)),
+ if_protector);
+ // Fall through if no case matched.
+}
+
Node* CodeStubAssembler::LoadReceiverMap(Node* receiver) {
return Select(TaggedIsSmi(receiver),
[=] { return LoadRoot(Heap::kHeapNumberMapRootIndex); },
@@ -6367,6 +6693,78 @@ Node* CodeStubAssembler::CreateWeakCellInFeedbackVector(Node* feedback_vector,
return cell;
}
+void CodeStubAssembler::HandleSlackTracking(Node* context, Node* object,
+ Node* initial_map,
+ int start_offset) {
+ Node* instance_size_words = ChangeUint32ToWord(LoadObjectField(
+ initial_map, Map::kInstanceSizeOffset, MachineType::Uint8()));
+ Node* instance_size = TimesPointerSize(instance_size_words);
+
+ // Perform in-object slack tracking if requested.
+ Node* bit_field3 = LoadMapBitField3(initial_map);
+ Label end(this), slack_tracking(this), finalize(this, Label::kDeferred);
+ GotoIf(IsSetWord32<Map::ConstructionCounter>(bit_field3), &slack_tracking);
+
+ // Initialize remaining fields.
+ {
+ Comment("no slack tracking");
+ InitializeFieldsWithRoot(object, IntPtrConstant(start_offset),
+ instance_size, Heap::kUndefinedValueRootIndex);
+ Goto(&end);
+ }
+
+ {
+ BIND(&slack_tracking);
+
+ // Decrease generous allocation count.
+ STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
+ Comment("update allocation count");
+ Node* new_bit_field3 = Int32Sub(
+ bit_field3, Int32Constant(1 << Map::ConstructionCounter::kShift));
+ StoreObjectFieldNoWriteBarrier(initial_map, Map::kBitField3Offset,
+ new_bit_field3,
+ MachineRepresentation::kWord32);
+ GotoIf(IsClearWord32<Map::ConstructionCounter>(new_bit_field3), &finalize);
+
+ Node* unused_fields = LoadObjectField(
+ initial_map, Map::kUnusedPropertyFieldsOffset, MachineType::Uint8());
+ Node* used_size = IntPtrSub(
+ instance_size, TimesPointerSize(ChangeUint32ToWord(unused_fields)));
+
+ Comment("initialize filler fields (no finalize)");
+ InitializeFieldsWithRoot(object, used_size, instance_size,
+ Heap::kOnePointerFillerMapRootIndex);
+
+ Comment("initialize undefined fields (no finalize)");
+ InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), used_size,
+ Heap::kUndefinedValueRootIndex);
+ Goto(&end);
+ }
+
+ {
+ // Finalize the instance size.
+ BIND(&finalize);
+
+ Node* unused_fields = LoadObjectField(
+ initial_map, Map::kUnusedPropertyFieldsOffset, MachineType::Uint8());
+ Node* used_size = IntPtrSub(
+ instance_size, TimesPointerSize(ChangeUint32ToWord(unused_fields)));
+
+ Comment("initialize filler fields (finalize)");
+ InitializeFieldsWithRoot(object, used_size, instance_size,
+ Heap::kOnePointerFillerMapRootIndex);
+
+ Comment("initialize undefined fields (finalize)");
+ InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), used_size,
+ Heap::kUndefinedValueRootIndex);
+
+ CallRuntime(Runtime::kFinalizeInstanceSize, context, initial_map);
+ Goto(&end);
+ }
+
+ BIND(&end);
+}
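
Stripped of the assembler plumbing, slack tracking decrements a per-map construction counter, fills still-unproven slack with filler values the GC can reclaim, and asks the runtime to shrink the map once the counter hits zero. A simplified model, with invented field names:

// Simplified model of in-object slack tracking; not V8's actual layout.
struct MapModel {
  int construction_counter;  // Counts down toward finalization.
  int instance_slots;        // Current in-object slot count.
  int unused_slots;          // Slack not yet proven necessary.
};

// Returns how many trailing slots to initialize with one-pointer filler
// (reclaimable) instead of undefined; mirrors the three paths above.
int FillerSlots(MapModel* map, bool* out_finalize) {
  *out_finalize = false;
  if (map->construction_counter == 0) return 0;  // No tracking: all undefined.
  if (--map->construction_counter == 0) {
    *out_finalize = true;  // Runtime shrinks instance_slots by unused_slots.
  }
  return map->unused_slots;  // Tail filled with filler either way.
}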
+
Node* CodeStubAssembler::BuildFastLoop(
const CodeStubAssembler::VariableList& vars, Node* start_index,
Node* end_index, const FastLoopBody& body, int increment,
@@ -6593,7 +6991,8 @@ void CodeStubAssembler::GotoUnlessNumberLessThan(Node* lhs, Node* rhs,
Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
Node* lhs, Node* rhs,
- Node* context) {
+ Node* context,
+ Variable* var_type_feedback) {
Label return_true(this), return_false(this), end(this);
VARIABLE(result, MachineRepresentation::kTagged);
@@ -6606,8 +7005,14 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
// conversions.
VARIABLE(var_lhs, MachineRepresentation::kTagged, lhs);
VARIABLE(var_rhs, MachineRepresentation::kTagged, rhs);
- Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(this, 2, loop_vars);
+ VariableList loop_variable_list({&var_lhs, &var_rhs}, zone());
+ if (var_type_feedback != nullptr) {
+ // Initialize the type feedback to None. The current feedback is combined
+ // with the previous feedback.
+ var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kNone));
+ loop_variable_list.Add(var_type_feedback, zone());
+ }
+ Label loop(this, loop_variable_list);
Goto(&loop);
BIND(&loop);
{
@@ -6628,6 +7033,10 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
BIND(&if_rhsissmi);
{
// Both {lhs} and {rhs} are Smi, so just perform a fast Smi comparison.
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kSignedSmall));
+ }
switch (mode) {
case kLessThan:
BranchIfSmiLessThan(lhs, rhs, &return_true, &return_false);
@@ -6657,6 +7066,10 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
{
// Convert the {lhs} and {rhs} to floating point values, and
// perform a floating point comparison.
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kNumber));
+ }
var_fcmp_lhs.Bind(SmiToFloat64(lhs));
var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
Goto(&do_fcmp);
@@ -6664,6 +7077,11 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
BIND(&if_rhsisnotnumber);
{
+ // The {rhs} is not a HeapNumber and {lhs} is a Smi.
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
// Convert the {rhs} to a Number; we don't need to perform the
// dedicated ToPrimitive(rhs, hint Number) operation, as the
// ToNumber(rhs) will by itself already invoke ToPrimitive with
@@ -6694,6 +7112,10 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
{
// Convert the {lhs} and {rhs} to floating point values, and
// perform a floating point comparison.
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kNumber));
+ }
var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
var_fcmp_rhs.Bind(SmiToFloat64(rhs));
Goto(&do_fcmp);
@@ -6701,6 +7123,11 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
BIND(&if_lhsisnotnumber);
{
+ // The {lhs} is not a HeapNumber and {rhs} is a Smi.
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
// Convert the {lhs} to a Number; we don't need to perform the
// dedicated ToPrimitive(lhs, hint Number) operation, as the
// ToNumber(lhs) will by itself already invoke ToPrimitive with
@@ -6731,6 +7158,10 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
{
// Convert the {lhs} and {rhs} to floating point values, and
// perform a floating point comparison.
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kNumber));
+ }
var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
Goto(&do_fcmp);
@@ -6738,6 +7169,11 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
BIND(&if_rhsisnotnumber);
{
+ // The {rhs} is not a HeapNumber and {lhs} is a HeapNumber.
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
// Convert the {rhs} to a Number; we don't need to perform
// dedicated ToPrimitive(rhs, hint Number) operation, as the
// ToNumber(rhs) will by itself already invoke ToPrimitive with
@@ -6772,6 +7208,10 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
BIND(&if_rhsisstring);
{
// Both {lhs} and {rhs} are strings.
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kString));
+ }
switch (mode) {
case kLessThan:
result.Bind(CallStub(CodeFactory::StringLessThan(isolate()),
@@ -6801,6 +7241,11 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
BIND(&if_rhsisnotstring);
{
+ // The {lhs} is a String and {rhs} is not a String.
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
// The {lhs} is a String, while {rhs} is neither a Number nor a
// String, so we need to call ToPrimitive(rhs, hint Number) if
// {rhs} is a receiver or ToNumber(lhs) and ToNumber(rhs) in the
@@ -6833,6 +7278,41 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
BIND(&if_lhsisnotstring);
{
+ if (var_type_feedback != nullptr) {
+ // The {lhs} is not a Smi, HeapNumber or String and {rhs} is not
+ // a Smi: collect NumberOrOddball feedback if {lhs} is an Oddball
+ // and {rhs} is either a HeapNumber or Oddball.
+ Label collect_any_feedback(this), collect_oddball_feedback(this),
+ collect_feedback_done(this);
+ GotoIfNot(
+ Word32Equal(lhs_instance_type, Int32Constant(ODDBALL_TYPE)),
+ &collect_any_feedback);
+
+ Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
+ GotoIf(Word32Equal(rhs_instance_type,
+ Int32Constant(HEAP_NUMBER_TYPE)),
+ &collect_oddball_feedback);
+ Branch(
+ Word32Equal(rhs_instance_type, Int32Constant(ODDBALL_TYPE)),
+ &collect_oddball_feedback, &collect_any_feedback);
+
+ BIND(&collect_oddball_feedback);
+ {
+ CombineFeedback(
+ var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kNumberOrOddball));
+ Goto(&collect_feedback_done);
+ }
+
+ BIND(&collect_any_feedback);
+ {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ Goto(&collect_feedback_done);
+ }
+
+ BIND(&collect_feedback_done);
+ }
// The {lhs} is neither a Number nor a String, so we need to call
// ToPrimitive(lhs, hint Number) if {lhs} is a receiver or
// ToNumber(lhs) and ToNumber(rhs) in the other cases.
@@ -6905,52 +7385,91 @@ Node* CodeStubAssembler::RelationalComparison(RelationalComparisonMode mode,
return result.value();
}
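
The oddball special case above boils down to a small decision table. As a standalone sketch, with stand-in instance-type names rather than the real constants:

// Stand-in instance types; the real checks compare map instance types.
enum InstanceType { kOddball, kHeapNumber, kString, kOther };

// Feedback when the left operand is neither Smi, HeapNumber nor String:
// oddballs compared against numbers or oddballs keep the NumberOrOddball
// fast path, everything else degrades to Any.
const char* FeedbackForNonNumberLhs(InstanceType lhs, InstanceType rhs) {
  if (lhs == kOddball && (rhs == kHeapNumber || rhs == kOddball)) {
    return "NumberOrOddball";
  }
  return "Any";
}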
-namespace {
+Node* CodeStubAssembler::CollectFeedbackForString(Node* instance_type) {
+ Node* feedback = SelectSmiConstant(
+ Word32Equal(
+ Word32And(instance_type, Int32Constant(kIsNotInternalizedMask)),
+ Int32Constant(kInternalizedTag)),
+ CompareOperationFeedback::kInternalizedString,
+ CompareOperationFeedback::kString);
+ return feedback;
+}
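
Whether a string is internalized is encoded in low bits of its instance type, so the test is one mask-and-compare. A sketch with an illustrative mask value; the real mask and tag come from the string instance-type encoding:

#include <cstdint>

constexpr uint32_t kIsNotInternalizedMask = 1 << 6;  // Illustrative bit.
constexpr uint32_t kInternalizedTag = 0;

bool IsInternalized(uint32_t instance_type) {
  return (instance_type & kIsNotInternalizedMask) == kInternalizedTag;
}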
-void GenerateEqual_Same(CodeStubAssembler* assembler, Node* value,
- CodeStubAssembler::Label* if_equal,
- CodeStubAssembler::Label* if_notequal) {
+void CodeStubAssembler::GenerateEqual_Same(Node* value, Label* if_equal,
+ Label* if_notequal,
+ Variable* var_type_feedback) {
// In case of abstract or strict equality checks, we need additional checks
// for NaN values because they are not considered equal, even if both the
// left and the right hand side reference exactly the same value.
- typedef CodeStubAssembler::Label Label;
-
// Check if {value} is a Smi or a HeapObject.
- Label if_valueissmi(assembler), if_valueisnotsmi(assembler);
- assembler->Branch(assembler->TaggedIsSmi(value), &if_valueissmi,
- &if_valueisnotsmi);
+ Label if_valueissmi(this), if_valueisnotsmi(this);
+ Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
- assembler->BIND(&if_valueisnotsmi);
+ BIND(&if_valueisnotsmi);
{
// Load the map of {value}.
- Node* value_map = assembler->LoadMap(value);
+ Node* value_map = LoadMap(value);
// Check if {value} (and therefore {rhs}) is a HeapNumber.
- Label if_valueisnumber(assembler), if_valueisnotnumber(assembler);
- assembler->Branch(assembler->IsHeapNumberMap(value_map), &if_valueisnumber,
- &if_valueisnotnumber);
+ Label if_valueisnumber(this), if_valueisnotnumber(this);
+ Branch(IsHeapNumberMap(value_map), &if_valueisnumber, &if_valueisnotnumber);
- assembler->BIND(&if_valueisnumber);
+ BIND(&if_valueisnumber);
{
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kNumber));
+ }
+
// Convert {value} (and therefore {rhs}) to floating point value.
- Node* value_value = assembler->LoadHeapNumberValue(value);
+ Node* value_value = LoadHeapNumberValue(value);
// Check if the HeapNumber value is a NaN.
- assembler->BranchIfFloat64IsNaN(value_value, if_notequal, if_equal);
+ BranchIfFloat64IsNaN(value_value, if_notequal, if_equal);
}
- assembler->BIND(&if_valueisnotnumber);
- assembler->Goto(if_equal);
+ BIND(&if_valueisnotnumber);
+ if (var_type_feedback != nullptr) {
+ // Collect type feedback.
+ Node* instance_type = LoadMapInstanceType(value_map);
+
+ Label if_valueisstring(this), if_valueisnotstring(this);
+ Branch(IsStringInstanceType(instance_type), &if_valueisstring,
+ &if_valueisnotstring);
+
+ BIND(&if_valueisstring);
+ {
+ CombineFeedback(var_type_feedback,
+ CollectFeedbackForString(instance_type));
+ Goto(if_equal);
+ }
+
+ BIND(&if_valueisnotstring);
+ {
+ var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
+ GotoIfNot(IsJSReceiverInstanceType(instance_type), if_equal);
+
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kReceiver));
+ Goto(if_equal);
+ }
+ } else {
+ Goto(if_equal);
+ }
}
- assembler->BIND(&if_valueissmi);
- assembler->Goto(if_equal);
+ BIND(&if_valueissmi);
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kSignedSmall));
+ }
+ Goto(if_equal);
}
-} // namespace
// ES6 section 7.2.12 Abstract Equality Comparison
-Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
+Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context,
+ Variable* var_type_feedback) {
// This is a slightly optimized version of Object::Equals represented as
// scheduled TurboFan graph utilizing the CodeStubAssembler. Whenever you
// change something functionality-wise in here, remember to update the
@@ -6969,8 +7488,14 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
// conversions.
VARIABLE(var_lhs, MachineRepresentation::kTagged, lhs);
VARIABLE(var_rhs, MachineRepresentation::kTagged, rhs);
- Variable* loop_vars[2] = {&var_lhs, &var_rhs};
- Label loop(this, 2, loop_vars);
+ VariableList loop_variable_list({&var_lhs, &var_rhs}, zone());
+ if (var_type_feedback != nullptr) {
+ // Initialize the type feedback to None. The current feedback is combined
+ // with the previous feedback.
+ var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kNone));
+ loop_variable_list.Add(var_type_feedback, zone());
+ }
+ Label loop(this, loop_variable_list);
Goto(&loop);
BIND(&loop);
{
@@ -6986,7 +7511,7 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
{
// The {lhs} and {rhs} reference the exact same value, yet we need special
// treatment for HeapNumber, as NaN is not equal to NaN.
- GenerateEqual_Same(this, lhs, &if_equal, &if_notequal);
+ GenerateEqual_Same(lhs, &if_equal, &if_notequal, var_type_feedback);
}
BIND(&if_notsame);
@@ -7004,6 +7529,10 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
BIND(&if_rhsissmi);
// We have already checked for {lhs} and {rhs} being the same value, so
// if both are Smis when we get here they must not be equal.
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kSignedSmall));
+ }
Goto(&if_notequal);
BIND(&if_rhsisnotsmi);
@@ -7021,11 +7550,21 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
// perform a floating point comparison.
var_fcmp_lhs.Bind(SmiToFloat64(lhs));
var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kNumber));
+ }
Goto(&do_fcmp);
}
BIND(&if_rhsisnotnumber);
{
+ // The {lhs} is a Smi and {rhs} is neither a HeapNumber nor a Smi.
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
+
// Load the instance type of the {rhs}.
Node* rhs_instance_type = LoadMapInstanceType(rhs_map);
@@ -7132,7 +7671,7 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
Switch(lhs_instance_type, &if_lhsisreceiver, case_values, case_labels,
arraysize(case_values));
for (int32_t i = 0; i < FIRST_NONSTRING_TYPE; ++i) {
- Bind(case_labels[i]);
+ BIND(case_labels[i]);
Goto(&if_lhsisstring);
delete case_labels[i];
}
@@ -7151,6 +7690,14 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
// string comparison then.
Callable callable = CodeFactory::StringEqual(isolate());
result.Bind(CallStub(callable, context, lhs, rhs));
+ if (var_type_feedback != nullptr) {
+ Node* lhs_feedback =
+ CollectFeedbackForString(lhs_instance_type);
+ Node* rhs_feedback =
+ CollectFeedbackForString(rhs_instance_type);
+ CombineFeedback(var_type_feedback,
+ SmiOr(lhs_feedback, rhs_feedback));
+ }
Goto(&end);
}
@@ -7162,6 +7709,10 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
// handling below (for {rhs} being a String).
var_lhs.Bind(rhs);
var_rhs.Bind(lhs);
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
Goto(&loop);
}
}
@@ -7179,6 +7730,10 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
// perform a floating point comparison.
var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
+ if (var_type_feedback != nullptr) {
+ CombineFeedback(var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kNumber));
+ }
Goto(&do_fcmp);
}
@@ -7187,6 +7742,12 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
// The {lhs} is a Number, the {rhs} is some other HeapObject.
Label if_rhsisstring(this, Label::kDeferred),
if_rhsisnotstring(this);
+
+ if (var_type_feedback != nullptr) {
+ // The {lhs} is a Number and {rhs} is neither a Smi nor a HeapNumber.
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
Branch(IsStringInstanceType(rhs_instance_type), &if_rhsisstring,
&if_rhsisnotstring);
@@ -7242,6 +7803,11 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
BIND(&if_lhsisoddball);
{
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
+
// The {lhs} is an Oddball and {rhs} is some other HeapObject.
Label if_lhsisboolean(this), if_lhsisnotboolean(this);
Node* boolean_map = BooleanMapConstant();
@@ -7285,6 +7851,11 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
BIND(&if_lhsissymbol);
{
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
+
// Check if the {rhs} is a JSReceiver.
Label if_rhsisreceiver(this), if_rhsisnotreceiver(this);
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
@@ -7320,6 +7891,13 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
BIND(&if_rhsisreceiver);
{
+ if (var_type_feedback != nullptr) {
+ // The {lhs} and {rhs} are receivers.
+ CombineFeedback(
+ var_type_feedback,
+ SmiConstant(CompareOperationFeedback::kReceiver));
+ }
+
// Both {lhs} and {rhs} are different JSReceiver references, so
// this cannot be considered equal.
Goto(&if_notequal);
@@ -7327,6 +7905,11 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
BIND(&if_rhsisnotreceiver);
{
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kAny));
+ }
+
// Check if {rhs} is Null or Undefined (an undetectable check
// is sufficient here, since we already know that {rhs} is not
// a JSReceiver).
@@ -7399,7 +7982,8 @@ Node* CodeStubAssembler::Equal(Node* lhs, Node* rhs, Node* context) {
return result.value();
}
-Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs) {
+Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs,
+ Variable* var_type_feedback) {
// Here's pseudo-code for the algorithm below in case of kDontNegateResult
// mode; for kNegateResult mode we properly negate the result.
//
@@ -7455,7 +8039,10 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs) {
{
// The {lhs} and {rhs} reference the exact same value, yet we need special
// treatment for HeapNumber, as NaN is not equal to NaN.
- GenerateEqual_Same(this, lhs, &if_equal, &if_notequal);
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kNone));
+ }
+ GenerateEqual_Same(lhs, &if_equal, &if_notequal, var_type_feedback);
}
BIND(&if_notsame);
@@ -7463,6 +8050,10 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs) {
// The {lhs} and {rhs} reference different objects, yet for Smi, HeapNumber
// and String they can still be considered equal.
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(SmiConstant(CompareOperationFeedback::kAny));
+ }
+
// Check if {lhs} is a Smi or a HeapObject.
Label if_lhsissmi(this), if_lhsisnotsmi(this);
Branch(TaggedIsSmi(lhs), &if_lhsissmi, &if_lhsisnotsmi);
@@ -7488,6 +8079,11 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs) {
Node* lhs_value = LoadHeapNumberValue(lhs);
Node* rhs_value = SmiToFloat64(rhs);
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kNumber));
+ }
+
// Perform a floating point comparison of {lhs} and {rhs}.
Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
}
@@ -7507,6 +8103,11 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs) {
Node* lhs_value = LoadHeapNumberValue(lhs);
Node* rhs_value = LoadHeapNumberValue(rhs);
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kNumber));
+ }
+
// Perform a floating point comparison of {lhs} and {rhs}.
Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
}
@@ -7530,6 +8131,9 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs) {
// Load the instance type of {lhs}.
Node* lhs_instance_type = LoadMapInstanceType(lhs_map);
+ // Load the instance type of {rhs}.
+ Node* rhs_instance_type = LoadInstanceType(rhs);
+
// Check if {lhs} is a String.
Label if_lhsisstring(this), if_lhsisnotstring(this);
Branch(IsStringInstanceType(lhs_instance_type), &if_lhsisstring,
@@ -7537,9 +8141,6 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs) {
BIND(&if_lhsisstring);
{
- // Load the instance type of {rhs}.
- Node* rhs_instance_type = LoadInstanceType(rhs);
-
// Check if {rhs} is also a String.
Label if_rhsisstring(this, Label::kDeferred),
if_rhsisnotstring(this);
@@ -7549,6 +8150,13 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs) {
BIND(&if_rhsisstring);
{
Callable callable = CodeFactory::StringEqual(isolate());
+ if (var_type_feedback != nullptr) {
+ Node* lhs_feedback =
+ CollectFeedbackForString(lhs_instance_type);
+ Node* rhs_feedback =
+ CollectFeedbackForString(rhs_instance_type);
+ var_type_feedback->Bind(SmiOr(lhs_feedback, rhs_feedback));
+ }
result.Bind(CallStub(callable, NoContextConstant(), lhs, rhs));
Goto(&end);
}
@@ -7558,6 +8166,14 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs) {
}
BIND(&if_lhsisnotstring);
+ if (var_type_feedback != nullptr) {
+ GotoIfNot(IsJSReceiverInstanceType(lhs_instance_type),
+ &if_notequal);
+ GotoIfNot(IsJSReceiverInstanceType(rhs_instance_type),
+ &if_notequal);
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kReceiver));
+ }
Goto(&if_notequal);
}
}
@@ -7574,6 +8190,10 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs) {
Branch(TaggedIsSmi(rhs), &if_rhsissmi, &if_rhsisnotsmi);
BIND(&if_rhsissmi);
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kSignedSmall));
+ }
Goto(&if_notequal);
BIND(&if_rhsisnotsmi);
@@ -7591,6 +8211,11 @@ Node* CodeStubAssembler::StrictEqual(Node* lhs, Node* rhs) {
Node* lhs_value = SmiToFloat64(lhs);
Node* rhs_value = LoadHeapNumberValue(rhs);
+ if (var_type_feedback != nullptr) {
+ var_type_feedback->Bind(
+ SmiConstant(CompareOperationFeedback::kNumber));
+ }
+
// Perform a floating point comparison of {lhs} and {rhs}.
Branch(Float64Equal(lhs_value, rhs_value), &if_equal, &if_notequal);
}
@@ -8333,14 +8958,13 @@ Node* CodeStubAssembler::AllocateJSArrayIterator(Node* array, Node* array_map,
return iterator;
}
-Node* CodeStubAssembler::ArraySpeciesCreate(Node* context, Node* originalArray,
- Node* len) {
- // TODO(mvstanton): Install a fast path as well, which avoids the runtime
+Node* CodeStubAssembler::TypedArraySpeciesCreateByLength(Node* context,
+ Node* originalArray,
+ Node* len) {
+ // TODO(tebbi): Install a fast path as well, which avoids the runtime
// call.
- Node* constructor =
- CallRuntime(Runtime::kArraySpeciesConstructor, context, originalArray);
- return ConstructJS(CodeFactory::Construct(isolate()), context, constructor,
- len);
+ return CallRuntime(Runtime::kTypedArraySpeciesCreateByLength, context,
+ originalArray, len);
}
Node* CodeStubAssembler::IsDetachedBuffer(Node* buffer) {
@@ -8358,8 +8982,8 @@ CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler, Node* argc,
argc_mode_(mode),
argc_(argc),
arguments_(nullptr),
- fp_(fp != nullptr ? fp : assembler->LoadFramePointer()) {
- Node* offset = assembler->ElementOffsetFromIndex(
+ fp_(fp != nullptr ? fp : assembler_->LoadFramePointer()) {
+ Node* offset = assembler_->ElementOffsetFromIndex(
argc_, FAST_ELEMENTS, mode,
(StandardFrameConstants::kFixedSlotCountAboveFp - 1) * kPointerSize);
arguments_ = assembler_->IntPtrAdd(fp_, offset);
@@ -8392,6 +9016,28 @@ Node* CodeStubArguments::AtIndex(int index) const {
return AtIndex(assembler_->IntPtrConstant(index));
}
+Node* CodeStubArguments::GetOptionalArgumentValue(int index,
+ Node* default_value) {
+ typedef CodeStubAssembler::Variable Variable;
+ Variable result(assembler_, MachineRepresentation::kTagged);
+ CodeStubAssembler::Label argument_missing(assembler_),
+ argument_done(assembler_, &result);
+
+ assembler_->GotoIf(assembler_->UintPtrOrSmiGreaterThanOrEqual(
+ assembler_->IntPtrOrSmiConstant(index, argc_mode_),
+ argc_, argc_mode_),
+ &argument_missing);
+ result.Bind(AtIndex(index));
+ assembler_->Goto(&argument_done);
+
+ assembler_->BIND(&argument_missing);
+ result.Bind(default_value);
+ assembler_->Goto(&argument_done);
+
+ assembler_->BIND(&argument_done);
+ return result.value();
+}
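
Behind the assembler plumbing this is an ordinary bounds-checked default. A minimal sketch:

#include <cstddef>
#include <vector>

// Returns args[index] if the caller passed at least index + 1 arguments,
// otherwise the default (the stub would hand back e.g. undefined).
int GetOptionalArgumentValue(const std::vector<int>& args, size_t index,
                             int default_value) {
  return index < args.size() ? args[index] : default_value;
}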
+
void CodeStubArguments::ForEach(
const CodeStubAssembler::VariableList& vars,
const CodeStubArguments::ForEachBodyFunction& body, Node* first, Node* last,
@@ -8442,6 +9088,11 @@ Node* CodeStubAssembler::IsHoleyFastElementsKind(Node* elements_kind) {
return Word32Equal(holey_elements, Int32Constant(1));
}
+Node* CodeStubAssembler::IsElementsKindGreaterThan(
+ Node* target_kind, ElementsKind reference_kind) {
+ return Int32GreaterThan(target_kind, Int32Constant(reference_kind));
+}
+
Node* CodeStubAssembler::IsDebugActive() {
Node* is_debug_active = Load(
MachineType::Uint8(),
diff --git a/deps/v8/src/code-stub-assembler.h b/deps/v8/src/code-stub-assembler.h
index f988e32367..5b94e3ac6e 100644
--- a/deps/v8/src/code-stub-assembler.h
+++ b/deps/v8/src/code-stub-assembler.h
@@ -28,8 +28,12 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(BooleanMap, BooleanMap) \
V(CodeMap, CodeMap) \
V(empty_string, EmptyString) \
+ V(length_string, LengthString) \
+ V(prototype_string, PrototypeString) \
V(EmptyFixedArray, EmptyFixedArray) \
+ V(EmptyWeakCell, EmptyWeakCell) \
V(FalseValue, False) \
+ V(FeedbackVectorMap, FeedbackVectorMap) \
V(FixedArrayMap, FixedArrayMap) \
V(FixedCOWArrayMap, FixedCOWArrayMap) \
V(FixedDoubleArrayMap, FixedDoubleArrayMap) \
@@ -49,7 +53,8 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
V(Tuple2Map, Tuple2Map) \
V(Tuple3Map, Tuple3Map) \
V(UndefinedValue, Undefined) \
- V(WeakCellMap, WeakCellMap)
+ V(WeakCellMap, WeakCellMap) \
+ V(SpeciesProtector, SpeciesProtector)
// Provides JavaScript-specific "macro-assembler" functionality on top of the
// CodeAssembler. By factoring the JavaScript-isms out of the CodeAssembler,
@@ -72,7 +77,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
typedef base::Flags<AllocationFlag> AllocationFlags;
enum ParameterMode { SMI_PARAMETERS, INTPTR_PARAMETERS };
-
// On 32-bit platforms, there is a slight performance advantage to doing all
// of the array offset/index arithmetic with SMIs, since it's possible
// to save a few tag/untag operations without paying an extra expense when
@@ -260,6 +264,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void Assert(const NodeGenerator& condition_body, const char* string = nullptr,
const char* file = nullptr, int line = 0);
+ void Check(const NodeGenerator& condition_body, const char* string = nullptr,
+ const char* file = nullptr, int line = 0);
Node* Select(Node* condition, const NodeGenerator& true_body,
const NodeGenerator& false_body, MachineRepresentation rep);
@@ -295,6 +301,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* WordIsWordAligned(Node* word);
Node* WordIsPowerOfTwo(Node* value);
+#if DEBUG
+ void Bind(Label* label, AssemblerDebugInfo debug_info);
+#else
+ void Bind(Label* label);
+#endif // DEBUG
+
void BranchIfSmiEqual(Node* a, Node* b, Label* if_true, Label* if_false) {
Branch(SmiEqual(a, b), if_true, if_false);
}
@@ -415,6 +427,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Load length field of a String object.
Node* LoadStringLength(Node* object);
+ // Loads a pointer to the sequential String char array.
+ Node* PointerToSeqStringData(Node* seq_string);
// Load value field of a JSValue object.
Node* LoadJSValueValue(Node* object);
// Load value field of a WeakCell object.
@@ -465,6 +479,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* LoadJSArrayElementsMap(ElementsKind kind, Node* native_context);
+ // Load the "prototype" property of a JSFunction.
+ Node* LoadJSFunctionPrototype(Node* function, Label* if_bailout);
+
// Store the floating point value of a HeapNumber.
Node* StoreHeapNumberValue(Node* object, Node* value);
// Store a field to an object on the heap.
@@ -501,9 +518,25 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* object, Node* index, Node* value,
ParameterMode parameter_mode = INTPTR_PARAMETERS);
- Node* BuildAppendJSArray(ElementsKind kind, Node* context, Node* array,
+ void EnsureArrayLengthWritable(Node* map, Label* bailout);
+
+ // EnsureArrayPushable verifies that the receiver:
+ // 1. Is not a prototype.
+ // 2. Is not a dictionary.
+ // 3. Has a writable length property.
+ // It returns the ElementsKind as a node for further division into cases.
+ Node* EnsureArrayPushable(Node* receiver, Label* bailout);
+
+ void TryStoreArrayElement(ElementsKind kind, ParameterMode mode,
+ Label* bailout, Node* elements, Node* index,
+ Node* value);
+ // Consumes |args| into the array and returns the tagged new length.
+ Node* BuildAppendJSArray(ElementsKind kind, Node* array,
CodeStubArguments& args, Variable& arg_index,
Label* bailout);
+ // Pushes |value| onto the end of the array.
+ void BuildAppendJSArray(ElementsKind kind, Node* array, Node* value,
+ Label* bailout);
void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address,
Node* value);
@@ -553,8 +586,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* AllocateRegExpResult(Node* context, Node* length, Node* index,
Node* input);
- Node* AllocateNameDictionary(int capacity);
- Node* AllocateNameDictionary(Node* capacity);
+ Node* AllocateNameDictionary(int at_least_space_for);
+ Node* AllocateNameDictionary(Node* at_least_space_for);
+ Node* AllocateNameDictionaryWithCapacity(Node* capacity);
+ Node* CopyNameDictionary(Node* dictionary, Label* large_object_fallback);
Node* AllocateJSObjectFromMap(Node* map, Node* properties = nullptr,
Node* elements = nullptr,
@@ -594,8 +629,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* AllocateJSArrayIterator(Node* array, Node* array_map, Node* map);
- // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
- Node* ArraySpeciesCreate(Node* context, Node* originalArray, Node* len);
+ Node* TypedArraySpeciesCreateByLength(Node* context, Node* originalArray,
+ Node* len);
void FillFixedArrayWithValue(ElementsKind kind, Node* array, Node* from_index,
Node* to_index,
@@ -663,6 +698,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* capacity, Node* new_capacity,
ParameterMode mode, Label* bailout);
+ // Given a need to grow by |growth|, allocate an appropriate new capacity
+ // if necessary, and return a new elements FixedArray object. Jumps to
+ // |bailout| on allocation failure.
+ void PossiblyGrowElementsCapacity(ParameterMode mode, ElementsKind kind,
+ Node* array, Node* length,
+ Variable* var_elements, Node* growth,
+ Label* bailout);
+
// Allocation site manipulation
void InitializeAllocationMemento(Node* base_allocation,
int base_allocation_size,
@@ -681,6 +724,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* ChangeNumberToFloat64(Node* value);
Node* ChangeNumberToIntPtr(Node* value);
+ Node* TimesPointerSize(Node* value);
+
// Type conversions.
// Throws a TypeError for {method_name} if {value} is not coercible to Object,
// or returns the {value} converted to a String otherwise.
@@ -708,7 +753,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsExternalStringInstanceType(Node* instance_type);
Node* IsShortExternalStringInstanceType(Node* instance_type);
Node* IsSequentialStringInstanceType(Node* instance_type);
+ Node* IsConsStringInstanceType(Node* instance_type);
+ Node* IsIndirectStringInstanceType(Node* instance_type);
Node* IsString(Node* object);
+ Node* IsJSObjectMap(Node* map);
Node* IsJSObject(Node* object);
Node* IsJSGlobalProxy(Node* object);
Node* IsJSReceiverInstanceType(Node* instance_type);
@@ -720,13 +768,18 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsCallable(Node* object);
Node* IsBoolean(Node* object);
Node* IsPropertyCell(Node* object);
+ Node* IsAccessorInfo(Node* object);
Node* IsAccessorPair(Node* object);
Node* IsHeapNumber(Node* object);
Node* IsName(Node* object);
Node* IsSymbol(Node* object);
Node* IsPrivateSymbol(Node* object);
+ Node* IsJSValueInstanceType(Node* instance_type);
Node* IsJSValue(Node* object);
+ Node* IsJSValueMap(Node* map);
+ Node* IsJSArrayInstanceType(Node* instance_type);
Node* IsJSArray(Node* object);
+ Node* IsJSArrayMap(Node* object);
Node* IsNativeContext(Node* object);
Node* IsWeakCell(Node* object);
Node* IsFixedDoubleArray(Node* object);
@@ -734,10 +787,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* IsDictionary(Node* object);
Node* IsUnseededNumberDictionary(Node* object);
Node* IsConstructorMap(Node* map);
+ Node* IsJSFunctionInstanceType(Node* instance_type);
Node* IsJSFunction(Node* object);
+ Node* IsJSFunctionMap(Node* object);
Node* IsJSTypedArray(Node* object);
+ Node* IsJSArrayBuffer(Node* object);
Node* IsFixedTypedArray(Node* object);
Node* IsJSRegExp(Node* object);
+ Node* IsFeedbackVector(Node* object);
// True iff |object| is a Smi or a HeapNumber.
Node* IsNumber(Node* object);
@@ -745,10 +802,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// True iff |number| is either a Smi, or a HeapNumber whose value is not
// within Smi range.
Node* IsNumberNormalized(Node* number);
+ Node* IsNumberPositive(Node* number);
// ElementsKind helpers:
Node* IsFastElementsKind(Node* elements_kind);
Node* IsHoleyFastElementsKind(Node* elements_kind);
+ Node* IsElementsKindGreaterThan(Node* target_kind,
+ ElementsKind reference_kind);
// String helpers.
// Load a character from a String (might flatten a ConsString).
@@ -756,9 +816,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
ParameterMode parameter_mode = SMI_PARAMETERS);
// Return the single character string with only {code}.
Node* StringFromCharCode(Node* code);
+
+ enum class SubStringFlags { NONE, FROM_TO_ARE_BOUNDED };
+
// Return a new string object which holds a substring containing the range
// [from,to[ of string. |from| and |to| are expected to be tagged.
- Node* SubString(Node* context, Node* string, Node* from, Node* to);
+ // If |flags| has the value FROM_TO_ARE_BOUNDED, then |from| and |to| are
+ // assumed to be in the range [0, string-length).
+ Node* SubString(Node* context, Node* string, Node* from, Node* to,
+ SubStringFlags flags = SubStringFlags::NONE);
// Return a new string object produced by concatenating |first| with |second|.
Node* StringAdd(Node* context, Node* first, Node* second,
@@ -804,6 +870,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Convert any object to a String.
Node* ToString(Node* context, Node* input);
+ Node* ToString_Inline(Node* const context, Node* const input);
// Convert any object to a Primitive.
Node* JSReceiverToPrimitive(Node* context, Node* input);
@@ -819,6 +886,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// ES6 7.1.15 ToLength, but jumps to range_error if the result is not a Smi.
Node* ToSmiLength(Node* input, Node* const context, Label* range_error);
+ // ES6 7.1.15 ToLength, but with inlined fast path.
+ Node* ToLength_Inline(Node* const context, Node* const input);
+
// Convert any object to an Integer.
Node* ToInteger(Node* context, Node* input,
ToIntegerTruncationMode mode = kNoTruncation);
@@ -925,9 +995,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void Use(Label* label);
// Various building blocks for stubs doing property lookups.
+
+ // |if_notinternalized| is optional; |if_bailout| will be used by default.
void TryToName(Node* key, Label* if_keyisindex, Variable* var_index,
- Label* if_keyisunique, Variable* var_unique,
- Label* if_bailout);
+ Label* if_keyisunique, Variable* var_unique, Label* if_bailout,
+ Label* if_notinternalized = nullptr);
+
+ // Performs a hash computation and string table lookup for the given string,
+ // and jumps to:
+ // - |if_index| if the string is an array index like "123"; |var_index|
+ // will contain the intptr representation of that index.
+ // - |if_internalized| if the string exists in the string table; the
+ // internalized version will be in |var_internalized|.
+ // - |if_not_internalized| if the string is not in the string table (the
+ // lookup does not add it).
+ // - |if_bailout| for unsupported cases (e.g. uncacheable array index).
+ void TryInternalizeString(Node* string, Label* if_index, Variable* var_index,
+ Label* if_internalized, Variable* var_internalized,
+ Label* if_not_internalized, Label* if_bailout);
// Calculates array index for given dictionary entry and entry field.
// See Dictionary::EntryToIndex().
@@ -972,11 +1057,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Stores the value for the entry with the given key_index.
template <class ContainerType>
- void StoreValueByKeyIndex(Node* container, Node* key_index, Node* value) {
+ void StoreValueByKeyIndex(
+ Node* container, Node* key_index, Node* value,
+ WriteBarrierMode write_barrier = UPDATE_WRITE_BARRIER) {
const int kKeyToValueOffset =
(ContainerType::kEntryValueIndex - ContainerType::kEntryKeyIndex) *
kPointerSize;
- StoreFixedArrayElement(container, key_index, value, UPDATE_WRITE_BARRIER,
+ StoreFixedArrayElement(container, key_index, value, write_barrier,
kKeyToValueOffset);
}
@@ -984,16 +1071,34 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
Node* HashTableComputeCapacity(Node* at_least_space_for);
template <class Dictionary>
- Node* GetNumberOfElements(Node* dictionary);
+ Node* GetNumberOfElements(Node* dictionary) {
+ return LoadFixedArrayElement(dictionary,
+ Dictionary::kNumberOfElementsIndex);
+ }
template <class Dictionary>
- void SetNumberOfElements(Node* dictionary, Node* num_elements_smi);
+ void SetNumberOfElements(Node* dictionary, Node* num_elements_smi) {
+ StoreFixedArrayElement(dictionary, Dictionary::kNumberOfElementsIndex,
+ num_elements_smi, SKIP_WRITE_BARRIER);
+ }
template <class Dictionary>
- Node* GetNumberOfDeletedElements(Node* dictionary);
+ Node* GetNumberOfDeletedElements(Node* dictionary) {
+ return LoadFixedArrayElement(dictionary,
+ Dictionary::kNumberOfDeletedElementsIndex);
+ }
+
+ template <class Dictionary>
+ void SetNumberOfDeletedElements(Node* dictionary, Node* num_deleted_smi) {
+ StoreFixedArrayElement(dictionary,
+ Dictionary::kNumberOfDeletedElementsIndex,
+ num_deleted_smi, SKIP_WRITE_BARRIER);
+ }
template <class Dictionary>
- Node* GetCapacity(Node* dictionary);
+ Node* GetCapacity(Node* dictionary) {
+ return LoadFixedArrayElement(dictionary, Dictionary::kCapacityIndex);
+ }
template <class Dictionary>
Node* GetNextEnumerationIndex(Node* dictionary);
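
These templates now inline what used to be out-of-line helpers: the dictionary's bookkeeping counters are plain slots at fixed indices at the front of its backing FixedArray. A rough model of that layout (indices illustrative):

#include <vector>

// A dictionary's backing store is one flat array: a few header slots,
// then the (key, value, details) entry triples.
struct DictionaryModel {
  static constexpr int kNumberOfElementsIndex = 0;
  static constexpr int kNumberOfDeletedElementsIndex = 1;
  static constexpr int kCapacityIndex = 2;
  std::vector<int> backing;

  int GetNumberOfElements() const { return backing[kNumberOfElementsIndex]; }
  void SetNumberOfElements(int n) { backing[kNumberOfElementsIndex] = n; }
  int GetNumberOfDeletedElements() const {
    return backing[kNumberOfDeletedElementsIndex];
  }
  int GetCapacity() const { return backing[kCapacityIndex]; }
};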
@@ -1058,6 +1163,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
return CallStub(Builtins::CallableFor(isolate(), id), context, args...);
}
+ template <class... TArgs>
+ Node* TailCallBuiltin(Builtins::Name id, Node* context, TArgs... args) {
+ return TailCallStub(Builtins::CallableFor(isolate(), id), context, args...);
+ }
+
void LoadPropertyFromFastObject(Node* object, Node* map, Node* descriptors,
Node* name_index, Variable* var_details,
Variable* var_value);
@@ -1124,6 +1234,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Update the type feedback vector.
void UpdateFeedback(Node* feedback, Node* feedback_vector, Node* slot_id);
+ // Combine the new feedback with the existing_feedback.
+ void CombineFeedback(Variable* existing_feedback, Node* feedback);
+
+ // Check if a property name might require protector invalidation when it is
+ // used for a property store or deletion.
+ void CheckForAssociatedProtector(Node* name, Label* if_protector);
+
Node* LoadReceiverMap(Node* receiver);
// Emits keyed sloppy arguments load. Returns either the loaded value.
@@ -1178,6 +1295,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Create a new AllocationSite and install it into a feedback vector.
Node* CreateAllocationSiteInFeedbackVector(Node* feedback_vector, Node* slot);
+ // Given a recently allocated object {object}, with map {initial_map},
+ // initialize remaining fields appropriately to comply with slack tracking.
+ void HandleSlackTracking(Node* context, Node* object, Node* initial_map,
+ int start_offset);
+
enum class IndexAdvanceMode { kPre, kPost };
typedef std::function<void(Node* index)> FastLoopBody;
@@ -1243,7 +1365,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
};
Node* RelationalComparison(RelationalComparisonMode mode, Node* lhs,
- Node* rhs, Node* context);
+ Node* rhs, Node* context,
+ Variable* var_type_feedback = nullptr);
void BranchIfNumericRelationalComparison(RelationalComparisonMode mode,
Node* lhs, Node* rhs, Label* if_true,
@@ -1251,9 +1374,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void GotoUnlessNumberLessThan(Node* lhs, Node* rhs, Label* if_false);
- Node* Equal(Node* lhs, Node* rhs, Node* context);
+ Node* Equal(Node* lhs, Node* rhs, Node* context,
+ Variable* var_type_feedback = nullptr);
- Node* StrictEqual(Node* lhs, Node* rhs);
+ Node* StrictEqual(Node* lhs, Node* rhs,
+ Variable* var_type_feedback = nullptr);
// ECMA#sec-samevalue
// Similar to StrictEqual except that NaNs are treated as equal and minus zero
@@ -1325,6 +1450,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
void DescriptorLookupBinary(Node* unique_name, Node* descriptors, Node* nof,
Label* if_found, Variable* var_name_index,
Label* if_not_found);
+ // Implements DescriptorArray::ToKeyIndex.
+ // Returns an untagged IntPtr.
+ Node* DescriptorArrayToKeyIndex(Node* descriptor_number);
Node* CallGetterIfAccessor(Node* value, Node* details, Node* context,
Node* receiver, Label* if_bailout);
@@ -1368,9 +1496,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Implements DescriptorArray::number_of_entries.
// Returns an untagged int32.
Node* DescriptorArrayNumberOfEntries(Node* descriptors);
- // Implements DescriptorArray::ToKeyIndex.
- // Returns an untagged IntPtr.
- Node* DescriptorArrayToKeyIndex(Node* descriptor_number);
// Implements DescriptorArray::GetSortedKeyIndex.
// Returns an untagged int32.
Node* DescriptorArrayGetSortedKeyIndex(Node* descriptors,
@@ -1378,6 +1503,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler : public compiler::CodeAssembler {
// Implements DescriptorArray::GetKey.
Node* DescriptorArrayGetKey(Node* descriptors, Node* descriptor_number);
+ Node* CollectFeedbackForString(Node* instance_type);
+ void GenerateEqual_Same(Node* value, Label* if_equal, Label* if_notequal,
+ Variable* var_type_feedback = nullptr);
+ Node* AllocAndCopyStringCharacters(Node* context, Node* from,
+ Node* from_instance_type, Node* from_index,
+ Node* character_count);
+
static const int kElementLoopUnrollThreshold = 8;
};
@@ -1385,11 +1517,12 @@ class CodeStubArguments {
public:
typedef compiler::Node Node;
- // |argc| is an uint32 value which specifies the number of arguments passed
+ // |argc| is an intptr value which specifies the number of arguments passed
// to the builtin excluding the receiver.
CodeStubArguments(CodeStubAssembler* assembler, Node* argc)
: CodeStubArguments(assembler, argc, nullptr,
CodeStubAssembler::INTPTR_PARAMETERS) {}
+ // |argc| is either a Smi or an intptr, depending on |param_mode|.
CodeStubArguments(CodeStubAssembler* assembler, Node* argc, Node* fp,
CodeStubAssembler::ParameterMode param_mode);
@@ -1404,6 +1537,8 @@ class CodeStubArguments {
Node* AtIndex(int index) const;
+ Node* GetOptionalArgumentValue(int index, Node* default_value);
+
Node* GetLength() const { return argc_; }
typedef std::function<void(Node* arg)> ForEachBodyFunction;
@@ -1439,19 +1574,28 @@ class ToDirectStringAssembler : public CodeStubAssembler {
enum StringPointerKind { PTR_TO_DATA, PTR_TO_STRING };
public:
- explicit ToDirectStringAssembler(compiler::CodeAssemblerState* state,
- Node* string);
+ enum Flag {
+ kDontUnpackSlicedStrings = 1 << 0,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ ToDirectStringAssembler(compiler::CodeAssemblerState* state, Node* string,
+ Flags flags = Flags());
// Converts flat cons, thin, and sliced strings and returns the direct
// string. The result can be either a sequential or external string.
+ // Jumps to if_bailout if the string is indirect and cannot
+ // be unpacked.
Node* TryToDirect(Label* if_bailout);
// Returns a pointer to the beginning of the string data.
+ // Jumps to if_bailout if the external string cannot be unpacked.
Node* PointerToData(Label* if_bailout) {
return TryToSequential(PTR_TO_DATA, if_bailout);
}
// Returns a pointer that, offset-wise, looks like a String.
+ // Jumps to if_bailout if the external string cannot be unpacked.
Node* PointerToString(Label* if_bailout) {
return TryToSequential(PTR_TO_STRING, if_bailout);
}
@@ -1468,8 +1612,13 @@ class ToDirectStringAssembler : public CodeStubAssembler {
Variable var_instance_type_;
Variable var_offset_;
Variable var_is_external_;
+
+ const Flags flags_;
};
+#define CSA_CHECK(csa, x) \
+ (csa)->Check([&] { return (x); }, #x, __FILE__, __LINE__)
+
#ifdef DEBUG
#define CSA_ASSERT(csa, x) \
(csa)->Assert([&] { return (x); }, #x, __FILE__, __LINE__)
@@ -1485,21 +1634,24 @@ class ToDirectStringAssembler : public CodeStubAssembler {
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
CSA_ASSERT_JS_ARGC_OP(csa, Word32Equal, ==, expected)
-#define BIND(label) Bind(label, {#label, __FILE__, __LINE__})
+#define CSA_DEBUG_INFO(name) \
+ , { #name, __FILE__, __LINE__ }
+#define BIND(label) Bind(label CSA_DEBUG_INFO(label))
#define VARIABLE(name, ...) \
- Variable name(this, {#name, __FILE__, __LINE__}, __VA_ARGS__);
+ Variable name(this CSA_DEBUG_INFO(name), __VA_ARGS__);
#else // DEBUG
#define CSA_ASSERT(csa, x) ((void)0)
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0)
+#define CSA_DEBUG_INFO(name)
#define BIND(label) Bind(label);
#define VARIABLE(name, ...) Variable name(this, __VA_ARGS__);
#endif // DEBUG
#ifdef ENABLE_SLOW_DCHECKS
-#define CSA_SLOW_ASSERT(csa, x) \
- if (FLAG_enable_slow_asserts) { \
- (csa)->Assert([&] { return (x); }, #x, __FILE__, __LINE__); \
+#define CSA_SLOW_ASSERT(csa, x) \
+ if (FLAG_enable_slow_asserts) { \
+ CSA_ASSERT(csa, x); \
}
#else
#define CSA_SLOW_ASSERT(csa, x) ((void)0)
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index b3269eade1..7a1b905fd6 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -45,7 +45,6 @@ class Node;
V(MathPow) \
V(ProfileEntryHook) \
V(RecordWrite) \
- V(RegExpExec) \
V(StoreBufferOverflow) \
V(StoreSlowElement) \
V(SubString) \
@@ -828,39 +827,29 @@ class CallApiCallbackStub : public PlatformCodeStub {
static const int kArgMax = (1 << kArgBits) - 1;
// CallApiCallbackStub for regular setters and getters.
- CallApiCallbackStub(Isolate* isolate, bool is_store, bool call_data_undefined,
- bool is_lazy)
- : CallApiCallbackStub(isolate, is_store ? 1 : 0, is_store,
- call_data_undefined, is_lazy) {}
+ CallApiCallbackStub(Isolate* isolate, bool is_store, bool is_lazy)
+ : CallApiCallbackStub(isolate, is_store ? 1 : 0, is_store, is_lazy) {}
// CallApiCallbackStub for callback functions.
- CallApiCallbackStub(Isolate* isolate, int argc, bool call_data_undefined,
- bool is_lazy)
- : CallApiCallbackStub(isolate, argc, false, call_data_undefined,
- is_lazy) {}
+ CallApiCallbackStub(Isolate* isolate, int argc, bool is_lazy)
+ : CallApiCallbackStub(isolate, argc, false, is_lazy) {}
private:
- CallApiCallbackStub(Isolate* isolate, int argc, bool is_store,
- bool call_data_undefined, bool is_lazy)
+ CallApiCallbackStub(Isolate* isolate, int argc, bool is_store, bool is_lazy)
: PlatformCodeStub(isolate) {
CHECK(0 <= argc && argc <= kArgMax);
minor_key_ = IsStoreBits::encode(is_store) |
- CallDataUndefinedBits::encode(call_data_undefined) |
ArgumentBits::encode(argc) |
IsLazyAccessorBits::encode(is_lazy);
}
bool is_store() const { return IsStoreBits::decode(minor_key_); }
bool is_lazy() const { return IsLazyAccessorBits::decode(minor_key_); }
- bool call_data_undefined() const {
- return CallDataUndefinedBits::decode(minor_key_);
- }
int argc() const { return ArgumentBits::decode(minor_key_); }
class IsStoreBits: public BitField<bool, 0, 1> {};
- class CallDataUndefinedBits: public BitField<bool, 1, 1> {};
+ class IsLazyAccessorBits : public BitField<bool, 1, 1> {};
class ArgumentBits : public BitField<int, 2, kArgBits> {};
- class IsLazyAccessorBits : public BitField<bool, 3 + kArgBits, 1> {};
DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiCallback);
DEFINE_PLATFORM_CODE_STUB(CallApiCallback, PlatformCodeStub);
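
With CallDataUndefinedBits gone, the remaining fields repack contiguously from bit 0, and ArgumentBits keeps its old position. BitField itself is just shift-and-mask; a simplified stand-in (not V8's actual template, and kArgBits' width is assumed here):

#include <cstdint>

// Minimal BitField lookalike: a field kSize bits wide, kShift bits in
// from the low end of a 32-bit key.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & kMask;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> kShift);
  }
};

using IsStoreBits = BitField<bool, 0, 1>;
using IsLazyAccessorBits = BitField<bool, 1, 1>;  // Slot freed by the removal.
using ArgumentBits = BitField<int, 2, 6>;  // kArgBits assumed to be 6 here.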
@@ -1136,15 +1125,6 @@ class JSEntryStub : public PlatformCodeStub {
DEFINE_PLATFORM_CODE_STUB(JSEntry, PlatformCodeStub);
};
-
-class RegExpExecStub: public PlatformCodeStub {
- public:
- explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
- DEFINE_CALL_INTERFACE_DESCRIPTOR(RegExpExec);
- DEFINE_PLATFORM_CODE_STUB(RegExpExec, PlatformCodeStub);
-};
-
// TODO(bmeurer/mvstanton): Turn CallConstructStub into ConstructICStub.
class CallConstructStub final : public PlatformCodeStub {
public:
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 11837e97ba..d43d1f47b4 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -69,24 +69,20 @@ UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction)
#undef UNARY_MATH_FUNCTION
-#define __ ACCESS_MASM(masm_)
-
#ifdef DEBUG
-Comment::Comment(MacroAssembler* masm, const char* msg)
- : masm_(masm), msg_(msg) {
- __ RecordComment(msg);
+Comment::Comment(Assembler* assembler, const char* msg)
+ : assembler_(assembler), msg_(msg) {
+ assembler_->RecordComment(msg);
}
Comment::~Comment() {
- if (msg_[0] == '[') __ RecordComment("]");
+ if (msg_[0] == '[') assembler_->RecordComment("]");
}
#endif // DEBUG
-#undef __
-
void CodeGenerator::MakeCodePrologue(CompilationInfo* info, const char* kind) {
bool print_ast = false;
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 134040038c..1619e0dd30 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -9,6 +9,7 @@
#include "src/globals.h"
#include "src/objects-inl.h"
#include "src/objects/compilation-cache-inl.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
@@ -81,9 +82,9 @@ void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
}
}
-
-void CompilationSubCache::Iterate(ObjectVisitor* v) {
- v->VisitPointers(&tables_[0], &tables_[generations_]);
+void CompilationSubCache::Iterate(RootVisitor* v) {
+ v->VisitRootPointers(Root::kCompilationCache, &tables_[0],
+ &tables_[generations_]);
}
@@ -366,8 +367,7 @@ void CompilationCache::Clear() {
}
}
-
-void CompilationCache::Iterate(ObjectVisitor* v) {
+void CompilationCache::Iterate(RootVisitor* v) {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches_[i]->Iterate(v);
}
diff --git a/deps/v8/src/compilation-cache.h b/deps/v8/src/compilation-cache.h
index efbe4f284c..89c54a4227 100644
--- a/deps/v8/src/compilation-cache.h
+++ b/deps/v8/src/compilation-cache.h
@@ -14,6 +14,8 @@ namespace internal {
template <typename T>
class Handle;
+class RootVisitor;
+
// The compilation cache consists of several generational sub-caches which use
// this class as a base class. A sub-cache contains a compilation cache table
// for each generation of the sub-cache. Since the same source code string has
@@ -49,7 +51,7 @@ class CompilationSubCache {
void Age();
// GC support.
- void Iterate(ObjectVisitor* v);
+ void Iterate(RootVisitor* v);
void IterateFunctions(ObjectVisitor* v);
// Clear this sub-cache evicting all its content.
@@ -197,7 +199,7 @@ class CompilationCache {
void Remove(Handle<SharedFunctionInfo> function_info);
// GC support.
- void Iterate(ObjectVisitor* v);
+ void Iterate(RootVisitor* v);
void IterateFunctions(ObjectVisitor* v);
// Notify the cache that a mark-sweep garbage collection is about to
diff --git a/deps/v8/src/compilation-dependencies.h b/deps/v8/src/compilation-dependencies.h
index 516962aa7d..d0d9b7647f 100644
--- a/deps/v8/src/compilation-dependencies.h
+++ b/deps/v8/src/compilation-dependencies.h
@@ -7,6 +7,7 @@
#include "src/handles.h"
#include "src/objects.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compilation-info.cc b/deps/v8/src/compilation-info.cc
index 845655ca57..a2e75fb2fc 100644
--- a/deps/v8/src/compilation-info.cc
+++ b/deps/v8/src/compilation-info.cc
@@ -124,8 +124,7 @@ bool CompilationInfo::is_this_defined() const { return !IsStub(); }
// profiler, so they trigger their own optimization when they're called
// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
bool CompilationInfo::ShouldSelfOptimize() {
- return FLAG_opt && FLAG_crankshaft &&
- !(literal()->flags() & AstProperties::kDontSelfOptimize) &&
+ return FLAG_opt && !(literal()->flags() & AstProperties::kDontSelfOptimize) &&
!literal()->dont_optimize() &&
literal()->scope()->AllowsLazyCompilation() &&
!shared_info()->optimization_disabled();
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
index 7e90f3e8fa..bc1ec45a5b 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.cc
@@ -65,7 +65,7 @@ CompilerDispatcherJob::CompilerDispatcherJob(
CompilerDispatcherTracer* tracer, size_t max_stack_size,
Handle<String> source, int start_position, int end_position,
LanguageMode language_mode, int function_literal_id, bool native,
- bool module, bool is_named_expression, bool calls_eval, uint32_t hash_seed,
+ bool module, bool is_named_expression, uint32_t hash_seed,
AccountingAllocator* zone_allocator, int compiler_hints,
const AstStringConstants* ast_string_constants,
CompileJobFinishCallback* finish_callback)
@@ -90,11 +90,14 @@ CompilerDispatcherJob::CompilerDispatcherJob(
parse_info_->set_language_mode(language_mode);
parse_info_->set_function_literal_id(function_literal_id);
parse_info_->set_ast_string_constants(ast_string_constants);
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ parse_info_->set_runtime_call_stats(new (parse_info_->zone())
+ RuntimeCallStats());
+ }
parse_info_->set_native(native);
parse_info_->set_module(module);
parse_info_->set_is_named_expression(is_named_expression);
- parse_info_->set_calls_eval(calls_eval);
parser_.reset(new Parser(parse_info_.get()));
parser_->DeserializeScopeChain(parse_info_.get(), MaybeHandle<ScopeInfo>());
@@ -267,6 +270,10 @@ void CompilerDispatcherJob::PrepareToParseOnMainThread() {
parse_info_->set_unicode_cache(unicode_cache_.get());
parse_info_->set_language_mode(shared_->language_mode());
parse_info_->set_function_literal_id(shared_->function_literal_id());
+ if (V8_UNLIKELY(FLAG_runtime_stats)) {
+ parse_info_->set_runtime_call_stats(new (parse_info_->zone())
+ RuntimeCallStats());
+ }
parser_.reset(new Parser(parse_info_.get()));
MaybeHandle<ScopeInfo> outer_scope_info;
@@ -335,6 +342,7 @@ bool CompilerDispatcherJob::FinalizeParsingOnMainThread() {
status_ = CompileJobStatus::kReadyToAnalyze;
}
parser_->UpdateStatistics(isolate_, script);
+ parse_info_->UpdateStatisticsAfterBackgroundParse(isolate_);
DeferredHandleScope scope(isolate_);
{
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
index a7472bafc6..7b952f6cad 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher-job.h
@@ -11,7 +11,7 @@
#include "src/base/macros.h"
#include "src/globals.h"
#include "src/handles.h"
-#include "testing/gtest/include/gtest/gtest_prod.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
namespace internal {
@@ -43,7 +43,7 @@ enum class CompileJobStatus {
kDone,
};
-class CompileJobFinishCallback {
+class V8_EXPORT_PRIVATE CompileJobFinishCallback {
public:
virtual ~CompileJobFinishCallback() {}
virtual void ParseFinished(std::unique_ptr<ParseInfo> parse_info) = 0;
@@ -61,9 +61,8 @@ class V8_EXPORT_PRIVATE CompilerDispatcherJob {
Handle<String> source, int start_position,
int end_position, LanguageMode language_mode,
int function_literal_id, bool native, bool module,
- bool is_named_expression, bool calls_eval,
- uint32_t hash_seed, AccountingAllocator* zone_allocator,
- int compiler_hints,
+ bool is_named_expression, uint32_t hash_seed,
+ AccountingAllocator* zone_allocator, int compiler_hints,
const AstStringConstants* ast_string_constants,
CompileJobFinishCallback* finish_callback);
// Creates a CompilerDispatcherJob in the analyzed state.
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
index 68fa90ea91..69152b37f7 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.cc
@@ -248,8 +248,6 @@ CompilerDispatcher::~CompilerDispatcher() {
bool CompilerDispatcher::CanEnqueue() {
if (!IsEnabled()) return false;
- DCHECK(FLAG_ignition);
-
if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
return false;
}
@@ -263,6 +261,8 @@ bool CompilerDispatcher::CanEnqueue() {
}
bool CompilerDispatcher::CanEnqueue(Handle<SharedFunctionInfo> function) {
+ DCHECK_IMPLIES(IsEnabled(), FLAG_ignition);
+
if (!CanEnqueue()) return false;
// We only handle functions (no eval / top-level code / wasm) that are
@@ -338,7 +338,7 @@ bool CompilerDispatcher::Enqueue(Handle<String> source, int start_position,
int end_position, LanguageMode language_mode,
int function_literal_id, bool native,
bool module, bool is_named_expression,
- bool calls_eval, int compiler_hints,
+ int compiler_hints,
CompileJobFinishCallback* finish_callback,
JobId* job_id) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
@@ -353,8 +353,8 @@ bool CompilerDispatcher::Enqueue(Handle<String> source, int start_position,
std::unique_ptr<CompilerDispatcherJob> job(new CompilerDispatcherJob(
tracer_.get(), max_stack_size_, source, start_position, end_position,
language_mode, function_literal_id, native, module, is_named_expression,
- calls_eval, isolate_->heap()->HashSeed(), isolate_->allocator(),
- compiler_hints, isolate_->ast_string_constants(), finish_callback));
+ isolate_->heap()->HashSeed(), isolate_->allocator(), compiler_hints,
+ isolate_->ast_string_constants(), finish_callback));
JobId id = Enqueue(std::move(job));
if (job_id != nullptr) {
*job_id = id;
@@ -473,14 +473,31 @@ bool CompilerDispatcher::FinishNow(Handle<SharedFunctionInfo> function) {
JobMap::const_iterator job = GetJobFor(function);
CHECK(job != jobs_.end());
bool result = FinishNow(job->second.get());
- if (!job->second->shared().is_null()) {
- shared_to_job_id_.Delete(job->second->shared());
- }
RemoveIfFinished(job);
return result;
}
void CompilerDispatcher::FinishAllNow() {
+ // First, finish all jobs that are not running in the background.
+ for (auto it = jobs_.cbegin(); it != jobs_.cend();) {
+ CompilerDispatcherJob* job = it->second.get();
+ bool is_running_in_background;
+ {
+ base::LockGuard<base::Mutex> lock(&mutex_);
+ is_running_in_background =
+ running_background_jobs_.find(job) != running_background_jobs_.end();
+ pending_background_jobs_.erase(job);
+ }
+ if (!is_running_in_background) {
+ while (!IsFinished(job)) {
+ DoNextStepOnMainThread(isolate_, job, ExceptionHandling::kThrow);
+ }
+ it = RemoveIfFinished(it);
+ } else {
+ ++it;
+ }
+ }
+ // Potentially wait for jobs that were running in the background.
for (auto it = jobs_.cbegin(); it != jobs_.cend();
it = RemoveIfFinished(it)) {
FinishNow(it->second.get());
@@ -598,7 +615,7 @@ CompilerDispatcher::JobMap::const_iterator CompilerDispatcher::GetJobFor(
void CompilerDispatcher::ScheduleIdleTaskFromAnyThread() {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- DCHECK(platform_->IdleTasksEnabled(v8_isolate));
+ if (!platform_->IdleTasksEnabled(v8_isolate)) return;
{
base::LockGuard<base::Mutex> lock(&mutex_);
if (idle_task_scheduled_) return;
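Two behavioral notes on the compiler-dispatcher.cc hunks above. FinishAllNow now makes two passes: jobs that no background thread has claimed are stepped to completion inline (and erased from the pending set so a background thread cannot pick them up mid-pass), and only then does the dispatcher block on jobs that a background thread already owns. Separately, ScheduleIdleTaskFromAnyThread now degrades gracefully, returning early instead of DCHECK-failing when the platform provides no idle tasks. A compressed sketch of the FinishAllNow shape, with stand-in types:

    #include <mutex>
    #include <set>
    #include <vector>

    struct JobSketch { bool finished = false; };

    void FinishAllNowSketch(std::vector<JobSketch*>& jobs,
                            std::set<JobSketch*>& running_in_background,
                            std::set<JobSketch*>& pending_background,
                            std::mutex& mu) {
      // Pass 1: finish everything not owned by a background thread.
      for (JobSketch* job : jobs) {
        bool in_background;
        {
          std::lock_guard<std::mutex> lock(mu);
          in_background = running_in_background.count(job) != 0;
          pending_background.erase(job);  // keep it off background threads
        }
        if (!in_background) job->finished = true;  // stands in for the step loop
      }
      // Pass 2: wait for (and finalize) the background-owned remainder.
      for (JobSketch* job : jobs) {
        if (!job->finished) { /* FinishNow(job) would block here */ }
      }
    }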
diff --git a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
index d5ba34bc08..c58f19dd20 100644
--- a/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/compiler-dispatcher.h
@@ -18,7 +18,7 @@
#include "src/base/platform/semaphore.h"
#include "src/globals.h"
#include "src/identity-map.h"
-#include "testing/gtest/include/gtest/gtest_prod.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
@@ -85,9 +85,8 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
// Enqueue a job for initial parse. Returns true if a job was enqueued.
bool Enqueue(Handle<String> source, int start_pos, int end_position,
LanguageMode language_mode, int function_literal_id, bool native,
- bool module, bool is_named_expression, bool calls_eval,
- int compiler_hints, CompileJobFinishCallback* finish_callback,
- JobId* job_id);
+ bool module, bool is_named_expression, int compiler_hints,
+ CompileJobFinishCallback* finish_callback, JobId* job_id);
// Like Enqueue, but also advances the job so that it can potentially
// continue running on a background thread (if at all possible). Returns
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index 04df928727..2e375cc209 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -135,8 +135,8 @@ void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
}
void OptimizingCompileDispatcher::Flush(BlockingBehavior blocking_behavior) {
- if (FLAG_block_concurrent_recompilation) Unblock();
if (blocking_behavior == BlockingBehavior::kDontBlock) {
+ if (FLAG_block_concurrent_recompilation) Unblock();
base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
while (input_queue_length_ > 0) {
CompilationJob* job = input_queue_[InputQueueIndex(0)];
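The Flush hunk above narrows when blocked recompilation jobs are unblocked: only the non-blocking path releases them before draining the input queue, while the blocking path presumably defers that until it actually waits on the queues. A trivial restatement of the new control flow:

    enum class BlockingBehaviorSketch { kBlock, kDontBlock };

    void FlushSketch(BlockingBehaviorSketch blocking, bool flag_block_recompile) {
      if (blocking == BlockingBehaviorSketch::kDontBlock) {
        if (flag_block_recompile) { /* Unblock() */ }
        /* drain the input queue under the lock and return */
      }
      /* blocking path: unblock and wait further down in the original */
    }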
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index f1cdd7c9b0..c2d63fb041 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -8,7 +8,6 @@
#include <memory>
#include "src/asmjs/asm-js.h"
-#include "src/asmjs/asm-typer.h"
#include "src/assembler-inl.h"
#include "src/ast/ast-numbering.h"
#include "src/ast/prettyprinter.h"
@@ -30,6 +29,7 @@
#include "src/isolate-inl.h"
#include "src/log-inl.h"
#include "src/messages.h"
+#include "src/objects/map.h"
#include "src/parsing/parsing.h"
#include "src/parsing/rewriter.h"
#include "src/parsing/scanner-character-streams.h"
@@ -233,7 +233,7 @@ void CompilationJob::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
// TODO(turbofan): Move this to pipeline.cc once Crankshaft dies.
Isolate* const isolate = code->GetIsolate();
DCHECK(code->is_optimized_code());
- std::vector<Handle<Map>> maps;
+ MapHandles maps;
std::vector<Handle<HeapObject>> objects;
{
DisallowHeapAllocation no_gc;
@@ -471,6 +471,10 @@ CompilationJob::Status FinalizeUnoptimizedCompilationJob(CompilationJob* job) {
void SetSharedFunctionFlagsFromLiteral(FunctionLiteral* literal,
Handle<SharedFunctionInfo> shared_info) {
+ // Don't overwrite values set by the bootstrapper.
+ if (!shared_info->HasLength()) {
+ shared_info->set_length(literal->function_length());
+ }
shared_info->set_ast_node_count(literal->ast_node_count());
shared_info->set_has_duplicate_parameters(
literal->has_duplicate_parameters());
@@ -705,15 +709,25 @@ MUST_USE_RESULT MaybeHandle<Code> GetUnoptimizedCode(
return info->code();
}
-MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
+MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeCache(
Handle<JSFunction> function, BailoutId osr_ast_id) {
RuntimeCallTimerScope runtimeTimer(
function->GetIsolate(),
&RuntimeCallStats::CompileGetFromOptimizedCodeMap);
Handle<SharedFunctionInfo> shared(function->shared());
DisallowHeapAllocation no_gc;
- Code* code = shared->SearchOptimizedCodeMap(
- function->context()->native_context(), osr_ast_id);
+ Code* code = nullptr;
+ if (osr_ast_id.IsNone()) {
+ if (function->feedback_vector_cell()->value()->IsFeedbackVector()) {
+ FeedbackVector* feedback_vector = function->feedback_vector();
+ feedback_vector->EvictOptimizedCodeMarkedForDeoptimization(
+ function->shared(), "GetCodeFromOptimizedCodeCache");
+ code = feedback_vector->optimized_code();
+ }
+ } else {
+ code = function->context()->native_context()->SearchOSROptimizedCodeCache(
+ function->shared(), osr_ast_id);
+ }
if (code != nullptr) {
// Caching of optimized code enabled and optimized code found.
DCHECK(!code->marked_for_deoptimization());
@@ -723,7 +737,7 @@ MUST_USE_RESULT MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
return MaybeHandle<Code>();
}
-void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
+void InsertCodeIntoOptimizedCodeCache(CompilationInfo* info) {
Handle<Code> code = info->code();
if (code->kind() != Code::OPTIMIZED_FUNCTION) return; // Nothing to do.
@@ -737,8 +751,14 @@ void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
Handle<JSFunction> function = info->closure();
Handle<SharedFunctionInfo> shared(function->shared());
Handle<Context> native_context(function->context()->native_context());
- SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
- info->osr_ast_id());
+ if (info->osr_ast_id().IsNone()) {
+ Handle<FeedbackVector> vector =
+ handle(function->feedback_vector(), function->GetIsolate());
+ FeedbackVector::SetOptimizedCode(vector, code);
+ } else {
+ Context::AddToOSROptimizedCodeCache(native_context, shared, code,
+ info->osr_ast_id());
+ }
}
bool GetOptimizedCodeNow(CompilationJob* job) {
@@ -773,7 +793,7 @@ bool GetOptimizedCodeNow(CompilationJob* job) {
// Success!
job->RecordOptimizedCompilationStats();
DCHECK(!isolate->has_pending_exception());
- InsertCodeIntoOptimizedCodeMap(info);
+ InsertCodeIntoOptimizedCodeCache(info);
RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
return true;
}
@@ -848,7 +868,7 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
DCHECK_IMPLIES(ignition_osr, FLAG_ignition_osr);
Handle<Code> cached_code;
- if (GetCodeFromOptimizedCodeMap(function, osr_ast_id)
+ if (GetCodeFromOptimizedCodeCache(function, osr_ast_id)
.ToHandle(&cached_code)) {
if (FLAG_trace_opt) {
PrintF("[found optimized code for ");
@@ -1001,10 +1021,7 @@ CompilationJob::Status FinalizeOptimizedCompilationJob(CompilationJob* job) {
} else if (job->FinalizeJob() == CompilationJob::SUCCEEDED) {
job->RecordOptimizedCompilationStats();
RecordFunctionCompilation(CodeEventListener::LAZY_COMPILE_TAG, info);
- if (shared->SearchOptimizedCodeMap(info->context()->native_context(),
- info->osr_ast_id()) == nullptr) {
- InsertCodeIntoOptimizedCodeMap(info);
- }
+ InsertCodeIntoOptimizedCodeCache(info);
if (FLAG_trace_opt) {
PrintF("[completed optimizing ");
info->closure()->ShortPrint();
@@ -1035,73 +1052,71 @@ MaybeHandle<Code> GetLazyCode(Handle<JSFunction> function) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileCode");
AggregatedHistogramTimerScope timer(isolate->counters()->compile_lazy());
- Handle<Code> cached_code;
- if (GetCodeFromOptimizedCodeMap(function, BailoutId::None())
- .ToHandle(&cached_code)) {
- if (FLAG_trace_opt) {
- PrintF("[found optimized code for ");
- function->ShortPrint();
- PrintF(" during unoptimized compile]\n");
+ if (function->shared()->is_compiled()) {
+ // The function has already been compiled; return the optimized code if
+ // possible, otherwise fall back to the baseline code.
+ Handle<Code> cached_code;
+ if (GetCodeFromOptimizedCodeCache(function, BailoutId::None())
+ .ToHandle(&cached_code)) {
+ if (FLAG_trace_opt) {
+ PrintF("[found optimized code for ");
+ function->ShortPrint();
+ PrintF(" during unoptimized compile]\n");
+ }
+ DCHECK(function->shared()->is_compiled());
+ return cached_code;
}
- DCHECK(function->shared()->is_compiled());
- return cached_code;
- }
- if (function->shared()->is_compiled() &&
- function->shared()->marked_for_tier_up()) {
- DCHECK(FLAG_mark_shared_functions_for_tier_up);
+ if (function->shared()->marked_for_tier_up()) {
+ DCHECK(FLAG_mark_shared_functions_for_tier_up);
- function->shared()->set_marked_for_tier_up(false);
+ function->shared()->set_marked_for_tier_up(false);
- if (FLAG_trace_opt) {
- PrintF("[optimizing method ");
- function->ShortPrint();
- PrintF(" eagerly (shared function marked for tier up)]\n");
- }
+ if (FLAG_trace_opt) {
+ PrintF("[optimizing method ");
+ function->ShortPrint();
+ PrintF(" eagerly (shared function marked for tier up)]\n");
+ }
- Handle<Code> code;
- if (GetOptimizedCodeMaybeLater(function).ToHandle(&code)) {
- return code;
+ Handle<Code> code;
+ if (GetOptimizedCodeMaybeLater(function).ToHandle(&code)) {
+ return code;
+ }
}
- }
- if (function->shared()->is_compiled()) {
return Handle<Code>(function->shared()->code());
- }
-
- if (function->shared()->HasBytecodeArray()) {
- Handle<Code> entry = isolate->builtins()->InterpreterEntryTrampoline();
- function->shared()->ReplaceCode(*entry);
- return entry;
- }
+ } else {
+ // The function doesn't have any baseline compiled code yet; compile it now.
+ DCHECK(!function->shared()->HasBytecodeArray());
- ParseInfo parse_info(handle(function->shared()));
- Zone compile_zone(isolate->allocator(), ZONE_NAME);
- CompilationInfo info(&compile_zone, &parse_info, isolate, function);
- if (FLAG_preparser_scope_analysis) {
- Handle<SharedFunctionInfo> shared(function->shared());
- Handle<Script> script(Script::cast(function->shared()->script()));
- if (script->HasPreparsedScopeData()) {
- parse_info.preparsed_scope_data()->Deserialize(
- script->GetPreparsedScopeData());
+ ParseInfo parse_info(handle(function->shared()));
+ Zone compile_zone(isolate->allocator(), ZONE_NAME);
+ CompilationInfo info(&compile_zone, &parse_info, isolate, function);
+ if (FLAG_experimental_preparser_scope_analysis) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<Script> script(Script::cast(function->shared()->script()));
+ if (script->HasPreparsedScopeData()) {
+ parse_info.preparsed_scope_data()->Deserialize(
+ script->preparsed_scope_data());
+ }
}
- }
- Compiler::ConcurrencyMode inner_function_mode =
- FLAG_compiler_dispatcher_eager_inner ? Compiler::CONCURRENT
- : Compiler::NOT_CONCURRENT;
- Handle<Code> result;
- ASSIGN_RETURN_ON_EXCEPTION(
- isolate, result, GetUnoptimizedCode(&info, inner_function_mode), Code);
-
- if (FLAG_always_opt && !info.shared_info()->HasAsmWasmData()) {
- Handle<Code> opt_code;
- if (GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
- .ToHandle(&opt_code)) {
- result = opt_code;
+ Compiler::ConcurrencyMode inner_function_mode =
+ FLAG_compiler_dispatcher_eager_inner ? Compiler::CONCURRENT
+ : Compiler::NOT_CONCURRENT;
+ Handle<Code> result;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, result, GetUnoptimizedCode(&info, inner_function_mode), Code);
+
+ if (FLAG_always_opt && !info.shared_info()->HasAsmWasmData()) {
+ Handle<Code> opt_code;
+ if (GetOptimizedCode(function, Compiler::NOT_CONCURRENT)
+ .ToHandle(&opt_code)) {
+ result = opt_code;
+ }
}
- }
- return result;
+ return result;
+ }
}
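The GetLazyCode rewrite above replaces several independent early returns with one branch on whether the function is already compiled. A condensed model of the resulting control flow, with booleans standing in for the real lookups:

    enum class CodeKindSketch { kCached, kShared, kBaseline, kOptimized };

    CodeKindSketch GetLazyCodeSketch(bool is_compiled, bool has_cached_opt_code,
                                     bool marked_for_tier_up,
                                     bool tier_up_succeeds, bool always_opt) {
      if (is_compiled) {
        if (has_cached_opt_code) return CodeKindSketch::kCached;  // cache hit
        if (marked_for_tier_up && tier_up_succeeds)
          return CodeKindSketch::kOptimized;  // eager tier-up
        return CodeKindSketch::kShared;       // existing baseline code
      }
      // Not compiled yet: produce baseline code, optionally optimize right away.
      return always_opt ? CodeKindSketch::kOptimized  // --always-opt, non-asm.js
                        : CodeKindSketch::kBaseline;
    }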
@@ -1177,9 +1192,9 @@ Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
if (!script.is_null()) {
script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
- if (FLAG_preparser_scope_analysis) {
- Handle<FixedUint32Array> data(
- parse_info->preparsed_scope_data()->Serialize(isolate));
+ if (FLAG_experimental_preparser_scope_analysis) {
+ Handle<PodArray<uint32_t>> data =
+ parse_info->preparsed_scope_data()->Serialize(isolate);
script->set_preparsed_scope_data(*data);
}
}
@@ -1874,18 +1889,17 @@ void Compiler::PostInstantiation(Handle<JSFunction> function,
function->MarkForOptimization();
}
- Code* code = shared->SearchOptimizedCodeMap(
- function->context()->native_context(), BailoutId::None());
- if (code != nullptr) {
- // Caching of optimized code enabled and optimized code found.
- DCHECK(!code->marked_for_deoptimization());
- DCHECK(function->shared()->is_compiled());
- function->ReplaceCode(code);
- }
-
if (shared->is_compiled()) {
// TODO(mvstanton): pass pretenure flag to EnsureLiterals.
JSFunction::EnsureLiterals(function);
+
+ Code* code = function->feedback_vector()->optimized_code();
+ if (code != nullptr) {
+ // Caching of optimized code enabled and optimized code found.
+ DCHECK(!code->marked_for_deoptimization());
+ DCHECK(function->shared()->is_compiled());
+ function->ReplaceCode(code);
+ }
}
}
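Taken together, these compiler.cc hunks retire SharedFunctionInfo::SearchOptimizedCodeMap in favor of two caches: non-OSR code hangs off the function's FeedbackVector, while OSR code is cached on the native context, keyed by the shared function and the OSR AST id. A small model of the lookup split, with stand-in types:

    #include <map>
    #include <utility>

    struct CodeSketch {};
    struct FeedbackVectorSketch { CodeSketch* optimized_code = nullptr; };
    // Key: (shared-function identity, OSR AST id); one cache per native context.
    using OsrKeySketch = std::pair<const void*, int>;

    CodeSketch* LookupOptimizedCode(FeedbackVectorSketch* vector,
                                    std::map<OsrKeySketch, CodeSketch*>& osr_cache,
                                    const void* shared, int osr_ast_id) {
      if (osr_ast_id < 0) {  // modeling BailoutId::None(): regular entry point
        return vector ? vector->optimized_code : nullptr;
      }
      auto it = osr_cache.find({shared, osr_ast_id});
      return it == osr_cache.end() ? nullptr : it->second;
    }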
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index fc2e223996..5e22a00139 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -203,6 +203,7 @@ class V8_EXPORT_PRIVATE CompilationJob {
State state() const { return state_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const;
+ virtual size_t AllocatedMemory() const { return 0; }
protected:
// Overridden by the actual implementation.
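The new AllocatedMemory virtual defaults to zero, so existing jobs need no changes; a job that tracks its allocations can override it to report a footprint. A hypothetical override (the subclass and field are illustrative, not from V8):

    #include <cstddef>

    class CompilationJobSketch {
     public:
      virtual ~CompilationJobSketch() = default;
      virtual size_t AllocatedMemory() const { return 0; }  // default: unknown
    };

    class ZoneBackedJobSketch final : public CompilationJobSketch {
     public:
      size_t AllocatedMemory() const override { return zone_bytes_; }
     private:
      size_t zone_bytes_ = 0;  // e.g. bytes held by the job's compilation zone
    };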
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 015bf85758..3a26acc668 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -9,6 +9,8 @@ danno@chromium.org
tebbi@chromium.org
per-file wasm-*=ahaas@chromium.org
+per-file wasm-*=bbudge@chromium.org
+per-file wasm-*=bradnelson@chromium.org
per-file wasm-*=clemensh@chromium.org
per-file int64-lowering.*=ahaas@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 11925a84db..5fbbdd09da 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -180,6 +180,24 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
}
// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectFunction() {
+ FieldAccess access = {kTaggedBase, JSGeneratorObject::kFunctionOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Function(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSGeneratorObjectReceiver() {
+ FieldAccess access = {kTaggedBase, JSGeneratorObject::kReceiverOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
FieldAccess access = {
kTaggedBase, JSGeneratorObject::kContinuationOffset,
@@ -199,15 +217,6 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
return access;
}
-// static
-FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos() {
- FieldAccess access = {
- kTaggedBase, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier};
- return access;
-}
// static
FieldAccess AccessBuilder::ForJSGeneratorObjectRegisterFile() {
@@ -230,6 +239,36 @@ FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
}
// static
+FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectQueue() {
+ FieldAccess access = {
+ kTaggedBase, JSAsyncGeneratorObject::kQueueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos() {
+ FieldAccess access = {
+ kTaggedBase, JSAsyncGeneratorObject::kAwaitInputOrDebugPosOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSAsyncGeneratorObjectAwaitedPromise() {
+ FieldAccess access = {
+ kTaggedBase, JSAsyncGeneratorObject::kAwaitedPromiseOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::NonInternal(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
+ return access;
+}
+
+// static
FieldAccess AccessBuilder::ForJSArrayLength(ElementsKind elements_kind) {
TypeCache const& type_cache = TypeCache::Get();
FieldAccess access = {kTaggedBase,
@@ -412,9 +451,9 @@ FieldAccess AccessBuilder::ForFixedTypedArrayBaseExternalPointer() {
}
// static
-FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
+FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridge() {
FieldAccess access = {
- kTaggedBase, DescriptorArray::kEnumCacheOffset,
+ kTaggedBase, DescriptorArray::kEnumCacheBridgeOffset,
Handle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::TaggedPointer(),
kPointerWriteBarrier};
@@ -737,9 +776,9 @@ FieldAccess AccessBuilder::ForArgumentsCallee() {
FieldAccess AccessBuilder::ForFixedArraySlot(
size_t index, WriteBarrierKind write_barrier_kind) {
int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
- FieldAccess access = {kTaggedBase, offset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::NonInternal(), MachineType::AnyTagged(),
+ FieldAccess access = {kTaggedBase, offset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
write_barrier_kind};
return access;
}
@@ -816,7 +855,7 @@ ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
access.machine_type = MachineType::Float64();
break;
case FAST_HOLEY_DOUBLE_ELEMENTS:
- access.type = Type::Number();
+ access.type = Type::NumberOrHole();
access.write_barrier_kind = kNoWriteBarrier;
access.machine_type = MachineType::Float64();
break;
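A pattern worth noting across the accessors above: fields declared MachineType::TaggedPointer() pair with kPointerWriteBarrier, while MachineType::AnyTagged() fields, which may also hold Smis, pair with kFullWriteBarrier. A schematic of the aggregate being brace-initialized, with descriptive stand-in fields:

    // Field order mirrors the initializers above; names are stand-ins.
    struct FieldAccessSketch {
      int base;             // kTaggedBase: offset measured from a tagged pointer
      int offset;           // byte offset of the field within the object
      const char* name;     // optional debug name (Handle<Name>() when absent)
      const void* map;      // optional stable value map (MaybeHandle<Map>())
      const char* type;     // static type bound, e.g. NonInternal or Function
      const char* machine;  // representation: AnyTagged, TaggedPointer, Float64
      int write_barrier;    // full barrier for maybe-Smi values, pointer
                            // barrier for values known to be heap pointers
    };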
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index 668a720740..b4c3ed0615 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -82,16 +82,28 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to JSGeneratorObject::input_or_debug_pos() field.
static FieldAccess ForJSGeneratorObjectInputOrDebugPos();
- // Provides access to JSAsyncGeneratorObject::await_input_or_debug_pos()
- // field.
- static FieldAccess ForJSAsyncGeneratorObjectAwaitInputOrDebugPos();
-
// Provides access to JSGeneratorObject::register_file() field.
static FieldAccess ForJSGeneratorObjectRegisterFile();
+ // Provides access to JSGeneratorObject::function() field.
+ static FieldAccess ForJSGeneratorObjectFunction();
+
+ // Provides access to JSGeneratorObject::receiver() field.
+ static FieldAccess ForJSGeneratorObjectReceiver();
+
// Provides access to JSGeneratorObject::resume_mode() field.
static FieldAccess ForJSGeneratorObjectResumeMode();
+ // Provides access to JSAsyncGeneratorObject::queue() field.
+ static FieldAccess ForJSAsyncGeneratorObjectQueue();
+
+ // Provides access to JSAsyncGeneratorObject::await_input_or_debug_pos()
+ // field.
+ static FieldAccess ForJSAsyncGeneratorObjectAwaitInputOrDebugPos();
+
+ // Provides access to JSAsyncGeneratorObject::awaited_promise() field.
+ static FieldAccess ForJSAsyncGeneratorObjectAwaitedPromise();
+
// Provides access to JSArray::length() field.
static FieldAccess ForJSArrayLength(ElementsKind elements_kind);
@@ -140,8 +152,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to FixedTypedArrayBase::external_pointer() field.
static FieldAccess ForFixedTypedArrayBaseExternalPointer();
- // Provides access to DescriptorArray::enum_cache() field.
- static FieldAccess ForDescriptorArrayEnumCache();
+ // Provides access to DescriptorArray::enum_cache_bridge() field.
+ static FieldAccess ForDescriptorArrayEnumCacheBridge();
// Provides access to DescriptorArray::enum_cache_bridge_cache() field.
static FieldAccess ForDescriptorArrayEnumCacheBridgeCache();
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index c3096e9974..196bf9e896 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -61,26 +61,26 @@ std::ostream& operator<<(std::ostream& os, AccessMode access_mode) {
ElementAccessInfo::ElementAccessInfo() {}
-ElementAccessInfo::ElementAccessInfo(MapList const& receiver_maps,
+ElementAccessInfo::ElementAccessInfo(MapHandles const& receiver_maps,
ElementsKind elements_kind)
: elements_kind_(elements_kind), receiver_maps_(receiver_maps) {}
// static
-PropertyAccessInfo PropertyAccessInfo::NotFound(MapList const& receiver_maps,
+PropertyAccessInfo PropertyAccessInfo::NotFound(MapHandles const& receiver_maps,
MaybeHandle<JSObject> holder) {
return PropertyAccessInfo(holder, receiver_maps);
}
// static
PropertyAccessInfo PropertyAccessInfo::DataConstant(
- MapList const& receiver_maps, Handle<Object> constant,
+ MapHandles const& receiver_maps, Handle<Object> constant,
MaybeHandle<JSObject> holder) {
return PropertyAccessInfo(kDataConstant, holder, constant, receiver_maps);
}
// static
PropertyAccessInfo PropertyAccessInfo::DataField(
- PropertyConstness constness, MapList const& receiver_maps,
+ PropertyConstness constness, MapHandles const& receiver_maps,
FieldIndex field_index, MachineRepresentation field_representation,
Type* field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map) {
@@ -92,7 +92,7 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
// static
PropertyAccessInfo PropertyAccessInfo::AccessorConstant(
- MapList const& receiver_maps, Handle<Object> constant,
+ MapHandles const& receiver_maps, Handle<Object> constant,
MaybeHandle<JSObject> holder) {
return PropertyAccessInfo(kAccessorConstant, holder, constant, receiver_maps);
}
@@ -103,7 +103,7 @@ PropertyAccessInfo::PropertyAccessInfo()
field_type_(Type::None()) {}
PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
- MapList const& receiver_maps)
+ MapHandles const& receiver_maps)
: kind_(kNotFound),
receiver_maps_(receiver_maps),
holder_(holder),
@@ -112,7 +112,7 @@ PropertyAccessInfo::PropertyAccessInfo(MaybeHandle<JSObject> holder,
PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
Handle<Object> constant,
- MapList const& receiver_maps)
+ MapHandles const& receiver_maps)
: kind_(kind),
receiver_maps_(receiver_maps),
constant_(constant),
@@ -123,7 +123,8 @@ PropertyAccessInfo::PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
PropertyAccessInfo::PropertyAccessInfo(
Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
FieldIndex field_index, MachineRepresentation field_representation,
- Type* field_type, MaybeHandle<Map> field_map, MapList const& receiver_maps)
+ Type* field_type, MaybeHandle<Map> field_map,
+ MapHandles const& receiver_maps)
: kind_(kind),
receiver_maps_(receiver_maps),
transition_map_(transition_map),
@@ -133,7 +134,8 @@ PropertyAccessInfo::PropertyAccessInfo(
field_type_(field_type),
field_map_(field_map) {}
-bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
+bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that,
+ AccessMode access_mode, Zone* zone) {
if (this->kind_ != that->kind_) return false;
if (this->holder_.address() != that->holder_.address()) return false;
@@ -143,14 +145,45 @@ bool PropertyAccessInfo::Merge(PropertyAccessInfo const* that) {
case kDataField:
case kDataConstantField: {
- // Check if we actually access the same field.
- if (this->kind_ == that->kind_ &&
- this->transition_map_.address() == that->transition_map_.address() &&
- this->field_index_ == that->field_index_ &&
- this->field_map_.address() == that->field_map_.address() &&
- this->field_type_->Is(that->field_type_) &&
- that->field_type_->Is(this->field_type_) &&
- this->field_representation_ == that->field_representation_) {
+ // Check if we actually access the same field (we use the
+ // GetFieldAccessStubKey method here, just like the ICs do,
+ // since that way we only compare the relevant bits of the
+ // field indices).
+ if (this->field_index_.GetFieldAccessStubKey() ==
+ that->field_index_.GetFieldAccessStubKey()) {
+ switch (access_mode) {
+ case AccessMode::kLoad: {
+ if (this->field_representation_ != that->field_representation_) {
+ if (!IsAnyTagged(this->field_representation_) ||
+ !IsAnyTagged(that->field_representation_)) {
+ return false;
+ }
+ this->field_representation_ = MachineRepresentation::kTagged;
+ }
+ if (this->field_map_.address() != that->field_map_.address()) {
+ this->field_map_ = MaybeHandle<Map>();
+ }
+ break;
+ }
+ case AccessMode::kStore:
+ case AccessMode::kStoreInLiteral: {
+ // For stores, the field map and field representation information
+ // must match exactly, otherwise we cannot merge the stores. We
+ // also need to make sure that in case of transitioning stores,
+ // the transition targets match.
+ if (this->field_map_.address() != that->field_map_.address() ||
+ this->field_representation_ != that->field_representation_ ||
+ this->transition_map_.address() !=
+ that->transition_map_.address()) {
+ return false;
+ }
+ break;
+ }
+ }
+ // Merge the field type.
+ this->field_type_ =
+ Type::Union(this->field_type_, that->field_type_, zone);
+ // Merge the receiver maps.
this->receiver_maps_.insert(this->receiver_maps_.end(),
that->receiver_maps_.begin(),
that->receiver_maps_.end());
@@ -199,37 +232,52 @@ bool AccessInfoFactory::ComputeElementAccessInfo(
// Check if it is safe to inline element access for the {map}.
if (!CanInlineElementAccess(map)) return false;
ElementsKind const elements_kind = map->elements_kind();
- *access_info = ElementAccessInfo(MapList{map}, elements_kind);
+ *access_info = ElementAccessInfo(MapHandles{map}, elements_kind);
return true;
}
-
bool AccessInfoFactory::ComputeElementAccessInfos(
- MapHandleList const& maps, AccessMode access_mode,
+ MapHandles const& maps, AccessMode access_mode,
ZoneVector<ElementAccessInfo>* access_infos) {
+ if (access_mode == AccessMode::kLoad) {
+ // For polymorphic loads of similar elements kinds (i.e. all tagged or all
+ // double), always use the "worst case" code without a transition. This is
+ // much faster than transitioning the elements to the worst case, trading a
+ // TransitionElementsKind for a CheckMaps and avoiding mutation of the array.
+ ElementAccessInfo access_info;
+ if (ConsolidateElementLoad(maps, &access_info)) {
+ access_infos->push_back(access_info);
+ return true;
+ }
+ }
+
// Collect possible transition targets.
- MapHandleList possible_transition_targets(maps.length());
+ MapHandles possible_transition_targets;
+ possible_transition_targets.reserve(maps.size());
for (Handle<Map> map : maps) {
if (Map::TryUpdate(map).ToHandle(&map)) {
if (CanInlineElementAccess(map) &&
IsFastElementsKind(map->elements_kind()) &&
GetInitialFastElementsKind() != map->elements_kind()) {
- possible_transition_targets.Add(map);
+ possible_transition_targets.push_back(map);
}
}
}
// Separate the actual receiver maps and the possible transition sources.
- MapHandleList receiver_maps(maps.length());
- MapTransitionList transitions(maps.length());
+ MapHandles receiver_maps;
+ receiver_maps.reserve(maps.size());
+ MapTransitionList transitions(maps.size());
for (Handle<Map> map : maps) {
if (Map::TryUpdate(map).ToHandle(&map)) {
- Map* transition_target =
- map->FindElementsKindTransitionedMap(&possible_transition_targets);
+ // Don't generate elements kind transitions from stable maps.
+ Map* transition_target = map->is_stable()
+ ? nullptr
+ : map->FindElementsKindTransitionedMap(
+ possible_transition_targets);
if (transition_target == nullptr) {
- receiver_maps.Add(map);
+ receiver_maps.push_back(map);
} else {
- DCHECK(!map->is_stable());
transitions.push_back(std::make_pair(map, handle(transition_target)));
}
}
@@ -335,7 +383,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
}
}
*access_info = PropertyAccessInfo::DataField(
- details.constness(), MapList{receiver_map}, field_index,
+ details.constness(), MapHandles{receiver_map}, field_index,
field_representation, field_type, field_map, holder);
return true;
} else {
@@ -349,7 +397,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
if (details.kind() == kData) {
DCHECK(!FLAG_track_constant_fields);
*access_info = PropertyAccessInfo::DataConstant(
- MapList{receiver_map},
+ MapHandles{receiver_map},
handle(descriptors->GetValue(number), isolate()), holder);
return true;
} else {
@@ -380,7 +428,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
}
}
*access_info = PropertyAccessInfo::AccessorConstant(
- MapList{receiver_map}, accessor, holder);
+ MapHandles{receiver_map}, accessor, holder);
return true;
}
}
@@ -423,7 +471,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
// on the language mode of the load operation.
// Implemented according to ES6 section 9.1.8 [[Get]] (P, Receiver)
*access_info =
- PropertyAccessInfo::NotFound(MapList{receiver_map}, holder);
+ PropertyAccessInfo::NotFound(MapHandles{receiver_map}, holder);
return true;
} else {
return false;
@@ -442,7 +490,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfo(
}
bool AccessInfoFactory::ComputePropertyAccessInfos(
- MapHandleList const& maps, Handle<Name> name, AccessMode access_mode,
+ MapHandles const& maps, Handle<Name> name, AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* access_infos) {
for (Handle<Map> map : maps) {
if (Map::TryUpdate(map).ToHandle(&map)) {
@@ -453,7 +501,7 @@ bool AccessInfoFactory::ComputePropertyAccessInfos(
// Try to merge the {access_info} with an existing one.
bool merged = false;
for (PropertyAccessInfo& other_info : *access_infos) {
- if (other_info.Merge(&access_info)) {
+ if (other_info.Merge(&access_info, access_mode, zone())) {
merged = true;
break;
}
@@ -464,6 +512,47 @@ bool AccessInfoFactory::ComputePropertyAccessInfos(
return true;
}
+namespace {
+
+Maybe<ElementsKind> GeneralizeElementsKind(ElementsKind this_kind,
+ ElementsKind that_kind) {
+ if (IsHoleyElementsKind(this_kind)) {
+ that_kind = GetHoleyElementsKind(that_kind);
+ } else if (IsHoleyElementsKind(that_kind)) {
+ this_kind = GetHoleyElementsKind(this_kind);
+ }
+ if (this_kind == that_kind) return Just(this_kind);
+ if (IsFastDoubleElementsKind(that_kind) ==
+ IsFastDoubleElementsKind(this_kind)) {
+ if (IsMoreGeneralElementsKindTransition(that_kind, this_kind)) {
+ return Just(this_kind);
+ }
+ if (IsMoreGeneralElementsKindTransition(this_kind, that_kind)) {
+ return Just(that_kind);
+ }
+ }
+ return Nothing<ElementsKind>();
+}
+
+} // namespace
+
+bool AccessInfoFactory::ConsolidateElementLoad(MapHandles const& maps,
+ ElementAccessInfo* access_info) {
+ if (maps.empty()) return false;
+ InstanceType instance_type = maps.front()->instance_type();
+ ElementsKind elements_kind = maps.front()->elements_kind();
+ for (Handle<Map> map : maps) {
+ if (!CanInlineElementAccess(map) || map->instance_type() != instance_type) {
+ return false;
+ }
+ if (!GeneralizeElementsKind(elements_kind, map->elements_kind())
+ .To(&elements_kind)) {
+ return false;
+ }
+ }
+ *access_info = ElementAccessInfo(maps, elements_kind);
+ return true;
+}
bool AccessInfoFactory::LookupSpecialFieldAccessor(
Handle<Map> map, Handle<Name> name, PropertyAccessInfo* access_info) {
@@ -497,8 +586,9 @@ bool AccessInfoFactory::LookupSpecialFieldAccessor(
}
}
// Special fields are always mutable.
- *access_info = PropertyAccessInfo::DataField(
- kMutable, MapList{map}, field_index, field_representation, field_type);
+ *access_info =
+ PropertyAccessInfo::DataField(kMutable, MapHandles{map}, field_index,
+ field_representation, field_type);
return true;
}
return false;
@@ -556,8 +646,8 @@ bool AccessInfoFactory::LookupTransition(Handle<Map> map, Handle<Name> name,
dependencies()->AssumeMapNotDeprecated(transition_map);
// Transitioning stores are never stores to constant fields.
*access_info = PropertyAccessInfo::DataField(
- kMutable, MapList{map}, field_index, field_representation, field_type,
- field_map, holder, transition_map);
+ kMutable, MapHandles{map}, field_index, field_representation,
+ field_type, field_map, holder, transition_map);
return true;
}
return false;
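GeneralizeElementsKind above implements a small lattice: holey-ness is contagious, while tagged and double storage never merge, since their loads read different representations. A deliberately simplified two-axis model of the same idea (the real code additionally ranks kinds within the same storage class via IsMoreGeneralElementsKindTransition):

    #include <optional>

    struct KindSketch { bool is_double; bool is_holey; };

    std::optional<KindSketch> GeneralizeSketch(KindSketch a, KindSketch b) {
      // Holey-ness propagates to both sides before comparing.
      if (a.is_holey || b.is_holey) a.is_holey = b.is_holey = true;
      // Double and tagged backing stores cannot be consolidated.
      if (a.is_double != b.is_double) return std::nullopt;
      return a;  // identical after holey promotion (a simplification)
    }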
diff --git a/deps/v8/src/compiler/access-info.h b/deps/v8/src/compiler/access-info.h
index 809aa83e47..7ec8deb8f0 100644
--- a/deps/v8/src/compiler/access-info.h
+++ b/deps/v8/src/compiler/access-info.h
@@ -10,6 +10,7 @@
#include "src/field-index.h"
#include "src/machine-type.h"
#include "src/objects.h"
+#include "src/objects/map.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@@ -31,8 +32,6 @@ enum class AccessMode { kLoad, kStore, kStoreInLiteral };
std::ostream& operator<<(std::ostream&, AccessMode);
-typedef std::vector<Handle<Map>> MapList;
-
// Mapping of transition source to transition target.
typedef std::vector<std::pair<Handle<Map>, Handle<Map>>> MapTransitionList;
@@ -40,16 +39,17 @@ typedef std::vector<std::pair<Handle<Map>, Handle<Map>>> MapTransitionList;
class ElementAccessInfo final {
public:
ElementAccessInfo();
- ElementAccessInfo(MapList const& receiver_maps, ElementsKind elements_kind);
+ ElementAccessInfo(MapHandles const& receiver_maps,
+ ElementsKind elements_kind);
ElementsKind elements_kind() const { return elements_kind_; }
- MapList const& receiver_maps() const { return receiver_maps_; }
+ MapHandles const& receiver_maps() const { return receiver_maps_; }
MapTransitionList& transitions() { return transitions_; }
MapTransitionList const& transitions() const { return transitions_; }
private:
ElementsKind elements_kind_;
- MapList receiver_maps_;
+ MapHandles receiver_maps_;
MapTransitionList transitions_;
};
@@ -66,24 +66,25 @@ class PropertyAccessInfo final {
kAccessorConstant
};
- static PropertyAccessInfo NotFound(MapList const& receiver_maps,
+ static PropertyAccessInfo NotFound(MapHandles const& receiver_maps,
MaybeHandle<JSObject> holder);
- static PropertyAccessInfo DataConstant(MapList const& receiver_maps,
+ static PropertyAccessInfo DataConstant(MapHandles const& receiver_maps,
Handle<Object> constant,
MaybeHandle<JSObject> holder);
static PropertyAccessInfo DataField(
- PropertyConstness constness, MapList const& receiver_maps,
+ PropertyConstness constness, MapHandles const& receiver_maps,
FieldIndex field_index, MachineRepresentation field_representation,
Type* field_type, MaybeHandle<Map> field_map = MaybeHandle<Map>(),
MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
MaybeHandle<Map> transition_map = MaybeHandle<Map>());
- static PropertyAccessInfo AccessorConstant(MapList const& receiver_maps,
+ static PropertyAccessInfo AccessorConstant(MapHandles const& receiver_maps,
Handle<Object> constant,
MaybeHandle<JSObject> holder);
PropertyAccessInfo();
- bool Merge(PropertyAccessInfo const* that) WARN_UNUSED_RESULT;
+ bool Merge(PropertyAccessInfo const* that, AccessMode access_mode,
+ Zone* zone) WARN_UNUSED_RESULT;
bool IsNotFound() const { return kind() == kNotFound; }
bool IsDataConstant() const { return kind() == kDataConstant; }
@@ -105,21 +106,21 @@ class PropertyAccessInfo final {
return field_representation_;
}
MaybeHandle<Map> field_map() const { return field_map_; }
- MapList const& receiver_maps() const { return receiver_maps_; }
+ MapHandles const& receiver_maps() const { return receiver_maps_; }
private:
PropertyAccessInfo(MaybeHandle<JSObject> holder,
- MapList const& receiver_maps);
+ MapHandles const& receiver_maps);
PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
- Handle<Object> constant, MapList const& receiver_maps);
+ Handle<Object> constant, MapHandles const& receiver_maps);
PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
MaybeHandle<Map> transition_map, FieldIndex field_index,
MachineRepresentation field_representation,
Type* field_type, MaybeHandle<Map> field_map,
- MapList const& receiver_maps);
+ MapHandles const& receiver_maps);
Kind kind_;
- MapList receiver_maps_;
+ MapHandles receiver_maps_;
Handle<Object> constant_;
MaybeHandle<Map> transition_map_;
MaybeHandle<JSObject> holder_;
@@ -138,17 +139,18 @@ class AccessInfoFactory final {
bool ComputeElementAccessInfo(Handle<Map> map, AccessMode access_mode,
ElementAccessInfo* access_info);
- bool ComputeElementAccessInfos(MapHandleList const& maps,
- AccessMode access_mode,
+ bool ComputeElementAccessInfos(MapHandles const& maps, AccessMode access_mode,
ZoneVector<ElementAccessInfo>* access_infos);
bool ComputePropertyAccessInfo(Handle<Map> map, Handle<Name> name,
AccessMode access_mode,
PropertyAccessInfo* access_info);
- bool ComputePropertyAccessInfos(MapHandleList const& maps, Handle<Name> name,
+ bool ComputePropertyAccessInfos(MapHandles const& maps, Handle<Name> name,
AccessMode access_mode,
ZoneVector<PropertyAccessInfo>* access_infos);
private:
+ bool ConsolidateElementLoad(MapHandles const& maps,
+ ElementAccessInfo* access_info);
bool LookupSpecialFieldAccessor(Handle<Map> map, Handle<Name> name,
PropertyAccessInfo* access_info);
bool LookupTransition(Handle<Map> map, Handle<Name> name,
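Two notes on this header. The removed MapList typedef was std::vector<Handle<Map>>; given the new include of src/objects/map.h, MapHandles is presumably the equivalent alias now defined next to Map itself, so call sites change in name only. And Merge gains AccessMode and Zone parameters because merged infos now union their field types, which requires a zone to allocate the union type in. The alias change, with stand-in types:

    #include <vector>

    template <typename T> struct HandleSketch { T* location = nullptr; };
    struct MapSketch {};

    // Removed here:                  typedef std::vector<Handle<Map>> MapList;
    // Assumed in src/objects/map.h:  typedef std::vector<Handle<Map>> MapHandles;
    using MapHandlesSketch = std::vector<HandleSketch<MapSketch>>;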
diff --git a/deps/v8/src/compiler/arm/code-generator-arm.cc b/deps/v8/src/compiler/arm/code-generator-arm.cc
index f2b7912ec5..953b6a15ea 100644
--- a/deps/v8/src/compiler/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/arm/code-generator-arm.cc
@@ -347,6 +347,14 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
return kNoCondition;
}
+int GetVtblTableSize(const Simd128Register& src0, const Simd128Register& src1) {
+ // If unary shuffle, table is src0 (2 d-registers).
+ if (src0.is(src1)) return 2;
+ // Binary shuffle: the table is src0 and src1, which must be consecutive.
+ DCHECK_EQ(src0.code() + 1, src1.code());
+ return 4; // 4 d-registers.
+}
+
} // namespace
#define ASSEMBLE_CHECKED_LOAD_FP(Type) \
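On the GetVtblTableSize helper above: NEON's vtbl instruction indexes into a list of one to four consecutive d-registers, so a unary shuffle can use the two d-halves of src0 as the table, while a binary shuffle needs src0 and src1 adjacent so their four d-registers form a single list. A standalone restatement with a mock register type:

    struct QRegSketch {
      int code;
      bool is(const QRegSketch& other) const { return code == other.code; }
    };

    int GetVtblTableSizeSketch(const QRegSketch& src0, const QRegSketch& src1) {
      if (src0.is(src1)) return 2;  // unary: one q-reg aliases two d-regs
      // Binary: the real code DCHECKs src0.code() + 1 == src1.code() so the
      // four d-registers are consecutive, as vtbl requires.
      return 4;
    }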
@@ -496,6 +504,41 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
DCHECK_EQ(LeaveCC, i.OutputSBit()); \
} while (0)
+#define ASSEMBLE_NEON_NARROWING_OP(dt) \
+ do { \
+ Simd128Register dst = i.OutputSimd128Register(), \
+ src0 = i.InputSimd128Register(0), \
+ src1 = i.InputSimd128Register(1); \
+ if (dst.is(src0) && dst.is(src1)) { \
+ __ vqmovn(dt, dst.low(), src0); \
+ __ vmov(dst.high(), dst.low()); \
+ } else if (dst.is(src0)) { \
+ __ vqmovn(dt, dst.low(), src0); \
+ __ vqmovn(dt, dst.high(), src1); \
+ } else { \
+ __ vqmovn(dt, dst.high(), src1); \
+ __ vqmovn(dt, dst.low(), src0); \
+ } \
+ } while (0)
+
+#define ASSEMBLE_NEON_PAIRWISE_OP(op, size) \
+ do { \
+ Simd128Register dst = i.OutputSimd128Register(), \
+ src0 = i.InputSimd128Register(0), \
+ src1 = i.InputSimd128Register(1); \
+ if (dst.is(src0)) { \
+ __ op(size, dst.low(), src0.low(), src0.high()); \
+ if (dst.is(src1)) { \
+ __ vmov(dst.high(), dst.low()); \
+ } else { \
+ __ op(size, dst.high(), src1.low(), src1.high()); \
+ } \
+ } else { \
+ __ op(size, dst.high(), src1.low(), src1.high()); \
+ __ op(size, dst.low(), src0.low(), src0.high()); \
+ } \
+ } while (0)
+
void CodeGenerator::AssembleDeconstructFrame() {
__ LeaveFrame(StackFrame::MANUAL);
unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
@@ -503,9 +546,6 @@ void CodeGenerator::AssembleDeconstructFrame() {
void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
- if (FLAG_enable_embedded_constant_pool) {
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
- }
__ ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
__ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
@@ -1572,17 +1612,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmF32x4Splat: {
- __ vdup(i.OutputSimd128Register(), i.InputFloatRegister(0));
+ int src_code = i.InputFloatRegister(0).code();
+ __ vdup(Neon32, i.OutputSimd128Register(),
+ DwVfpRegister::from_code(src_code / 2), src_code & 0x1);
break;
}
case kArmF32x4ExtractLane: {
__ ExtractLane(i.OutputFloatRegister(), i.InputSimd128Register(0),
- kScratchReg, i.InputInt8(1));
+ i.InputInt8(1));
break;
}
case kArmF32x4ReplaceLane: {
__ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputFloatRegister(2), kScratchReg, i.InputInt8(1));
+ i.InputFloatRegister(2), i.InputInt8(1));
break;
}
case kArmF32x4SConvertI32x4: {
@@ -1614,6 +1656,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kArmF32x4AddHoriz: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // Make sure we don't overwrite source data before it's used.
+ if (dst.is(src0)) {
+ __ vpadd(dst.low(), src0.low(), src0.high());
+ if (dst.is(src1)) {
+ __ vmov(dst.high(), dst.low());
+ } else {
+ __ vpadd(dst.high(), src1.low(), src1.high());
+ }
+ } else {
+ __ vpadd(dst.high(), src1.low(), src1.high());
+ __ vpadd(dst.low(), src0.low(), src0.high());
+ }
+ break;
+ }
case kArmF32x4Sub: {
__ vsub(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1634,16 +1694,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kArmF32x4RecipRefine: {
- __ vrecps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
- case kArmF32x4RecipSqrtRefine: {
- __ vrsqrts(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
- break;
- }
case kArmF32x4Eq: {
__ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1712,6 +1762,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kArmI32x4AddHoriz:
+ ASSEMBLE_NEON_PAIRWISE_OP(vpadd, Neon32);
+ break;
case kArmI32x4Sub: {
__ vsub(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1831,25 +1884,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt4(1));
break;
}
- case kArmI16x8SConvertI32x4: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- // Take care not to overwrite a source register before it's used.
- if (dst.is(src0) && dst.is(src1)) {
- __ vqmovn(NeonS16, dst.low(), src0);
- __ vmov(dst.high(), dst.low());
- } else if (dst.is(src0)) {
- // dst is src0, so narrow src0 first.
- __ vqmovn(NeonS16, dst.low(), src0);
- __ vqmovn(NeonS16, dst.high(), src1);
- } else {
- // dst may alias src1, so narrow src1 first.
- __ vqmovn(NeonS16, dst.high(), src1);
- __ vqmovn(NeonS16, dst.low(), src0);
- }
+ case kArmI16x8SConvertI32x4:
+ ASSEMBLE_NEON_NARROWING_OP(NeonS16);
break;
- }
case kArmI16x8Add: {
__ vadd(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1860,6 +1897,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
+ case kArmI16x8AddHoriz:
+ ASSEMBLE_NEON_PAIRWISE_OP(vpadd, Neon16);
+ break;
case kArmI16x8Sub: {
__ vsub(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1922,25 +1962,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt4(1));
break;
}
- case kArmI16x8UConvertI32x4: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- // Take care not to overwrite a source register before it's used.
- if (dst.is(src0) && dst.is(src1)) {
- __ vqmovn(NeonU16, dst.low(), src0);
- __ vmov(dst.high(), dst.low());
- } else if (dst.is(src0)) {
- // dst is src0, so narrow src0 first.
- __ vqmovn(NeonU16, dst.low(), src0);
- __ vqmovn(NeonU16, dst.high(), src1);
- } else {
- // dst may alias src1, so narrow src1 first.
- __ vqmovn(NeonU16, dst.high(), src1);
- __ vqmovn(NeonU16, dst.low(), src0);
- }
+ case kArmI16x8UConvertI32x4:
+ ASSEMBLE_NEON_NARROWING_OP(NeonU16);
break;
- }
case kArmI16x8AddSaturateU: {
__ vqadd(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -1999,25 +2023,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt3(1));
break;
}
- case kArmI8x16SConvertI16x8: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- // Take care not to overwrite a source register before it's used.
- if (dst.is(src0) && dst.is(src1)) {
- __ vqmovn(NeonS8, dst.low(), src0);
- __ vmov(dst.high(), dst.low());
- } else if (dst.is(src0)) {
- // dst is src0, so narrow src0 first.
- __ vqmovn(NeonS8, dst.low(), src0);
- __ vqmovn(NeonS8, dst.high(), src1);
- } else {
- // dst may alias src1, so narrow src1 first.
- __ vqmovn(NeonS8, dst.high(), src1);
- __ vqmovn(NeonS8, dst.low(), src0);
- }
+ case kArmI8x16SConvertI16x8:
+ ASSEMBLE_NEON_NARROWING_OP(NeonS8);
break;
- }
case kArmI8x16Add: {
__ vadd(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2079,25 +2087,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputInt3(1));
break;
}
- case kArmI8x16UConvertI16x8: {
- Simd128Register dst = i.OutputSimd128Register(),
- src0 = i.InputSimd128Register(0),
- src1 = i.InputSimd128Register(1);
- // Take care not to overwrite a source register before it's used.
- if (dst.is(src0) && dst.is(src1)) {
- __ vqmovn(NeonU8, dst.low(), src0);
- __ vmov(dst.high(), dst.low());
- } else if (dst.is(src0)) {
- // dst is src0, so narrow src0 first.
- __ vqmovn(NeonU8, dst.low(), src0);
- __ vqmovn(NeonU8, dst.high(), src1);
- } else {
- // dst may alias src1, so narrow src1 first.
- __ vqmovn(NeonU8, dst.high(), src1);
- __ vqmovn(NeonU8, dst.low(), src0);
- }
+ case kArmI8x16UConvertI16x8:
+ ASSEMBLE_NEON_NARROWING_OP(NeonU8);
break;
- }
case kArmI8x16AddSaturateU: {
__ vqadd(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -2159,6 +2151,286 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(2));
break;
}
+ case kArmS32x4ZipLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
+ __ vmov(dst.high(), src1.low()); // dst = [0, 1, 4, 5]
+ __ vtrn(Neon32, dst.low(), dst.high()); // dst = [0, 4, 1, 5]
+ break;
+ }
+ case kArmS32x4ZipRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from ZipLeft).
+ __ vmov(dst.low(), src1.high()); // dst = [2, 3, 6, 7]
+ __ vtrn(Neon32, dst.low(), dst.high()); // dst = [2, 6, 3, 7]
+ break;
+ }
+ case kArmS32x4UnzipLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
+ __ vmov(kScratchQuadReg, src1);
+ __ vuzp(Neon32, dst, kScratchQuadReg); // dst = [0, 2, 4, 6]
+ break;
+ }
+ case kArmS32x4UnzipRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from UnzipLeft).
+ __ vmov(kScratchQuadReg, src1);
+ __ vuzp(Neon32, kScratchQuadReg, dst); // dst = [1, 3, 5, 7]
+ break;
+ }
+ case kArmS32x4TransposeLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
+ __ vmov(kScratchQuadReg, src1);
+ __ vtrn(Neon32, dst, kScratchQuadReg); // dst = [0, 4, 2, 6]
+ break;
+ }
+ case kArmS32x4Shuffle: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ // Check for in-place shuffles.
+ // If dst == src0 == src1, then the shuffle is unary and we only use src0.
+ if (dst.is(src0)) {
+ __ vmov(kScratchQuadReg, src0);
+ src0 = kScratchQuadReg;
+ } else if (dst.is(src1)) {
+ __ vmov(kScratchQuadReg, src1);
+ src1 = kScratchQuadReg;
+ }
+ // Perform shuffle as a vmov per lane.
+ int dst_code = dst.code() * 4;
+ int src0_code = src0.code() * 4;
+ int src1_code = src1.code() * 4;
+ int32_t shuffle = i.InputInt32(2);
+ for (int i = 0; i < 4; i++) {
+ int lane = shuffle & 0x7;
+ int src_code = src0_code;
+ if (lane >= 4) {
+ src_code = src1_code;
+ lane &= 0x3;
+ }
+ __ VmovExtended(dst_code + i, src_code + lane);
+ shuffle >>= 8;
+ }
+ break;
+ }
+ case kArmS32x4TransposeRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from TransposeLeft).
+ __ vmov(kScratchQuadReg, src1);
+ __ vtrn(Neon32, kScratchQuadReg, dst); // dst = [1, 5, 3, 7]
+ break;
+ }
+ case kArmS16x8ZipLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ __ vmov(dst.high(), src1.low()); // dst = [0, 1, 2, 3, 8, ... 11]
+ __ vzip(Neon16, dst.low(), dst.high()); // dst = [0, 8, 1, 9, ... 11]
+ break;
+ }
+ case kArmS16x8ZipRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
+ __ vmov(dst.low(), src1.high());
+ __ vzip(Neon16, dst.low(), dst.high()); // dst = [4, 12, 5, 13, ... 15]
+ break;
+ }
+ case kArmS16x8UnzipLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
+ __ vmov(kScratchQuadReg, src1);
+ __ vuzp(Neon16, dst, kScratchQuadReg); // dst = [0, 2, 4, 6, ... 14]
+ break;
+ }
+ case kArmS16x8UnzipRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
+ __ vmov(kScratchQuadReg, src1);
+ __ vuzp(Neon16, kScratchQuadReg, dst); // dst = [1, 3, 5, 7, ... 15]
+ break;
+ }
+ case kArmS16x8TransposeLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
+ __ vmov(kScratchQuadReg, src1);
+ __ vtrn(Neon16, dst, kScratchQuadReg); // dst = [0, 8, 2, 10, ... 14]
+ break;
+ }
+ case kArmS16x8TransposeRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
+ __ vmov(kScratchQuadReg, src1);
+ __ vtrn(Neon16, kScratchQuadReg, dst); // dst = [1, 9, 3, 11, ... 15]
+ break;
+ }
+ case kArmS16x8Shuffle: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ DwVfpRegister table_base = src0.low();
+ int table_size = GetVtblTableSize(src0, src1);
+ // Convert the shuffle lane masks to byte masks in kScratchQuadReg.
+ int scratch_s_base = kScratchQuadReg.code() * 4;
+ for (int j = 0; j < 2; j++) {
+ int32_t four_lanes = i.InputInt32(2 + j);
+ for (int k = 0; k < 2; k++) {
+ uint8_t w0 = (four_lanes & 0xF) * kShortSize;
+ four_lanes >>= 8;
+ uint8_t w1 = (four_lanes & 0xF) * kShortSize;
+ four_lanes >>= 8;
+ int32_t mask = w0 | ((w0 + 1) << 8) | (w1 << 16) | ((w1 + 1) << 24);
+ // Ensure byte indices are in [0, 31] so masks are never NaNs.
+ four_lanes &= 0x1F1F1F1F;
+ __ vmov(SwVfpRegister::from_code(scratch_s_base + 2 * j + k),
+ bit_cast<float>(mask));
+ }
+ }
+ NeonListOperand table(table_base, table_size);
+ if (!dst.is(src0) && !dst.is(src1)) {
+ __ vtbl(dst.low(), table, kScratchQuadReg.low());
+ __ vtbl(dst.high(), table, kScratchQuadReg.high());
+ } else {
+ __ vtbl(kScratchQuadReg.low(), table, kScratchQuadReg.low());
+ __ vtbl(kScratchQuadReg.high(), table, kScratchQuadReg.high());
+ __ vmov(dst, kScratchQuadReg);
+ }
+ break;
+ }
+ case kArmS8x16ZipLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
+ __ vmov(dst.high(), src1.low());
+ __ vzip(Neon8, dst.low(), dst.high()); // dst = [0, 16, 1, 17, ... 23]
+ break;
+ }
+ case kArmS8x16ZipRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
+ __ vmov(dst.low(), src1.high());
+ __ vzip(Neon8, dst.low(), dst.high()); // dst = [8, 24, 9, 25, ... 31]
+ break;
+ }
+ case kArmS8x16UnzipLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
+ __ vmov(kScratchQuadReg, src1);
+ __ vuzp(Neon8, dst, kScratchQuadReg); // dst = [0, 2, 4, 6, ... 30]
+ break;
+ }
+ case kArmS8x16UnzipRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
+ __ vmov(kScratchQuadReg, src1);
+ __ vuzp(Neon8, kScratchQuadReg, dst); // dst = [1, 3, 5, 7, ... 31]
+ break;
+ }
+ case kArmS8x16TransposeLeft: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
+ __ vmov(kScratchQuadReg, src1);
+ __ vtrn(Neon8, dst, kScratchQuadReg); // dst = [0, 16, 2, 18, ... 30]
+ break;
+ }
+ case kArmS8x16TransposeRight: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src1 = i.InputSimd128Register(1);
+ DCHECK(dst.is(i.InputSimd128Register(0)));
+ // src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
+ __ vmov(kScratchQuadReg, src1);
+ __ vtrn(Neon8, kScratchQuadReg, dst); // dst = [1, 17, 3, 19, ... 31]
+ break;
+ }
+ case kArmS8x16Concat: {
+ __ vext(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputInt4(2));
+ break;
+ }
+ case kArmS8x16Shuffle: {
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+ DwVfpRegister table_base = src0.low();
+ int table_size = GetVtblTableSize(src0, src1);
+ // The shuffle lane mask is a byte mask, materialize in kScratchQuadReg.
+ int scratch_s_base = kScratchQuadReg.code() * 4;
+ for (int j = 0; j < 4; j++) {
+ int32_t four_lanes = i.InputInt32(2 + j);
+ // Ensure byte indices are in [0, 31] so masks are never NaNs.
+ four_lanes &= 0x1F1F1F1F;
+ __ vmov(SwVfpRegister::from_code(scratch_s_base + j),
+ bit_cast<float>(four_lanes));
+ }
+ NeonListOperand table(table_base, table_size);
+ if (!dst.is(src0) && !dst.is(src1)) {
+ __ vtbl(dst.low(), table, kScratchQuadReg.low());
+ __ vtbl(dst.high(), table, kScratchQuadReg.high());
+ } else {
+ __ vtbl(kScratchQuadReg.low(), table, kScratchQuadReg.low());
+ __ vtbl(kScratchQuadReg.high(), table, kScratchQuadReg.high());
+ __ vmov(dst, kScratchQuadReg);
+ }
+ break;
+ }
+ case kArmS32x2Reverse: {
+ __ vrev64(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmS16x4Reverse: {
+ __ vrev64(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmS16x2Reverse: {
+ __ vrev32(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmS8x8Reverse: {
+ __ vrev64(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmS8x4Reverse: {
+ __ vrev32(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kArmS8x2Reverse: {
+ __ vrev16(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
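
On 32-bit ARM a Q register is a pair of D registers, and NEON vzip/vuzp/vtrn permute two registers in place, producing both the "left" and "right" halves of the combined result at once; each opcode above keeps only the half it names. A scalar model of the three Left permutations for 8x16 lanes, as standalone C++ (illustration only):

    #include <array>
    #include <cassert>
    #include <cstdint>

    using Lanes = std::array<uint8_t, 16>;

    // ZipLeft interleaves the low halves: [a0, b0, a1, b1, ...].
    Lanes ZipLeft(const Lanes& a, const Lanes& b) {
      Lanes r{};
      for (int i = 0; i < 8; i++) { r[2 * i] = a[i]; r[2 * i + 1] = b[i]; }
      return r;
    }
    // UnzipLeft keeps the even lanes of the 32-lane concatenation.
    Lanes UnzipLeft(const Lanes& a, const Lanes& b) {
      Lanes r{};
      for (int i = 0; i < 8; i++) { r[i] = a[2 * i]; r[8 + i] = b[2 * i]; }
      return r;
    }
    // TransposeLeft interleaves the even lanes of both inputs.
    Lanes TransposeLeft(const Lanes& a, const Lanes& b) {
      Lanes r{};
      for (int i = 0; i < 8; i++) {
        r[2 * i] = a[2 * i];
        r[2 * i + 1] = b[2 * i];
      }
      return r;
    }

    int main() {
      Lanes a, b;
      for (int i = 0; i < 16; i++) { a[i] = i; b[i] = 16 + i; }
      assert(ZipLeft(a, b)[1] == 16);        // dst = [0, 16, 1, 17, ...]
      assert(UnzipLeft(a, b)[8] == 16);      // dst = [0, 2, ..., 16, 18, ...]
      assert(TransposeLeft(a, b)[3] == 18);  // dst = [0, 16, 2, 18, ...]
      return 0;
    }
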
case kArmS1x4AnyTrue: {
const QwNeonRegister& src = i.InputSimd128Register(0);
__ vpmax(NeonU32, kScratchDoubleReg, src.low(), src.high());
@@ -2508,9 +2780,7 @@ void CodeGenerator::FinishFrame(Frame* frame) {
frame->AllocateSavedCalleeRegisterSlots((last - first + 1) *
(kDoubleSize / kPointerSize));
}
- const RegList saves = FLAG_enable_embedded_constant_pool
- ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
- : descriptor->CalleeSavedRegisters();
+ const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
frame->AllocateSavedCalleeRegisterSlots(
@@ -2522,14 +2792,8 @@ void CodeGenerator::AssembleConstructFrame() {
CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
if (frame_access_state()->has_frame()) {
if (descriptor->IsCFunctionCall()) {
- if (FLAG_enable_embedded_constant_pool) {
- __ Push(lr, fp, pp);
- // Adjust FP to point to saved FP.
- __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
- } else {
- __ Push(lr, fp);
- __ mov(fp, sp);
- }
+ __ Push(lr, fp);
+ __ mov(fp, sp);
} else if (descriptor->IsJSFunctionCall()) {
__ Prologue(this->info()->GeneratePreagedPrologue());
if (descriptor->PushArgumentCount()) {
@@ -2615,9 +2879,7 @@ void CodeGenerator::AssembleConstructFrame() {
__ vstm(db_w, sp, DwVfpRegister::from_code(first),
DwVfpRegister::from_code(last));
}
- const RegList saves = FLAG_enable_embedded_constant_pool
- ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
- : descriptor->CalleeSavedRegisters();
+ const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
// Save callee-saved registers.
__ stm(db_w, sp, saves);
@@ -2629,9 +2891,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
int pop_count = static_cast<int>(descriptor->StackParameterCount());
// Restore registers.
- const RegList saves = FLAG_enable_embedded_constant_pool
- ? (descriptor->CalleeSavedRegisters() & ~pp.bit())
- : descriptor->CalleeSavedRegisters();
+ const RegList saves = descriptor->CalleeSavedRegisters();
if (saves != 0) {
__ ldm(ia_w, sp, saves);
}
@@ -2780,10 +3040,10 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
int src_code = LocationOperand::cast(source)->register_code();
if (destination->IsFloatRegister()) {
int dst_code = LocationOperand::cast(destination)->register_code();
- __ VmovExtended(dst_code, src_code, kScratchReg);
+ __ VmovExtended(dst_code, src_code);
} else {
DCHECK(destination->IsFloatStackSlot());
- __ VmovExtended(g.ToMemOperand(destination), src_code, kScratchReg);
+ __ VmovExtended(g.ToMemOperand(destination), src_code);
}
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
@@ -2810,7 +3070,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
// GapResolver may give us reg codes that don't map to actual
// s-registers. Generate code to work around those cases.
int dst_code = LocationOperand::cast(destination)->register_code();
- __ VmovExtended(dst_code, src, kScratchReg);
+ __ VmovExtended(dst_code, src);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
QwNeonRegister dst = g.ToSimd128Register(destination);
@@ -2837,7 +3097,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ add(kScratchReg, dst.rn(), Operand(dst.offset()));
__ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
NeonMemOperand(kScratchReg));
- __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
}
}
} else {
@@ -2895,14 +3154,14 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
int src_code = LocationOperand::cast(source)->register_code();
if (destination->IsFPRegister()) {
int dst_code = LocationOperand::cast(destination)->register_code();
- __ VmovExtended(temp.low().code(), src_code, kScratchReg);
- __ VmovExtended(src_code, dst_code, kScratchReg);
- __ VmovExtended(dst_code, temp.low().code(), kScratchReg);
+ __ VmovExtended(temp.low().code(), src_code);
+ __ VmovExtended(src_code, dst_code);
+ __ VmovExtended(dst_code, temp.low().code());
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
- __ VmovExtended(temp.low().code(), src_code, kScratchReg);
- __ VmovExtended(src_code, dst, kScratchReg);
+ __ VmovExtended(temp.low().code(), src_code);
+ __ VmovExtended(src_code, dst);
__ vstr(temp.low(), dst);
}
} else {
@@ -2920,40 +3179,41 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
NeonMemOperand(kScratchReg));
__ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
NeonMemOperand(kScratchReg));
- __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
}
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
- MemOperand src = g.ToMemOperand(source);
- MemOperand dst = g.ToMemOperand(destination);
+ Register temp_0 = kScratchReg;
+ LowDwVfpRegister temp_1 = kScratchDoubleReg;
+ MemOperand src0 = g.ToMemOperand(source);
+ MemOperand dst0 = g.ToMemOperand(destination);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
- __ vldr(kScratchDoubleReg, dst);
- __ vldr(kDoubleRegZero, src);
- __ vstr(kScratchDoubleReg, src);
- __ vstr(kDoubleRegZero, dst);
- // Restore the 0 register.
- __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+ MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+ MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+ __ vldr(temp_1, dst0); // Save destination in temp_1.
+ __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ str(temp_0, dst0);
+ __ ldr(temp_0, src1);
+ __ str(temp_0, dst1);
+ __ vstr(temp_1, src0);
} else if (rep == MachineRepresentation::kFloat32) {
- __ vldr(kScratchDoubleReg.low(), dst);
- __ vldr(kScratchDoubleReg.high(), src);
- __ vstr(kScratchDoubleReg.low(), src);
- __ vstr(kScratchDoubleReg.high(), dst);
+ __ vldr(temp_1.low(), dst0); // Save destination in temp_1.
+ __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ str(temp_0, dst0);
+ __ vstr(temp_1.low(), src0);
} else {
DCHECK_EQ(MachineRepresentation::kSimd128, rep);
- __ vldr(kScratchDoubleReg, dst);
- __ vldr(kDoubleRegZero, src);
- __ vstr(kScratchDoubleReg, src);
- __ vstr(kDoubleRegZero, dst);
- src.set_offset(src.offset() + kDoubleSize);
- dst.set_offset(dst.offset() + kDoubleSize);
- __ vldr(kScratchDoubleReg, dst);
- __ vldr(kDoubleRegZero, src);
- __ vstr(kScratchDoubleReg, src);
- __ vstr(kDoubleRegZero, dst);
- // Restore the 0 register.
- __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+ MemOperand src1(src0.rn(), src0.offset() + kDoubleSize);
+ MemOperand dst1(dst0.rn(), dst0.offset() + kDoubleSize);
+ __ vldr(kScratchQuadReg.low(), dst0);
+ __ vldr(kScratchQuadReg.high(), src0);
+ __ vstr(kScratchQuadReg.low(), src0);
+ __ vstr(kScratchQuadReg.high(), dst0);
+ __ vldr(kScratchQuadReg.low(), dst1);
+ __ vldr(kScratchQuadReg.high(), src1);
+ __ vstr(kScratchQuadReg.low(), src1);
+ __ vstr(kScratchQuadReg.high(), dst1);
}
} else {
// No other combinations are possible.
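
The rewritten stack-slot swap above no longer parks values in kDoubleRegZero, so nothing has to be re-zeroed with veor afterwards: the destination slot rides in the scratch D register while the source slot is copied a word at a time through the core scratch register. A scalar sketch of the kFloat64 path (illustration only, not V8 code):

    #include <cstdint>
    #include <cstring>

    // Scalar sketch of the kFloat64 swap: the destination slot goes through
    // the FP scratch (temp_1); the source slot is copied 32 bits at a time
    // through the core scratch (temp_0).
    void SwapFloat64Slots(unsigned char* src0, unsigned char* dst0) {
      uint64_t temp_1;                    // stands in for kScratchDoubleReg
      std::memcpy(&temp_1, dst0, 8);      // vldr temp_1, dst0
      uint32_t temp_0;                    // stands in for kScratchReg
      std::memcpy(&temp_0, src0, 4);      // ldr/str the low word
      std::memcpy(dst0, &temp_0, 4);
      std::memcpy(&temp_0, src0 + 4, 4);  // ldr/str the high word
      std::memcpy(dst0 + 4, &temp_0, 4);
      std::memcpy(src0, &temp_1, 8);      // vstr temp_1, src0
    }

    int main() {
      unsigned char a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
      unsigned char b[8] = {9, 10, 11, 12, 13, 14, 15, 16};
      SwapFloat64Slots(a, b);
      return (a[0] == 9 && b[0] == 1) ? 0 : 1;  // slots exchanged
    }
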
diff --git a/deps/v8/src/compiler/arm/instruction-codes-arm.h b/deps/v8/src/compiler/arm/instruction-codes-arm.h
index e709a23f5c..db3e515c40 100644
--- a/deps/v8/src/compiler/arm/instruction-codes-arm.h
+++ b/deps/v8/src/compiler/arm/instruction-codes-arm.h
@@ -134,12 +134,11 @@ namespace compiler {
V(ArmF32x4RecipApprox) \
V(ArmF32x4RecipSqrtApprox) \
V(ArmF32x4Add) \
+ V(ArmF32x4AddHoriz) \
V(ArmF32x4Sub) \
V(ArmF32x4Mul) \
V(ArmF32x4Min) \
V(ArmF32x4Max) \
- V(ArmF32x4RecipRefine) \
- V(ArmF32x4RecipSqrtRefine) \
V(ArmF32x4Eq) \
V(ArmF32x4Ne) \
V(ArmF32x4Lt) \
@@ -154,6 +153,7 @@ namespace compiler {
V(ArmI32x4Shl) \
V(ArmI32x4ShrS) \
V(ArmI32x4Add) \
+ V(ArmI32x4AddHoriz) \
V(ArmI32x4Sub) \
V(ArmI32x4Mul) \
V(ArmI32x4MinS) \
@@ -181,6 +181,7 @@ namespace compiler {
V(ArmI16x8SConvertI32x4) \
V(ArmI16x8Add) \
V(ArmI16x8AddSaturateS) \
+ V(ArmI16x8AddHoriz) \
V(ArmI16x8Sub) \
V(ArmI16x8SubSaturateS) \
V(ArmI16x8Mul) \
@@ -232,6 +233,34 @@ namespace compiler {
V(ArmS128Xor) \
V(ArmS128Not) \
V(ArmS128Select) \
+ V(ArmS32x4ZipLeft) \
+ V(ArmS32x4ZipRight) \
+ V(ArmS32x4UnzipLeft) \
+ V(ArmS32x4UnzipRight) \
+ V(ArmS32x4TransposeLeft) \
+ V(ArmS32x4TransposeRight) \
+ V(ArmS32x4Shuffle) \
+ V(ArmS16x8ZipLeft) \
+ V(ArmS16x8ZipRight) \
+ V(ArmS16x8UnzipLeft) \
+ V(ArmS16x8UnzipRight) \
+ V(ArmS16x8TransposeLeft) \
+ V(ArmS16x8TransposeRight) \
+ V(ArmS16x8Shuffle) \
+ V(ArmS8x16ZipLeft) \
+ V(ArmS8x16ZipRight) \
+ V(ArmS8x16UnzipLeft) \
+ V(ArmS8x16UnzipRight) \
+ V(ArmS8x16TransposeLeft) \
+ V(ArmS8x16TransposeRight) \
+ V(ArmS8x16Concat) \
+ V(ArmS8x16Shuffle) \
+ V(ArmS32x2Reverse) \
+ V(ArmS16x4Reverse) \
+ V(ArmS16x2Reverse) \
+ V(ArmS8x8Reverse) \
+ V(ArmS8x4Reverse) \
+ V(ArmS8x2Reverse) \
V(ArmS1x4AnyTrue) \
V(ArmS1x4AllTrue) \
V(ArmS1x8AnyTrue) \
diff --git a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
index e6f3464bb5..549752d09e 100644
--- a/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-scheduler-arm.cc
@@ -118,12 +118,11 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmF32x4RecipApprox:
case kArmF32x4RecipSqrtApprox:
case kArmF32x4Add:
+ case kArmF32x4AddHoriz:
case kArmF32x4Sub:
case kArmF32x4Mul:
case kArmF32x4Min:
case kArmF32x4Max:
- case kArmF32x4RecipRefine:
- case kArmF32x4RecipSqrtRefine:
case kArmF32x4Eq:
case kArmF32x4Ne:
case kArmF32x4Lt:
@@ -138,6 +137,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI32x4Shl:
case kArmI32x4ShrS:
case kArmI32x4Add:
+ case kArmI32x4AddHoriz:
case kArmI32x4Sub:
case kArmI32x4Mul:
case kArmI32x4MinS:
@@ -165,6 +165,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmI16x8SConvertI32x4:
case kArmI16x8Add:
case kArmI16x8AddSaturateS:
+ case kArmI16x8AddHoriz:
case kArmI16x8Sub:
case kArmI16x8SubSaturateS:
case kArmI16x8Mul:
@@ -216,6 +217,34 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArmS128Xor:
case kArmS128Not:
case kArmS128Select:
+ case kArmS32x4ZipLeft:
+ case kArmS32x4ZipRight:
+ case kArmS32x4UnzipLeft:
+ case kArmS32x4UnzipRight:
+ case kArmS32x4TransposeLeft:
+ case kArmS32x4TransposeRight:
+ case kArmS32x4Shuffle:
+ case kArmS16x8ZipLeft:
+ case kArmS16x8ZipRight:
+ case kArmS16x8UnzipLeft:
+ case kArmS16x8UnzipRight:
+ case kArmS16x8TransposeLeft:
+ case kArmS16x8TransposeRight:
+ case kArmS16x8Shuffle:
+ case kArmS8x16ZipLeft:
+ case kArmS8x16ZipRight:
+ case kArmS8x16UnzipLeft:
+ case kArmS8x16UnzipRight:
+ case kArmS8x16TransposeLeft:
+ case kArmS8x16TransposeRight:
+ case kArmS8x16Concat:
+ case kArmS8x16Shuffle:
+ case kArmS32x2Reverse:
+ case kArmS16x4Reverse:
+ case kArmS16x2Reverse:
+ case kArmS8x8Reverse:
+ case kArmS8x4Reverse:
+ case kArmS8x2Reverse:
case kArmS1x4AnyTrue:
case kArmS1x4AllTrue:
case kArmS1x8AnyTrue:
diff --git a/deps/v8/src/compiler/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
index d69a82c608..8983c9b115 100644
--- a/deps/v8/src/compiler/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/arm/instruction-selector-arm.cc
@@ -91,6 +91,27 @@ void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
g.UseRegister(node->InputAt(1)));
}
+void VisitRRRShuffle(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ ArmOperandGenerator g(selector);
+ // Swap inputs to save an instruction in the CodeGenerator for Right
+ // (high-half) ops.
+ if (opcode == kArmS32x4ZipRight || opcode == kArmS32x4UnzipRight ||
+ opcode == kArmS32x4TransposeRight || opcode == kArmS16x8ZipRight ||
+ opcode == kArmS16x8UnzipRight || opcode == kArmS16x8TransposeRight ||
+ opcode == kArmS8x16ZipRight || opcode == kArmS8x16UnzipRight ||
+ opcode == kArmS8x16TransposeRight) {
+ Node* in0 = node->InputAt(0);
+ Node* in1 = node->InputAt(1);
+ node->ReplaceInput(0, in1);
+ node->ReplaceInput(1, in0);
+ }
+ // Use DefineSameAsFirst for binary ops that clobber their inputs, e.g. the
+ // NEON vzip, vuzp, and vtrn instructions.
+ selector->Emit(opcode, g.DefineSameAsFirst(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
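
The swap works because each Right op equals the matching Left op on exchanged operands: with dst aliased to the swapped-in first input, the code generator needs only one d-register vmov before the vzip/vuzp/vtrn, matching its "(flipped)" comments. A scalar trace for S8x16ZipRight (illustration only):

    #include <array>
    #include <cassert>
    #include <cstdint>

    using L16 = std::array<uint8_t, 16>;

    int main() {
      L16 a, b;
      for (int i = 0; i < 16; i++) { a[i] = i; b[i] = 16 + i; }
      L16 dst = b;  // DefineSameAsFirst after the input swap
      for (int i = 0; i < 8; i++) dst[i] = a[8 + i];  // vmov dst.low, src1.high
      L16 zipped;  // vzip on dst.low / dst.high
      for (int i = 0; i < 8; i++) {
        zipped[2 * i] = dst[i];
        zipped[2 * i + 1] = dst[8 + i];
      }
      // Matches the code generator comment: dst = [8, 24, 9, 25, ... 31].
      assert(zipped[0] == 8 && zipped[1] == 24 && zipped[15] == 31);
      return 0;
    }
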
+
void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
ArmOperandGenerator g(selector);
// Use DefineSameAsFirst for ternary ops that clobber their first input,
@@ -398,6 +419,14 @@ void EmitStore(InstructionSelector* selector, InstructionCode opcode,
} // namespace
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -2414,80 +2443,81 @@ VISIT_ATOMIC_BINOP(Xor)
V(I8x16ShrS) \
V(I8x16ShrU)
-#define SIMD_BINOP_LIST(V) \
- V(F32x4Add, kArmF32x4Add) \
- V(F32x4Sub, kArmF32x4Sub) \
- V(F32x4Mul, kArmF32x4Mul) \
- V(F32x4Min, kArmF32x4Min) \
- V(F32x4Max, kArmF32x4Max) \
- V(F32x4RecipRefine, kArmF32x4RecipRefine) \
- V(F32x4RecipSqrtRefine, kArmF32x4RecipSqrtRefine) \
- V(F32x4Eq, kArmF32x4Eq) \
- V(F32x4Ne, kArmF32x4Ne) \
- V(F32x4Lt, kArmF32x4Lt) \
- V(F32x4Le, kArmF32x4Le) \
- V(I32x4Add, kArmI32x4Add) \
- V(I32x4Sub, kArmI32x4Sub) \
- V(I32x4Mul, kArmI32x4Mul) \
- V(I32x4MinS, kArmI32x4MinS) \
- V(I32x4MaxS, kArmI32x4MaxS) \
- V(I32x4Eq, kArmI32x4Eq) \
- V(I32x4Ne, kArmI32x4Ne) \
- V(I32x4LtS, kArmI32x4LtS) \
- V(I32x4LeS, kArmI32x4LeS) \
- V(I32x4MinU, kArmI32x4MinU) \
- V(I32x4MaxU, kArmI32x4MaxU) \
- V(I32x4LtU, kArmI32x4LtU) \
- V(I32x4LeU, kArmI32x4LeU) \
- V(I16x8SConvertI32x4, kArmI16x8SConvertI32x4) \
- V(I16x8Add, kArmI16x8Add) \
- V(I16x8AddSaturateS, kArmI16x8AddSaturateS) \
- V(I16x8Sub, kArmI16x8Sub) \
- V(I16x8SubSaturateS, kArmI16x8SubSaturateS) \
- V(I16x8Mul, kArmI16x8Mul) \
- V(I16x8MinS, kArmI16x8MinS) \
- V(I16x8MaxS, kArmI16x8MaxS) \
- V(I16x8Eq, kArmI16x8Eq) \
- V(I16x8Ne, kArmI16x8Ne) \
- V(I16x8LtS, kArmI16x8LtS) \
- V(I16x8LeS, kArmI16x8LeS) \
- V(I16x8UConvertI32x4, kArmI16x8UConvertI32x4) \
- V(I16x8AddSaturateU, kArmI16x8AddSaturateU) \
- V(I16x8SubSaturateU, kArmI16x8SubSaturateU) \
- V(I16x8MinU, kArmI16x8MinU) \
- V(I16x8MaxU, kArmI16x8MaxU) \
- V(I16x8LtU, kArmI16x8LtU) \
- V(I16x8LeU, kArmI16x8LeU) \
- V(I8x16SConvertI16x8, kArmI8x16SConvertI16x8) \
- V(I8x16Add, kArmI8x16Add) \
- V(I8x16AddSaturateS, kArmI8x16AddSaturateS) \
- V(I8x16Sub, kArmI8x16Sub) \
- V(I8x16SubSaturateS, kArmI8x16SubSaturateS) \
- V(I8x16Mul, kArmI8x16Mul) \
- V(I8x16MinS, kArmI8x16MinS) \
- V(I8x16MaxS, kArmI8x16MaxS) \
- V(I8x16Eq, kArmI8x16Eq) \
- V(I8x16Ne, kArmI8x16Ne) \
- V(I8x16LtS, kArmI8x16LtS) \
- V(I8x16LeS, kArmI8x16LeS) \
- V(I8x16UConvertI16x8, kArmI8x16UConvertI16x8) \
- V(I8x16AddSaturateU, kArmI8x16AddSaturateU) \
- V(I8x16SubSaturateU, kArmI8x16SubSaturateU) \
- V(I8x16MinU, kArmI8x16MinU) \
- V(I8x16MaxU, kArmI8x16MaxU) \
- V(I8x16LtU, kArmI8x16LtU) \
- V(I8x16LeU, kArmI8x16LeU) \
- V(S128And, kArmS128And) \
- V(S128Or, kArmS128Or) \
- V(S128Xor, kArmS128Xor) \
- V(S1x4And, kArmS128And) \
- V(S1x4Or, kArmS128Or) \
- V(S1x4Xor, kArmS128Xor) \
- V(S1x8And, kArmS128And) \
- V(S1x8Or, kArmS128Or) \
- V(S1x8Xor, kArmS128Xor) \
- V(S1x16And, kArmS128And) \
- V(S1x16Or, kArmS128Or) \
+#define SIMD_BINOP_LIST(V) \
+ V(F32x4Add, kArmF32x4Add) \
+ V(F32x4AddHoriz, kArmF32x4AddHoriz) \
+ V(F32x4Sub, kArmF32x4Sub) \
+ V(F32x4Mul, kArmF32x4Mul) \
+ V(F32x4Min, kArmF32x4Min) \
+ V(F32x4Max, kArmF32x4Max) \
+ V(F32x4Eq, kArmF32x4Eq) \
+ V(F32x4Ne, kArmF32x4Ne) \
+ V(F32x4Lt, kArmF32x4Lt) \
+ V(F32x4Le, kArmF32x4Le) \
+ V(I32x4Add, kArmI32x4Add) \
+ V(I32x4AddHoriz, kArmI32x4AddHoriz) \
+ V(I32x4Sub, kArmI32x4Sub) \
+ V(I32x4Mul, kArmI32x4Mul) \
+ V(I32x4MinS, kArmI32x4MinS) \
+ V(I32x4MaxS, kArmI32x4MaxS) \
+ V(I32x4Eq, kArmI32x4Eq) \
+ V(I32x4Ne, kArmI32x4Ne) \
+ V(I32x4LtS, kArmI32x4LtS) \
+ V(I32x4LeS, kArmI32x4LeS) \
+ V(I32x4MinU, kArmI32x4MinU) \
+ V(I32x4MaxU, kArmI32x4MaxU) \
+ V(I32x4LtU, kArmI32x4LtU) \
+ V(I32x4LeU, kArmI32x4LeU) \
+ V(I16x8SConvertI32x4, kArmI16x8SConvertI32x4) \
+ V(I16x8Add, kArmI16x8Add) \
+ V(I16x8AddSaturateS, kArmI16x8AddSaturateS) \
+ V(I16x8AddHoriz, kArmI16x8AddHoriz) \
+ V(I16x8Sub, kArmI16x8Sub) \
+ V(I16x8SubSaturateS, kArmI16x8SubSaturateS) \
+ V(I16x8Mul, kArmI16x8Mul) \
+ V(I16x8MinS, kArmI16x8MinS) \
+ V(I16x8MaxS, kArmI16x8MaxS) \
+ V(I16x8Eq, kArmI16x8Eq) \
+ V(I16x8Ne, kArmI16x8Ne) \
+ V(I16x8LtS, kArmI16x8LtS) \
+ V(I16x8LeS, kArmI16x8LeS) \
+ V(I16x8UConvertI32x4, kArmI16x8UConvertI32x4) \
+ V(I16x8AddSaturateU, kArmI16x8AddSaturateU) \
+ V(I16x8SubSaturateU, kArmI16x8SubSaturateU) \
+ V(I16x8MinU, kArmI16x8MinU) \
+ V(I16x8MaxU, kArmI16x8MaxU) \
+ V(I16x8LtU, kArmI16x8LtU) \
+ V(I16x8LeU, kArmI16x8LeU) \
+ V(I8x16SConvertI16x8, kArmI8x16SConvertI16x8) \
+ V(I8x16Add, kArmI8x16Add) \
+ V(I8x16AddSaturateS, kArmI8x16AddSaturateS) \
+ V(I8x16Sub, kArmI8x16Sub) \
+ V(I8x16SubSaturateS, kArmI8x16SubSaturateS) \
+ V(I8x16Mul, kArmI8x16Mul) \
+ V(I8x16MinS, kArmI8x16MinS) \
+ V(I8x16MaxS, kArmI8x16MaxS) \
+ V(I8x16Eq, kArmI8x16Eq) \
+ V(I8x16Ne, kArmI8x16Ne) \
+ V(I8x16LtS, kArmI8x16LtS) \
+ V(I8x16LeS, kArmI8x16LeS) \
+ V(I8x16UConvertI16x8, kArmI8x16UConvertI16x8) \
+ V(I8x16AddSaturateU, kArmI8x16AddSaturateU) \
+ V(I8x16SubSaturateU, kArmI8x16SubSaturateU) \
+ V(I8x16MinU, kArmI8x16MinU) \
+ V(I8x16MaxU, kArmI8x16MaxU) \
+ V(I8x16LtU, kArmI8x16LtU) \
+ V(I8x16LeU, kArmI8x16LeU) \
+ V(S128And, kArmS128And) \
+ V(S128Or, kArmS128Or) \
+ V(S128Xor, kArmS128Xor) \
+ V(S1x4And, kArmS128And) \
+ V(S1x4Or, kArmS128Or) \
+ V(S1x4Xor, kArmS128Xor) \
+ V(S1x8And, kArmS128And) \
+ V(S1x8Or, kArmS128Or) \
+ V(S1x8Xor, kArmS128Xor) \
+ V(S1x16And, kArmS128And) \
+ V(S1x16Or, kArmS128Or) \
V(S1x16Xor, kArmS128Xor)
#define SIMD_VISIT_SPLAT(Type) \
@@ -2547,6 +2577,216 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
#undef SIMD_VISIT_SELECT_OP
+namespace {
+template <int LANES>
+struct ShuffleEntry {
+ uint8_t shuffle[LANES];
+ ArchOpcode opcode;
+};
+
+static const ShuffleEntry<4> arch_s32x4_shuffles[] = {
+ {{0, 4, 1, 5}, kArmS32x4ZipLeft},
+ {{2, 6, 3, 7}, kArmS32x4ZipRight},
+ {{0, 2, 4, 6}, kArmS32x4UnzipLeft},
+ {{1, 3, 5, 7}, kArmS32x4UnzipRight},
+ {{0, 4, 2, 6}, kArmS32x4TransposeLeft},
+ {{1, 5, 3, 7}, kArmS32x4TransposeRight},
+ {{1, 0, 3, 2}, kArmS32x2Reverse}};
+
+static const ShuffleEntry<8> arch_s16x8_shuffles[] = {
+ {{0, 8, 1, 9, 2, 10, 3, 11}, kArmS16x8ZipLeft},
+ {{4, 12, 5, 13, 6, 14, 7, 15}, kArmS16x8ZipRight},
+ {{0, 2, 4, 6, 8, 10, 12, 14}, kArmS16x8UnzipLeft},
+ {{1, 3, 5, 7, 9, 11, 13, 15}, kArmS16x8UnzipRight},
+ {{0, 8, 2, 10, 4, 12, 6, 14}, kArmS16x8TransposeLeft},
+ {{1, 9, 3, 11, 5, 13, 7, 15}, kArmS16x8TransposeRight},
+ {{3, 2, 1, 0, 7, 6, 5, 4}, kArmS16x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6}, kArmS16x2Reverse}};
+
+static const ShuffleEntry<16> arch_s8x16_shuffles[] = {
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kArmS8x16ZipLeft},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kArmS8x16ZipRight},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kArmS8x16UnzipLeft},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kArmS8x16UnzipRight},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kArmS8x16TransposeLeft},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kArmS8x16TransposeRight},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kArmS8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kArmS8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kArmS8x2Reverse}};
+
+// Use a non-shuffle opcode to signal no match.
+static const ArchOpcode kNoShuffle = kArmS128Not;
+
+template <int LANES>
+ArchOpcode TryMatchArchShuffle(const uint8_t* shuffle,
+ const ShuffleEntry<LANES>* table,
+ size_t num_entries, uint8_t mask) {
+ for (size_t i = 0; i < num_entries; i++) {
+ const ShuffleEntry<LANES>& entry = table[i];
+ int j = 0;
+ for (; j < LANES; j++) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == LANES) return entry.opcode;
+ }
+ return kNoShuffle;
+}
+
+// Returns the bias if shuffle is a concatenation, 0 otherwise.
+template <int LANES>
+uint8_t TryMatchConcat(const uint8_t* shuffle, uint8_t mask) {
+ uint8_t start = shuffle[0];
+ int i = 1;
+ for (; i < LANES - start; i++) {
+ if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return 0;
+ }
+ uint8_t wrap = LANES;
+ for (; i < LANES; i++, wrap++) {
+ if ((shuffle[i] & mask) != (wrap & mask)) return 0;
+ }
+ return start;
+}
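
TryMatchConcat recognizes shuffles that rotate the 2*LANES-lane concatenation of the inputs: indices must increase by one from shuffle[0] and wrap at LANES. A worked example using a standalone copy of the function above (illustration only):

    #include <cassert>
    #include <cstdint>

    // Standalone copy of TryMatchConcat above, for the worked example only.
    template <int LANES>
    uint8_t TryMatchConcat(const uint8_t* shuffle, uint8_t mask) {
      uint8_t start = shuffle[0];
      int i = 1;
      for (; i < LANES - start; i++) {
        if ((shuffle[i] & mask) != ((shuffle[i - 1] + 1) & mask)) return 0;
      }
      uint8_t wrap = LANES;
      for (; i < LANES; i++, wrap++) {
        if ((shuffle[i] & mask) != (wrap & mask)) return 0;
      }
      return start;
    }

    int main() {
      const uint8_t rot[4] = {3, 4, 5, 6};     // rotation of src0:src1, bias 3
      const uint8_t not_rot[4] = {3, 5, 4, 6};
      assert(TryMatchConcat<4>(rot, 0xff) == 3);
      assert(TryMatchConcat<4>(not_rot, 0xff) == 0);
      // VisitS32x4Shuffle scales the bias to bytes: vext #(3 * 4).
      return 0;
    }
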
+
+// Canonicalize shuffles to make pattern matching simpler. Returns a mask that
+// will ignore the high bit of indices in some cases.
+uint8_t CanonicalizeShuffle(InstructionSelector* selector, Node* node,
+ int num_lanes) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = 0xff;
+ // If shuffle is unary, set 'mask' to ignore the high bit of the indices.
+ // Replace any unused source with the other.
+ if (selector->GetVirtualRegister(node->InputAt(0)) ==
+ selector->GetVirtualRegister(node->InputAt(1))) {
+ // unary, src0 == src1.
+ mask = num_lanes - 1;
+ } else {
+ bool src0_is_used = false;
+ bool src1_is_used = false;
+ for (int i = 0; i < num_lanes; i++) {
+ if (shuffle[i] < num_lanes) {
+ src0_is_used = true;
+ } else {
+ src1_is_used = true;
+ }
+ }
+ if (src0_is_used && !src1_is_used) {
+ node->ReplaceInput(1, node->InputAt(0));
+ mask = num_lanes - 1;
+ } else if (src1_is_used && !src0_is_used) {
+ node->ReplaceInput(0, node->InputAt(1));
+ mask = num_lanes - 1;
+ }
+ }
+ return mask;
+}
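
For unary shuffles the high bit of each index is meaningless, since both sources are the same register, so the returned mask drops it. A small check of the effect (illustration only):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Unary 4-lane shuffle: CanonicalizeShuffle returns
      // mask = num_lanes - 1 = 3, under which the indices {4, 5, 6, 7}
      // compare equal to the identity shuffle {0, 1, 2, 3}.
      const uint8_t shuffle[4] = {4, 5, 6, 7};
      const uint8_t identity[4] = {0, 1, 2, 3};
      const uint8_t mask = 3;
      for (int i = 0; i < 4; i++) {
        assert((shuffle[i] & mask) == (identity[i] & mask));
      }
      return 0;
    }
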
+
+int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
+ int32_t result = 0;
+ for (int i = 3; i >= 0; i--) {
+ result <<= 8;
+ result |= shuffle[i] & mask;
+ }
+ return result;
+}
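
Pack4Lanes packs four masked byte indices little-endian into an immediate, lowest index in the least significant byte. A worked example using a standalone copy of the function above (illustration only):

    #include <cassert>
    #include <cstdint>

    // Standalone copy of Pack4Lanes above, for the worked example only.
    int32_t Pack4Lanes(const uint8_t* shuffle, uint8_t mask) {
      int32_t result = 0;
      for (int i = 3; i >= 0; i--) {
        result <<= 8;
        result |= shuffle[i] & mask;
      }
      return result;
    }

    int main() {
      const uint8_t lanes[4] = {4, 5, 6, 7};
      assert(Pack4Lanes(lanes, 0xff) == 0x07060504);
      return 0;
    }
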
+
+void ArrangeShuffleTable(ArmOperandGenerator* g, Node* input0, Node* input1,
+ InstructionOperand* src0, InstructionOperand* src1) {
+ if (input0 == input1) {
+ // Unary, any q-register can be the table.
+ *src0 = *src1 = g->UseRegister(input0);
+ } else {
+ // Binary, table registers must be consecutive.
+ *src0 = g->UseFixed(input0, q0);
+ *src1 = g->UseFixed(input1, q1);
+ }
+}
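
NEON vtbl indexes a table of consecutive d-registers and yields zero for indices past the end of the table, which is why ArrangeShuffleTable pins binary sources to q0/q1 so they form one contiguous 32-byte table. A scalar model of the lookup (illustration only; table_size is in bytes in this model):

    #include <array>
    #include <cstdint>

    // Scalar model of the vtbl lookup fed by ArrangeShuffleTable: 16 bytes
    // of table for unary shuffles, 32 for binary ones; out-of-range
    // indices produce zero.
    std::array<uint8_t, 16> Vtbl(const std::array<uint8_t, 32>& table,
                                 const std::array<uint8_t, 16>& index,
                                 int table_size) {
      std::array<uint8_t, 16> out{};
      for (int i = 0; i < 16; i++) {
        out[i] = index[i] < table_size ? table[index[i]] : 0;
      }
      return out;
    }
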
+
+} // namespace
+
+void InstructionSelector::VisitS32x4Shuffle(Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = CanonicalizeShuffle(this, node, 4);
+ ArchOpcode opcode = TryMatchArchShuffle<4>(
+ shuffle, arch_s32x4_shuffles, arraysize(arch_s32x4_shuffles), mask);
+ if (opcode != kNoShuffle) {
+ VisitRRRShuffle(this, opcode, node);
+ return;
+ }
+ ArmOperandGenerator g(this);
+ uint8_t lanes = TryMatchConcat<4>(shuffle, mask);
+ if (lanes != 0) {
+ Emit(kArmS8x16Concat, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(lanes * 4));
+ return;
+ }
+ Emit(kArmS32x4Shuffle, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
+ g.UseImmediate(Pack4Lanes(shuffle, mask)));
+}
+
+void InstructionSelector::VisitS16x8Shuffle(Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = CanonicalizeShuffle(this, node, 8);
+ ArchOpcode opcode = TryMatchArchShuffle<8>(
+ shuffle, arch_s16x8_shuffles, arraysize(arch_s16x8_shuffles), mask);
+ if (opcode != kNoShuffle) {
+ VisitRRRShuffle(this, opcode, node);
+ return;
+ }
+ ArmOperandGenerator g(this);
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ uint8_t lanes = TryMatchConcat<8>(shuffle, mask);
+ if (lanes != 0) {
+ Emit(kArmS8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(lanes * 2));
+ return;
+ }
+ // Code generator uses vtbl; arrange sources to form a valid lookup table.
+ InstructionOperand src0, src1;
+ ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
+ Emit(kArmS16x8Shuffle, g.DefineAsRegister(node), src0, src1,
+ g.UseImmediate(Pack4Lanes(shuffle, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4, mask)));
+}
+
+void InstructionSelector::VisitS8x16Shuffle(Node* node) {
+ const uint8_t* shuffle = OpParameter<uint8_t*>(node);
+ uint8_t mask = CanonicalizeShuffle(this, node, 16);
+ ArchOpcode opcode = TryMatchArchShuffle<16>(
+ shuffle, arch_s8x16_shuffles, arraysize(arch_s8x16_shuffles), mask);
+ if (opcode != kNoShuffle) {
+ VisitRRRShuffle(this, opcode, node);
+ return;
+ }
+ ArmOperandGenerator g(this);
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ uint8_t lanes = TryMatchConcat<16>(shuffle, mask);
+ if (lanes != 0) {
+ Emit(kArmS8x16Concat, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1), g.UseImmediate(lanes));
+ return;
+ }
+ // Code generator uses vtbl; arrange sources to form a valid lookup table.
+ InstructionOperand src0, src1;
+ ArrangeShuffleTable(&g, input0, input1, &src0, &src1);
+ Emit(kArmS8x16Shuffle, g.DefineAsRegister(node), src0, src1,
+ g.UseImmediate(Pack4Lanes(shuffle, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 4, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 8, mask)),
+ g.UseImmediate(Pack4Lanes(shuffle + 12, mask)));
+}
+
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
index a72070a06d..88311c35e8 100644
--- a/deps/v8/src/compiler/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/arm64/code-generator-arm64.cc
@@ -772,8 +772,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArchPrepareCallCFunction:
// We don't need kArchPrepareCallCFunction on arm64 as the instruction
- // selector already perform a Claim to reserve space on the stack and
- // guarantee correct alignment of stack pointer.
+ // selector has already performed a Claim to reserve space on the stack.
+ // Frame alignment is always 16 bytes, and the stack pointer is already
+ // 16-byte aligned; therefore we do not need to align the stack pointer
+ // by an unknown value, and it is safe to continue accessing the frame
+ // via the stack pointer.
UNREACHABLE();
break;
case kArchPrepareTailCall:
@@ -788,9 +791,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters, 0);
}
- // CallCFunction only supports register arguments so we never need to call
- // frame()->ClearOutgoingParameterSlots() here.
- DCHECK(frame_access_state()->sp_delta() == 0);
+ frame_access_state()->SetFrameAccessToDefault();
+ frame_access_state()->ClearSPDelta();
break;
}
case kArchJmp:
@@ -1228,14 +1230,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register prev = __ StackPointer();
if (prev.Is(jssp)) {
// TODO(titzer): make this a macro-assembler method.
- // Align the CSP and store the previous JSSP on the stack.
+ // Align the CSP and store the previous JSSP on the stack. We do not
+ // need to modify the SP delta here, as we will continue to access the
+ // frame via JSSP.
UseScratchRegisterScope scope(masm());
Register tmp = scope.AcquireX();
+ // TODO(arm64): Storing JSSP on the stack is redundant when calling a C
+ // function, as JSSP is callee-saved (we still need to do this when
+ // calling a code object that uses the CSP as the stack pointer). See
+ // the code generation for kArchCallCodeObject vs. kArchCallCFunction
+ // (the latter does not restore CSP/JSSP).
+ // MacroAssembler::CallCFunction() (safely) drops this extra slot
+ // anyway.
int sp_alignment = __ ActivationFrameAlignment();
__ Sub(tmp, jssp, kPointerSize);
- __ And(tmp, tmp, Operand(~static_cast<uint64_t>(sp_alignment - 1)));
- __ Mov(csp, tmp);
+ __ Bic(csp, tmp, sp_alignment - 1);
__ Str(jssp, MemOperand(csp));
if (count > 0) {
__ SetStackPointer(csp);
@@ -1259,7 +1269,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (count > 0) {
int even = RoundUp(count, 2);
__ Sub(jssp, csp, count * kPointerSize);
+ // We must also update CSP to maintain stack consistency:
__ Sub(csp, csp, even * kPointerSize); // Must always be aligned.
+ __ AssertStackConsistency();
frame_access_state()->IncreaseSPDelta(even);
} else {
__ Mov(jssp, csp);
@@ -1994,6 +2006,53 @@ void CodeGenerator::AssembleConstructFrame() {
osr_pc_offset_ = __ pc_offset();
shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
}
+
+ if (info()->IsWasm() && shrink_slots > 128) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. This lets us avoid the integer overflow
+ // check in the condition code.
+ if (shrink_slots * kPointerSize < FLAG_stack_size * 1024) {
+ UseScratchRegisterScope scope(masm());
+ Register scratch = scope.AcquireX();
+ __ Mov(
+ scratch,
+ Operand(ExternalReference::address_of_real_stack_limit(isolate())));
+ __ Ldr(scratch, MemOperand(scratch));
+ __ Add(scratch, scratch, Operand(shrink_slots * kPointerSize));
+ __ Cmp(__ StackPointer(), scratch);
+ __ B(cs, &done);
+ }
+
+ if (!frame_access_state()->has_frame()) {
+ __ set_has_frame(true);
+ // There is no need to leave the frame; we will not return from the
+ // runtime call.
+ __ EnterFrame(StackFrame::WASM_COMPILED);
+ }
+ DCHECK(__ StackPointer().Is(csp));
+ __ SetStackPointer(jssp);
+ __ AssertStackConsistency();
+ // Initialize the jssp because it is required for the runtime call.
+ __ Mov(jssp, csp);
+ __ Move(cp, Smi::kZero);
+ __ CallRuntime(Runtime::kThrowWasmStackOverflow);
+ // We come from WebAssembly; there are no references for the GC.
+ ReferenceMap* reference_map = new (zone()) ReferenceMap(zone());
+ RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+ Safepoint::kNoLazyDeopt);
+ if (FLAG_debug_code) {
+ __ Brk(0);
+ }
+ __ SetStackPointer(csp);
+ __ AssertStackConsistency();
+ __ bind(&done);
+ }
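
The guard compares the stack pointer against real_stack_limit + frame_size and falls through (B(cs, &done)) when the frame fits; frames at least as large as the whole stack are thrown unconditionally, which also keeps that addition from overflowing. In plain arithmetic (a sketch of the condition, not V8 code):

    #include <cstdint>

    // The frame fits iff sp >= limit + frame_size. Refusing frames as
    // large as the whole stack up front keeps 'limit + frame_size' from
    // wrapping around.
    bool FrameFits(uintptr_t sp, uintptr_t real_stack_limit,
                   uintptr_t frame_size, uintptr_t stack_size) {
      if (frame_size >= stack_size) return false;  // throw unconditionally
      return sp >= real_stack_limit + frame_size;
    }
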
+
// Build remainder of frame, including accounting for and filling-in
// frame-specific header information, e.g. claiming the extra slot that
// other platforms explicitly push for STUB frames and frames recording
diff --git a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
index a471a2b8b3..0e9fd0ca2b 100644
--- a/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
@@ -527,6 +527,15 @@ int32_t LeftShiftForReducedMultiply(Matcher* m) {
} // namespace
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
+
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
ImmediateMode immediate_mode, MachineRepresentation rep,
Node* output = nullptr) {
@@ -919,7 +928,8 @@ void InstructionSelector::VisitWord32And(Node* node) {
uint32_t mask = m.right().Value();
uint32_t mask_width = base::bits::CountPopulation32(mask);
uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
- if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ if ((mask_width != 0) && (mask_width != 32) &&
+ (mask_msb + mask_width == 32)) {
// The mask must be contiguous, and occupy the least-significant bits.
DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
@@ -1775,7 +1785,8 @@ void InstructionSelector::EmitPrepareArguments(
// TODO(titzer): it would be better to bump the csp here only
// and emit paired stores with increment for non c frames.
ArchOpcode claim = to_native_stack ? kArm64ClaimCSP : kArm64ClaimJSSP;
- // Claim(0) isn't a nop if there is a mismatch between CSP and JSSP.
+ // ClaimJSSP(0) or ClaimCSP(0) isn't a nop if there is a mismatch between
+ // CSP and JSSP.
Emit(claim, g.NoOutput(), g.TempImmediate(claim_count));
}
diff --git a/deps/v8/src/compiler/ast-graph-builder.cc b/deps/v8/src/compiler/ast-graph-builder.cc
index b92a205600..fd2209ed53 100644
--- a/deps/v8/src/compiler/ast-graph-builder.cc
+++ b/deps/v8/src/compiler/ast-graph-builder.cc
@@ -268,9 +268,9 @@ class AstGraphBuilder::ControlScopeForIteration : public ControlScope {
LoopBuilder* control_;
};
-
AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph, float invocation_frequency,
+ JSGraph* jsgraph,
+ CallFrequency invocation_frequency,
LoopAssignmentAnalysis* loop)
: isolate_(info->isolate()),
local_zone_(local_zone),
@@ -1692,7 +1692,7 @@ void AstGraphBuilder::VisitCall(Call* expr) {
VisitForValues(args);
// Create node to perform the function call.
- float const frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
+ CallFrequency frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
const Operator* call =
javascript()->Call(args->length() + 2, frequency, feedback, receiver_hint,
@@ -1720,7 +1720,7 @@ void AstGraphBuilder::VisitCallNew(CallNew* expr) {
environment()->Push(environment()->Peek(args->length()));
// Create node to perform the construct call.
- float const frequency = ComputeCallFrequency(expr->CallNewFeedbackSlot());
+ CallFrequency frequency = ComputeCallFrequency(expr->CallNewFeedbackSlot());
VectorSlotPair feedback = CreateVectorSlotPair(expr->CallNewFeedbackSlot());
const Operator* call =
javascript()->Construct(args->length() + 2, frequency, feedback);
@@ -2120,7 +2120,8 @@ void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
VisitForValue(property->key());
Node* key = environment()->Pop();
Node* object = environment()->Pop();
- value = NewNode(javascript()->DeleteProperty(language_mode()), object, key);
+ Node* mode = jsgraph()->Constant(static_cast<int32_t>(language_mode()));
+ value = NewNode(javascript()->DeleteProperty(), object, key, mode);
PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
} else {
VisitForEffect(expr->expression());
@@ -2239,12 +2240,15 @@ void AstGraphBuilder::VisitRewritableExpression(RewritableExpression* node) {
Visit(node->expression());
}
-float AstGraphBuilder::ComputeCallFrequency(FeedbackSlot slot) const {
- if (slot.IsInvalid()) return 0.0f;
+CallFrequency AstGraphBuilder::ComputeCallFrequency(FeedbackSlot slot) const {
+ if (invocation_frequency_.IsUnknown() || slot.IsInvalid()) {
+ return CallFrequency();
+ }
Handle<FeedbackVector> feedback_vector(info()->closure()->feedback_vector(),
isolate());
CallICNexus nexus(feedback_vector, slot);
- return nexus.ComputeCallFrequency() * invocation_frequency_;
+ return CallFrequency(nexus.ComputeCallFrequency() *
+ invocation_frequency_.value());
}
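
The patch threads a CallFrequency value type through in place of the raw float. From its usage in this file alone (default construction means "unknown", plus IsUnknown() and value() accessors), a minimal sketch of the interface would be as follows; the real definition lives elsewhere in the V8 sources:

    #include <cmath>
    #include <limits>

    // Minimal sketch of CallFrequency as inferred from this file's usage;
    // not the actual V8 definition.
    class CallFrequency {
     public:
      CallFrequency()  // default-constructed means "unknown"
          : value_(std::numeric_limits<float>::quiet_NaN()) {}
      explicit CallFrequency(float value) : value_(value) {}

      bool IsUnknown() const { return std::isnan(value_); }
      float value() const { return value_; }

     private:
      float value_;
    };
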
Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
@@ -2453,8 +2457,9 @@ Node* AstGraphBuilder::BuildVariableDelete(Variable* variable,
// Global var, const, or let variable.
Node* global = BuildLoadGlobalObject();
Node* name = jsgraph()->Constant(variable->name());
- const Operator* op = javascript()->DeleteProperty(language_mode());
- Node* result = NewNode(op, global, name);
+ Node* mode = jsgraph()->Constant(static_cast<int32_t>(language_mode()));
+ const Operator* op = javascript()->DeleteProperty();
+ Node* result = NewNode(op, global, name, mode);
PrepareFrameState(result, bailout_id, combine);
return result;
}
@@ -3014,8 +3019,9 @@ void AstGraphBuilder::Environment::PrepareForOsrEntry() {
// Set the control and effect to the OSR loop entry.
Node* osr_loop_entry = graph->NewNode(builder_->common()->OsrLoopEntry(),
graph->start(), graph->start());
+ Node* effect = osr_loop_entry;
UpdateControlDependency(osr_loop_entry);
- UpdateEffectDependency(osr_loop_entry);
+ UpdateEffectDependency(effect);
// Set OSR values.
for (int i = 0; i < size; ++i) {
@@ -3028,30 +3034,11 @@ void AstGraphBuilder::Environment::PrepareForOsrEntry() {
builder_->common()->OsrValue(Linkage::kOsrContextSpillSlotIndex);
contexts()->back() = graph->NewNode(op_inner, osr_loop_entry);
- // Create a checkpoint.
- Node* frame_state = Checkpoint(builder_->info()->osr_ast_id());
- Node* checkpoint = graph->NewNode(common()->Checkpoint(), frame_state,
- osr_loop_entry, osr_loop_entry);
- UpdateEffectDependency(checkpoint);
-
- // Create the OSR guard nodes.
- const Operator* guard_op =
- builder_->info()->is_deoptimization_enabled()
- ? builder_->common()->OsrGuard(OsrGuardType::kUninitialized)
- : builder_->common()->OsrGuard(OsrGuardType::kAny);
- Node* effect = checkpoint;
- for (int i = 0; i < size; ++i) {
- values()->at(i) = effect =
- graph->NewNode(guard_op, values()->at(i), effect, osr_loop_entry);
- }
- contexts()->back() = effect =
- graph->NewNode(guard_op, contexts()->back(), effect, osr_loop_entry);
-
// The innermost context is the OSR value, and the outer contexts are
// reconstructed by dynamically walking up the context chain.
const Operator* load_op =
builder_->javascript()->LoadContext(0, Context::PREVIOUS_INDEX, true);
- Node* osr_context = effect = contexts()->back();
+ Node* osr_context = contexts()->back();
int last = static_cast<int>(contexts()->size() - 1);
for (int i = last - 1; i >= 0; i--) {
osr_context = effect = graph->NewNode(load_op, osr_context, effect);
@@ -3172,7 +3159,7 @@ Node* AstGraphBuilder::MergeValue(Node* value, Node* other, Node* control) {
AstGraphBuilderWithPositions::AstGraphBuilderWithPositions(
Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
- float invocation_frequency, LoopAssignmentAnalysis* loop_assignment,
+ CallFrequency invocation_frequency, LoopAssignmentAnalysis* loop_assignment,
SourcePositionTable* source_positions, int inlining_id)
: AstGraphBuilder(local_zone, info, jsgraph, invocation_frequency,
loop_assignment),
diff --git a/deps/v8/src/compiler/ast-graph-builder.h b/deps/v8/src/compiler/ast-graph-builder.h
index 4fd3f35e78..1d0ba3a9c2 100644
--- a/deps/v8/src/compiler/ast-graph-builder.h
+++ b/deps/v8/src/compiler/ast-graph-builder.h
@@ -37,7 +37,7 @@ class Node;
class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
public:
AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
- float invocation_frequency,
+ CallFrequency invocation_frequency,
LoopAssignmentAnalysis* loop_assignment = nullptr);
virtual ~AstGraphBuilder() {}
@@ -78,7 +78,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
Zone* local_zone_;
CompilationInfo* info_;
JSGraph* jsgraph_;
- float const invocation_frequency_;
+ CallFrequency const invocation_frequency_;
Environment* environment_;
AstContext* ast_context_;
@@ -249,7 +249,7 @@ class AstGraphBuilder : public AstVisitor<AstGraphBuilder> {
VectorSlotPair CreateVectorSlotPair(FeedbackSlot slot) const;
// Computes the frequency for JSCall and JSConstruct nodes.
- float ComputeCallFrequency(FeedbackSlot slot) const;
+ CallFrequency ComputeCallFrequency(FeedbackSlot slot) const;
// ===========================================================================
// The following build methods all generate graph fragments and return one
@@ -559,7 +559,8 @@ class AstGraphBuilder::Environment : public ZoneObject {
class AstGraphBuilderWithPositions final : public AstGraphBuilder {
public:
AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
- JSGraph* jsgraph, float invocation_frequency,
+ JSGraph* jsgraph,
+ CallFrequency invocation_frequency,
LoopAssignmentAnalysis* loop_assignment,
SourcePositionTable* source_positions,
int inlining_id = SourcePosition::kNotInlined);
diff --git a/deps/v8/src/compiler/bytecode-analysis.cc b/deps/v8/src/compiler/bytecode-analysis.cc
index 6d8afe1744..e531e75b8c 100644
--- a/deps/v8/src/compiler/bytecode-analysis.cc
+++ b/deps/v8/src/compiler/bytecode-analysis.cc
@@ -90,6 +90,7 @@ BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
loop_end_index_queue_(zone),
end_to_header_(zone),
header_to_info_(zone),
+ osr_entry_point_(-1),
liveness_map_(bytecode_array->length(), zone) {}
namespace {
@@ -187,6 +188,10 @@ void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
if (Bytecodes::IsForwardJump(bytecode)) {
int target_offset = accessor.GetJumpTargetOffset();
out_liveness.Union(*liveness_map.GetInLiveness(target_offset));
+ } else if (Bytecodes::IsSwitch(bytecode)) {
+ for (const auto& entry : accessor.GetJumpTableTargetOffsets()) {
+ out_liveness.Union(*liveness_map.GetInLiveness(entry.target_offset));
+ }
}
// Update from next bytecode (unless there isn't one or this is an
@@ -256,7 +261,8 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
// Every byte up to and including the last byte within the backwards jump
// instruction is considered part of the loop, set loop end accordingly.
int loop_end = current_offset + iterator.current_bytecode_size();
- PushLoop(iterator.GetJumpTargetOffset(), loop_end);
+ int loop_header = iterator.GetJumpTargetOffset();
+ PushLoop(loop_header, loop_end);
// Normally prefixed bytecodes are treated as if the prefix's offset were
// the actual bytecode's offset. However, the OSR id is the offset of the
@@ -270,9 +276,10 @@ void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
DCHECK(!is_osr_loop ||
iterator.OffsetWithinBytecode(osr_loop_end_offset));
- // OSR "assigns" everything to OSR values on entry into an OSR loop, so we
- // need to make sure to considered everything to be assigned.
if (is_osr_loop) {
+ osr_entry_point_ = loop_header;
+ // OSR "assigns" everything to OSR values on entry into an OSR loop, so
+ // we need to make sure to consider everything to be assigned.
loop_stack_.top().loop_info->assignments().AddAll();
}
diff --git a/deps/v8/src/compiler/bytecode-analysis.h b/deps/v8/src/compiler/bytecode-analysis.h
index ad93f8a652..63dfa3107c 100644
--- a/deps/v8/src/compiler/bytecode-analysis.h
+++ b/deps/v8/src/compiler/bytecode-analysis.h
@@ -80,6 +80,11 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
// Get the loop info of the loop header at {header_offset}.
const LoopInfo& GetLoopInfoFor(int header_offset) const;
+ // True if the current analysis has an OSR entry point.
+ bool HasOSREntryPoint() const { return osr_entry_point_ != -1; }
+ // True if {offset} is the OSR entry loop header.
+ bool IsOSREntryPoint(int offset) const { return osr_entry_point_ == offset; }
+
// Gets the in-liveness for the bytecode at {offset}.
const BytecodeLivenessState* GetInLivenessFor(int offset) const;
@@ -113,6 +118,7 @@ class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
ZoneMap<int, int> end_to_header_;
ZoneMap<int, LoopInfo> header_to_info_;
+ int osr_entry_point_;
BytecodeLivenessMap liveness_map_;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index dcaed97481..5bb9a8e976 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -82,9 +82,8 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
bool StateValuesRequireUpdate(Node** state_values, Node** values, int count);
void UpdateStateValues(Node** state_values, Node** values, int count);
- void UpdateStateValuesWithCache(Node** state_values, Node** values, int count,
- const BitVector* liveness,
- int liveness_offset);
+ Node* GetStateValuesFromCache(Node** values, int count,
+ const BitVector* liveness, int liveness_offset);
int RegisterToValuesIndex(interpreter::Register the_register) const;
@@ -105,12 +104,22 @@ class BytecodeGraphBuilder::Environment : public ZoneObject {
Node* effect_dependency_;
NodeVector values_;
Node* parameters_state_values_;
- Node* registers_state_values_;
- Node* accumulator_state_values_;
int register_base_;
int accumulator_base_;
};
+// A helper for creating a temporary sub-environment for simple branches.
+struct BytecodeGraphBuilder::SubEnvironment final {
+ public:
+ explicit SubEnvironment(BytecodeGraphBuilder* builder)
+ : builder_(builder), parent_(builder->environment()->Copy()) {}
+
+ ~SubEnvironment() { builder_->set_environment(parent_); }
+
+ private:
+ BytecodeGraphBuilder* builder_;
+ BytecodeGraphBuilder::Environment* parent_;
+};
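
SubEnvironment is an RAII guard: the constructor snapshots the current environment via Copy() and the destructor switches the builder back to that snapshot, so a branch arm can mutate the environment freely inside a block scope; the rewritten BuildJumpIf/BuildJumpIfNot later in this patch show the real call sites. A generic demonstration of the same idiom, with an int standing in for the environment (illustration only):

    #include <cassert>

    // RAII save/restore, the same shape as SubEnvironment above.
    struct ScopedRestore {
      explicit ScopedRestore(int* slot) : slot_(slot), saved_(*slot) {}
      ~ScopedRestore() { *slot_ = saved_; }
      int* slot_;
      int saved_;
    };

    int main() {
      int environment = 1;
      {
        ScopedRestore sub(&environment);
        environment = 2;  // a branch arm mutates the environment freely
      }
      assert(environment == 1);  // the parent environment is restored
      return 0;
    }
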
// Issues:
// - Scopes - intimately tied to AST. Need to eval what is needed.
@@ -127,9 +136,7 @@ BytecodeGraphBuilder::Environment::Environment(BytecodeGraphBuilder* builder,
control_dependency_(control_dependency),
effect_dependency_(control_dependency),
values_(builder->local_zone()),
- parameters_state_values_(nullptr),
- registers_state_values_(nullptr),
- accumulator_state_values_(nullptr) {
+ parameters_state_values_(nullptr) {
// The layout of values_ is:
//
// [receiver] [parameters] [registers] [accumulator]
@@ -165,9 +172,7 @@ BytecodeGraphBuilder::Environment::Environment(
control_dependency_(other->control_dependency_),
effect_dependency_(other->effect_dependency_),
values_(other->zone()),
- parameters_state_values_(nullptr),
- registers_state_values_(nullptr),
- accumulator_state_values_(nullptr),
+ parameters_state_values_(other->parameters_state_values_),
register_base_(other->register_base_),
accumulator_base_(other->accumulator_base_) {
values_ = other->values_;
@@ -325,24 +330,6 @@ void BytecodeGraphBuilder::Environment::PrepareForOsrEntry() {
if (i >= accumulator_base()) idx = Linkage::kOsrAccumulatorRegisterIndex;
values()->at(i) = graph()->NewNode(common()->OsrValue(idx), entry);
}
-
- BailoutId loop_id(builder_->bytecode_iterator().current_offset());
- Node* frame_state =
- Checkpoint(loop_id, OutputFrameStateCombine::Ignore(), false, nullptr);
- Node* checkpoint =
- graph()->NewNode(common()->Checkpoint(), frame_state, entry, entry);
- UpdateEffectDependency(checkpoint);
-
- // Create the OSR guard nodes.
- const Operator* guard_op = common()->OsrGuard(OsrGuardType::kUninitialized);
- Node* effect = checkpoint;
- for (int i = 0; i < size; i++) {
- values()->at(i) = effect =
- graph()->NewNode(guard_op, values()->at(i), effect, entry);
- }
- Node* context = effect = graph()->NewNode(guard_op, Context(), effect, entry);
- SetContext(context);
- UpdateEffectDependency(effect);
}
bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
@@ -411,10 +398,9 @@ void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
}
}
-void BytecodeGraphBuilder::Environment::UpdateStateValuesWithCache(
- Node** state_values, Node** values, int count, const BitVector* liveness,
- int liveness_offset) {
- *state_values = builder_->state_values_cache_.GetNodeForValues(
+Node* BytecodeGraphBuilder::Environment::GetStateValuesFromCache(
+ Node** values, int count, const BitVector* liveness, int liveness_offset) {
+ return builder_->state_values_cache_.GetNodeForValues(
values, static_cast<size_t>(count), liveness, liveness_offset);
}
@@ -424,37 +410,27 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
if (parameter_count() == register_count()) {
// Re-use the state-value cache if the number of local registers happens
// to match the parameter count.
- UpdateStateValuesWithCache(&parameters_state_values_, &values()->at(0),
- parameter_count(), nullptr, 0);
+ parameters_state_values_ = GetStateValuesFromCache(
+ &values()->at(0), parameter_count(), nullptr, 0);
} else {
UpdateStateValues(&parameters_state_values_, &values()->at(0),
parameter_count());
}
- UpdateStateValuesWithCache(&registers_state_values_,
- &values()->at(register_base()), register_count(),
- liveness ? &liveness->bit_vector() : nullptr, 0);
+ Node* registers_state_values =
+ GetStateValuesFromCache(&values()->at(register_base()), register_count(),
+ liveness ? &liveness->bit_vector() : nullptr, 0);
bool accumulator_is_live = !liveness || liveness->AccumulatorIsLive();
- if (parameter_count() == 1 && accumulator_is_live &&
- values()->at(accumulator_base()) == values()->at(0)) {
- // Re-use the parameter state values if there happens to only be one
- // parameter and the accumulator is live and holds that parameter's value.
- accumulator_state_values_ = parameters_state_values_;
- } else {
- // Otherwise, use the state values cache to hopefully re-use local register
- // state values (if there is only one local register), or at the very least
- // re-use previous accumulator state values.
- UpdateStateValuesWithCache(
- &accumulator_state_values_, &values()->at(accumulator_base()), 1,
- liveness ? &liveness->bit_vector() : nullptr, register_count());
- }
+ Node* accumulator_state_value =
+ accumulator_is_live ? values()->at(accumulator_base())
+ : builder()->jsgraph()->OptimizedOutConstant();
const Operator* op = common()->FrameState(
bailout_id, combine, builder()->frame_state_function_info());
Node* result = graph()->NewNode(
- op, parameters_state_values_, registers_state_values_,
- accumulator_state_values_, Context(), builder()->GetFunctionClosure(),
+ op, parameters_state_values_, registers_state_values,
+ accumulator_state_value, Context(), builder()->GetFunctionClosure(),
builder()->graph()->start());
return result;
@@ -463,7 +439,7 @@ Node* BytecodeGraphBuilder::Environment::Checkpoint(
BytecodeGraphBuilder::BytecodeGraphBuilder(
Zone* local_zone, Handle<SharedFunctionInfo> shared_info,
Handle<FeedbackVector> feedback_vector, BailoutId osr_ast_id,
- JSGraph* jsgraph, float invocation_frequency,
+ JSGraph* jsgraph, CallFrequency invocation_frequency,
SourcePositionTable* source_positions, int inlining_id,
JSTypeHintLowering::Flags flags)
: local_zone_(local_zone),
@@ -482,7 +458,6 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
bytecode_analysis_(nullptr),
environment_(nullptr),
osr_ast_id_(osr_ast_id),
- osr_loop_offset_(-1),
merge_environments_(local_zone),
exception_handlers_(local_zone),
current_exception_handler_(0),
@@ -638,7 +613,7 @@ void BytecodeGraphBuilder::VisitBytecodes(bool stack_check) {
interpreter::BytecodeArrayIterator iterator(bytecode_array());
set_bytecode_iterator(&iterator);
SourcePositionTableIterator source_position_iterator(
- bytecode_array()->source_position_table());
+ handle(bytecode_array()->SourcePositionTable()));
if (FLAG_trace_environment_liveness) {
OFStream of(stdout);
@@ -907,9 +882,10 @@ BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
jsgraph()->TheHoleConstant());
NewBranch(check_no_extension);
- Environment* true_environment = environment()->Copy();
{
+ SubEnvironment sub_environment(this);
+
NewIfFalse();
// If there is an extension, merge into the slow path.
if (slow_environment == nullptr) {
@@ -920,12 +896,9 @@ BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::CheckContextExtensions(
}
}
- {
- set_environment(true_environment);
- NewIfTrue();
- // Do nothing on if there is no extension, eventually falling through to
- // the fast path.
- }
+ NewIfTrue();
+ // Do nothing if there is no extension, eventually falling through to
+ // the fast path.
}
// The depth can be zero, in which case no slow-path checks are built, and the
@@ -1398,7 +1371,7 @@ void BytecodeGraphBuilder::BuildCall(TailCallMode tail_call_mode,
STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
- float const frequency = ComputeCallFrequency(slot_id);
+ CallFrequency frequency = ComputeCallFrequency(slot_id);
const Operator* call = javascript()->Call(arg_count, frequency, feedback,
receiver_mode, tail_call_mode);
Node* value = ProcessCallArguments(call, args, static_cast<int>(arg_count));
@@ -1680,7 +1653,7 @@ void BytecodeGraphBuilder::VisitConstruct() {
Node* new_target = environment()->LookupAccumulator();
Node* callee = environment()->LookupRegister(callee_reg);
- float const frequency = ComputeCallFrequency(slot_id);
+ CallFrequency frequency = ComputeCallFrequency(slot_id);
const Operator* call = javascript()->Construct(
static_cast<uint32_t>(reg_count + 2), frequency, feedback);
Node* value =
@@ -1748,9 +1721,11 @@ CompareOperationHint BytecodeGraphBuilder::GetCompareOperationHint() {
return nexus.GetCompareOperationFeedback();
}
-float BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
+CallFrequency BytecodeGraphBuilder::ComputeCallFrequency(int slot_id) const {
+ if (invocation_frequency_.IsUnknown()) return CallFrequency();
CallICNexus nexus(feedback_vector(), feedback_vector()->ToSlot(slot_id));
- return nexus.ComputeCallFrequency() * invocation_frequency_;
+ return CallFrequency(nexus.ComputeCallFrequency() *
+ invocation_frequency_.value());
}
void BytecodeGraphBuilder::VisitAdd() {
@@ -1920,8 +1895,8 @@ void BytecodeGraphBuilder::BuildDelete(LanguageMode language_mode) {
Node* key = environment()->LookupAccumulator();
Node* object =
environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
- Node* node =
- NewNode(javascript()->DeleteProperty(language_mode), object, key);
+ Node* mode = jsgraph()->Constant(static_cast<int32_t>(language_mode));
+ Node* node = NewNode(javascript()->DeleteProperty(), object, key, mode);
environment()->BindAccumulator(node, Environment::kAttachFrameState);
}
@@ -2190,6 +2165,27 @@ void BytecodeGraphBuilder::VisitJumpIfNotUndefinedConstant() {
void BytecodeGraphBuilder::VisitJumpLoop() { BuildJump(); }
+void BytecodeGraphBuilder::BuildSwitchOnSmi(Node* condition) {
+ interpreter::JumpTableTargetOffsets offsets =
+ bytecode_iterator().GetJumpTableTargetOffsets();
+
+ NewSwitch(condition, offsets.size() + 1);
+ for (const auto& entry : offsets) {
+ SubEnvironment sub_environment(this);
+ NewIfValue(entry.case_value);
+ MergeIntoSuccessorEnvironment(entry.target_offset);
+ }
+ NewIfDefault();
+}
+
+void BytecodeGraphBuilder::VisitSwitchOnSmiNoFeedback() {
+ PrepareEagerCheckpoint();
+
+ Node* acc = environment()->LookupAccumulator();
+ Node* acc_smi = NewNode(simplified()->CheckSmi(), acc);
+ BuildSwitchOnSmi(acc_smi);
+}
+
void BytecodeGraphBuilder::VisitStackCheck() {
PrepareEagerCheckpoint();
Node* node = NewNode(javascript()->StackCheck());
@@ -2300,8 +2296,6 @@ void BytecodeGraphBuilder::VisitSuspendGenerator() {
}
void BytecodeGraphBuilder::VisitResumeGenerator() {
- PrepareEagerCheckpoint();
-
Node* generator = environment()->LookupRegister(
bytecode_iterator().GetRegisterOperand(0));
@@ -2315,7 +2309,7 @@ void BytecodeGraphBuilder::VisitResumeGenerator() {
Node* state =
NewNode(javascript()->GeneratorRestoreContinuation(), generator);
- environment()->BindAccumulator(state, Environment::kAttachFrameState);
+ environment()->BindAccumulator(state);
}
void BytecodeGraphBuilder::VisitWide() {
@@ -2387,7 +2381,7 @@ void BytecodeGraphBuilder::MergeControlToLeaveFunction(Node* exit) {
void BytecodeGraphBuilder::BuildOSRLoopEntryPoint(int current_offset) {
DCHECK(bytecode_analysis()->IsLoopHeader(current_offset));
- if (!osr_ast_id_.IsNone() && osr_loop_offset_ == current_offset) {
+ if (bytecode_analysis()->IsOSREntryPoint(current_offset)) {
// For OSR add a special {OsrLoopEntry} node into the current loop header.
// It will be turned into a usable entry by the OSR deconstruction.
Environment* osr_env = environment()->Copy();
@@ -2397,15 +2391,10 @@ void BytecodeGraphBuilder::BuildOSRLoopEntryPoint(int current_offset) {
}
void BytecodeGraphBuilder::BuildOSRNormalEntryPoint() {
- if (!osr_ast_id_.IsNone()) {
+ if (bytecode_analysis()->HasOSREntryPoint()) {
// For OSR add an {OsrNormalEntry} as the top-level environment start.
// It will be replaced with {Dead} by the OSR deconstruction.
NewNode(common()->OsrNormalEntry());
- // Translate the offset of the jump instruction to the jump target offset of
- // that instruction so that the derived BailoutId points to the loop header.
- osr_loop_offset_ =
- bytecode_analysis()->GetLoopOffsetFor(osr_ast_id_.ToInt());
- DCHECK(bytecode_analysis()->IsLoopHeader(osr_loop_offset_));
}
}
@@ -2440,19 +2429,21 @@ void BytecodeGraphBuilder::BuildJump() {
void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
NewBranch(condition);
- Environment* if_false_environment = environment()->Copy();
- NewIfTrue();
- MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
- set_environment(if_false_environment);
+ {
+ SubEnvironment sub_environment(this);
+ NewIfTrue();
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+ }
NewIfFalse();
}
void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
NewBranch(condition);
- Environment* if_true_environment = environment()->Copy();
- NewIfFalse();
- MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
- set_environment(if_true_environment);
+ {
+ SubEnvironment sub_environment(this);
+ NewIfFalse();
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+ }
NewIfTrue();
}
@@ -2472,24 +2463,26 @@ void BytecodeGraphBuilder::BuildJumpIfNotEqual(Node* comperand) {
void BytecodeGraphBuilder::BuildJumpIfFalse() {
NewBranch(environment()->LookupAccumulator());
- Environment* if_true_environment = environment()->Copy();
- environment()->BindAccumulator(jsgraph()->FalseConstant());
- NewIfFalse();
- MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
- if_true_environment->BindAccumulator(jsgraph()->TrueConstant());
- set_environment(if_true_environment);
+ {
+ SubEnvironment sub_environment(this);
+ NewIfFalse();
+ environment()->BindAccumulator(jsgraph()->FalseConstant());
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+ }
NewIfTrue();
+ environment()->BindAccumulator(jsgraph()->TrueConstant());
}
void BytecodeGraphBuilder::BuildJumpIfTrue() {
NewBranch(environment()->LookupAccumulator());
- Environment* if_false_environment = environment()->Copy();
- environment()->BindAccumulator(jsgraph()->TrueConstant());
- NewIfTrue();
- MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
- if_false_environment->BindAccumulator(jsgraph()->FalseConstant());
- set_environment(if_false_environment);
+ {
+ SubEnvironment sub_environment(this);
+ NewIfTrue();
+ environment()->BindAccumulator(jsgraph()->TrueConstant());
+ MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+ }
NewIfFalse();
+ environment()->BindAccumulator(jsgraph()->FalseConstant());
}
void BytecodeGraphBuilder::BuildJumpIfToBooleanTrue() {
@@ -2552,7 +2545,7 @@ Node* BytecodeGraphBuilder::TryBuildSimplifiedLoadNamed(const Operator* op,
// TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
// pruned from the graph by a soft-deopt. It can happen that a LoadIC that
// control-dominates the OSR entry is still in "uninitialized" state.
- if (!osr_ast_id_.IsNone()) return nullptr;
+ if (bytecode_analysis()->HasOSREntryPoint()) return nullptr;
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
Reduction early_reduction = type_hint_lowering().ReduceLoadNamedOperation(
@@ -2571,7 +2564,7 @@ Node* BytecodeGraphBuilder::TryBuildSimplifiedLoadKeyed(const Operator* op,
// TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
// pruned from the graph by a soft-deopt. It can happen that a LoadIC that
// control-dominates the OSR entry is still in "uninitialized" state.
- if (!osr_ast_id_.IsNone()) return nullptr;
+ if (bytecode_analysis()->HasOSREntryPoint()) return nullptr;
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
Reduction early_reduction = type_hint_lowering().ReduceLoadKeyedOperation(
@@ -2590,7 +2583,7 @@ Node* BytecodeGraphBuilder::TryBuildSimplifiedStoreNamed(const Operator* op,
// TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
// pruned from the graph by a soft-deopt. It can happen that a LoadIC that
// control-dominates the OSR entry is still in "uninitialized" state.
- if (!osr_ast_id_.IsNone()) return nullptr;
+ if (bytecode_analysis()->HasOSREntryPoint()) return nullptr;
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
Reduction early_reduction = type_hint_lowering().ReduceStoreNamedOperation(
@@ -2609,7 +2602,7 @@ Node* BytecodeGraphBuilder::TryBuildSimplifiedStoreKeyed(const Operator* op,
// TODO(mstarzinger,6112): This is a workaround for OSR loop entries being
// pruned from the graph by a soft-deopt. It can happen that a LoadIC that
// control-dominates the OSR entry is still in "uninitialized" state.
- if (!osr_ast_id_.IsNone()) return nullptr;
+ if (bytecode_analysis()->HasOSREntryPoint()) return nullptr;
Node* effect = environment()->GetEffectDependency();
Node* control = environment()->GetControlDependency();
Reduction early_reduction = type_hint_lowering().ReduceStoreKeyedOperation(
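The recurring pattern replaced throughout this file -- copy the environment, emit one branch arm, then swap the saved copy back in -- becomes a scoped SubEnvironment. Below is a minimal, self-contained sketch of that RAII save/restore idiom; Environment and Builder here are simplified stand-ins for V8's actual types, and allocation is zone-style (never freed), as in the compiler:

    #include <cstdio>

    // Illustrative stand-ins for the graph builder's environment machinery.
    struct Environment {
      int accumulator = 0;
      // Zone-style allocation: copies are never explicitly freed.
      Environment* Copy() const { return new Environment(*this); }
    };

    class Builder {
     public:
      Environment* environment() const { return environment_; }
      void set_environment(Environment* env) { environment_ = env; }

      // Saves a copy of the current environment on construction and restores
      // it on destruction, so each branch arm mutates its own private state --
      // the RAII replacement for the manual Copy()/set_environment() pairs.
      class SubEnvironment final {
       public:
        explicit SubEnvironment(Builder* builder)
            : builder_(builder), parent_(builder->environment()->Copy()) {}
        ~SubEnvironment() { builder_->set_environment(parent_); }

       private:
        Builder* builder_;
        Environment* parent_;
      };

     private:
      Environment* environment_ = new Environment();
    };

    int main() {
      Builder b;
      {
        Builder::SubEnvironment true_arm(&b);
        b.environment()->accumulator = 1;  // binding local to the true arm
      }  // pristine copy restored here
      std::printf("after true arm: %d\n", b.environment()->accumulator);  // 0
    }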
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.h b/deps/v8/src/compiler/bytecode-graph-builder.h
index 809a995dff..b963c6a197 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.h
+++ b/deps/v8/src/compiler/bytecode-graph-builder.h
@@ -29,7 +29,7 @@ class BytecodeGraphBuilder {
BytecodeGraphBuilder(
Zone* local_zone, Handle<SharedFunctionInfo> shared,
Handle<FeedbackVector> feedback_vector, BailoutId osr_ast_id,
- JSGraph* jsgraph, float invocation_frequency,
+ JSGraph* jsgraph, CallFrequency invocation_frequency,
SourcePositionTable* source_positions,
int inlining_id = SourcePosition::kNotInlined,
JSTypeHintLowering::Flags flags = JSTypeHintLowering::kNoFlags);
@@ -39,6 +39,7 @@ class BytecodeGraphBuilder {
private:
class Environment;
+ struct SubEnvironment;
void VisitBytecodes(bool stack_check);
@@ -90,11 +91,16 @@ class BytecodeGraphBuilder {
// Helpers to create new control nodes.
Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+ Node* NewIfValue(int32_t value) { return NewNode(common()->IfValue(value)); }
+ Node* NewIfDefault() { return NewNode(common()->IfDefault()); }
Node* NewMerge() { return NewNode(common()->Merge(1), true); }
Node* NewLoop() { return NewNode(common()->Loop(1), true); }
Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
return NewNode(common()->Branch(hint), condition);
}
+ Node* NewSwitch(Node* condition, int control_output_count) {
+ return NewNode(common()->Switch(control_output_count), condition);
+ }
// Creates a new Phi node having {count} input values.
Node* NewPhi(int count, Node* input, Node* control);
@@ -206,7 +212,7 @@ class BytecodeGraphBuilder {
// Helper function to compute call frequency from the recorded type
// feedback.
- float ComputeCallFrequency(int slot_id) const;
+ CallFrequency ComputeCallFrequency(int slot_id) const;
// Control flow plumbing.
void BuildJump();
@@ -221,6 +227,8 @@ class BytecodeGraphBuilder {
void BuildJumpIfNotHole();
void BuildJumpIfJSReceiver();
+ void BuildSwitchOnSmi(Node* condition);
+
// Simulates control flow by forward-propagating environments.
void MergeIntoSuccessorEnvironment(int target_offset);
void BuildLoopHeaderEnvironment(int current_offset);
@@ -315,7 +323,7 @@ class BytecodeGraphBuilder {
Zone* local_zone_;
JSGraph* jsgraph_;
- float const invocation_frequency_;
+ CallFrequency const invocation_frequency_;
Handle<BytecodeArray> bytecode_array_;
Handle<HandlerTable> exception_handler_table_;
Handle<FeedbackVector> feedback_vector_;
@@ -325,7 +333,6 @@ class BytecodeGraphBuilder {
const BytecodeAnalysis* bytecode_analysis_;
Environment* environment_;
BailoutId osr_ast_id_;
- int osr_loop_offset_;
// Merge environments are snapshots of the environment at points where the
// control flow merges. This models a forward data flow propagation of all
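ComputeCallFrequency now returns a CallFrequency value object instead of a raw float, making the "unknown frequency" case explicit (see the IsUnknown()/value() uses in the .cc hunks above). A minimal sketch of what such a wrapper needs to support those call sites -- the real class lives elsewhere in the compiler, so this is only an illustration using NaN as the unknown marker:

    #include <cassert>
    #include <cmath>

    // Sketch of a call-frequency value type with an explicit "unknown"
    // state, matching the default-constructed / IsUnknown() / value()
    // uses in the diff above.
    class CallFrequency {
     public:
      CallFrequency() : value_(std::nanf("")) {}  // unknown frequency
      explicit CallFrequency(float value) : value_(value) {
        assert(value >= 0.f && !std::isnan(value));
      }

      bool IsUnknown() const { return std::isnan(value_); }
      bool IsKnown() const { return !IsUnknown(); }
      float value() const {
        assert(IsKnown());  // callers must check IsUnknown() first
        return value_;
      }

     private:
      float value_;
    };

    int main() {
      CallFrequency unknown;
      assert(unknown.IsUnknown());
      CallFrequency hot(42.0f);
      assert(hot.IsKnown() && hot.value() == 42.0f);
    }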
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index e4795ad0b2..d8fc12624d 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -155,6 +155,8 @@ LinkageLocation regloc(Register reg, MachineType type) {
// General code uses the above configuration data.
CallDescriptor* Linkage::GetSimplifiedCDescriptor(
Zone* zone, const MachineSignature* msig, bool set_initialize_root_flag) {
+ DCHECK_LE(msig->parameter_count(), static_cast<size_t>(kMaxCParameters));
+
LocationSignature::Builder locations(zone, msig->return_count(),
msig->parameter_count());
// Check the types of the signature.
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 1bde4c6a4c..19bb76b125 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -639,7 +639,7 @@ Node* CodeAssembler::CallStubR(const CallInterfaceDescriptor& descriptor,
#define INSTANTIATE(...) \
template V8_EXPORT_PRIVATE Node* CodeAssembler::CallStubR( \
const CallInterfaceDescriptor& descriptor, size_t, Node*, __VA_ARGS__);
-REPEAT_1_TO_8(INSTANTIATE, Node*)
+REPEAT_1_TO_11(INSTANTIATE, Node*)
#undef INSTANTIATE
Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
@@ -709,6 +709,13 @@ Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
return raw_assembler()->CallN(desc, input_count, inputs);
}
+Node* CodeAssembler::CallCFunction1(MachineType return_type,
+ MachineType arg0_type, Node* function,
+ Node* arg0) {
+ return raw_assembler()->CallCFunction1(return_type, arg0_type, function,
+ arg0);
+}
+
Node* CodeAssembler::CallCFunction2(MachineType return_type,
MachineType arg0_type,
MachineType arg1_type, Node* function,
@@ -726,6 +733,28 @@ Node* CodeAssembler::CallCFunction3(MachineType return_type,
arg2_type, function, arg0, arg1, arg2);
}
+Node* CodeAssembler::CallCFunction6(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5) {
+ return raw_assembler()->CallCFunction6(
+ return_type, arg0_type, arg1_type, arg2_type, arg3_type, arg4_type,
+ arg5_type, function, arg0, arg1, arg2, arg3, arg4, arg5);
+}
+
+Node* CodeAssembler::CallCFunction9(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type, MachineType arg7_type,
+ MachineType arg8_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5, Node* arg6, Node* arg7, Node* arg8) {
+ return raw_assembler()->CallCFunction9(
+ return_type, arg0_type, arg1_type, arg2_type, arg3_type, arg4_type,
+ arg5_type, arg6_type, arg7_type, arg8_type, function, arg0, arg1, arg2,
+ arg3, arg4, arg5, arg6, arg7, arg8);
+}
+
void CodeAssembler::Goto(Label* label) {
label->MergeVariables();
raw_assembler()->Goto(label->label_);
@@ -967,7 +996,13 @@ void CodeAssemblerLabel::MergeVariables() {
#if DEBUG
void CodeAssemblerLabel::Bind(AssemblerDebugInfo debug_info) {
- DCHECK(!bound_);
+ if (bound_) {
+ std::stringstream str;
+ str << "Cannot bind the same label twice:"
+ << "\n# current: " << debug_info
+ << "\n# previous: " << *label_->block();
+ FATAL(str.str().c_str());
+ }
state_->raw_assembler_->Bind(label_, debug_info);
UpdateVariablesAfterBind();
}
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 86275ee0a0..1f2e4d8f4f 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -407,6 +407,10 @@ class V8_EXPORT_PRIVATE CodeAssembler {
Node* CallCFunctionN(Signature<MachineType>* signature, int input_count,
Node* const* inputs);
+ // Call to a C function with one argument.
+ Node* CallCFunction1(MachineType return_type, MachineType arg0_type,
+ Node* function, Node* arg0);
+
// Call to a C function with two arguments.
Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, Node* function, Node* arg0,
@@ -417,6 +421,24 @@ class V8_EXPORT_PRIVATE CodeAssembler {
MachineType arg1_type, MachineType arg2_type,
Node* function, Node* arg0, Node* arg1, Node* arg2);
+ // Call to a C function with six arguments.
+ Node* CallCFunction6(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, Node* function, Node* arg0,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5);
+
+ // Call to a C function with nine arguments.
+ Node* CallCFunction9(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type,
+ MachineType arg7_type, MachineType arg8_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5, Node* arg6,
+ Node* arg7, Node* arg8);
+
// Exception handling support.
void GotoIfException(Node* node, Label* if_exception,
Variable* exception_var = nullptr);
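The header now grows one fixed-arity CallCFunction overload per arity a call site needs (1, 2, 3, 6, 9). As an aside, the same shape can be expressed once by splitting (type, argument) pairs and forwarding to an N-ary backend; the sketch below is only an alternative illustration, not V8's API, and MachineType/Node/CallCFunctionN are placeholder stand-ins:

    #include <cassert>
    #include <initializer_list>
    #include <utility>
    #include <vector>

    struct MachineType {};
    struct Node {};

    // Stub for an N-ary backend analogous to CallCFunctionN above.
    Node* CallCFunctionN(MachineType /*return_type*/,
                         const std::vector<MachineType>& arg_types,
                         Node* /*function*/, const std::vector<Node*>& args) {
      assert(arg_types.size() == args.size());
      static Node result;
      return &result;
    }

    // One front end covering all arities: the pair list is split into the
    // two parallel vectors the backend expects.
    Node* CallCFunction(MachineType return_type, Node* function,
                        std::initializer_list<std::pair<MachineType, Node*>>
                            args) {
      std::vector<MachineType> types;
      std::vector<Node*> values;
      for (const auto& arg : args) {
        types.push_back(arg.first);
        values.push_back(arg.second);
      }
      return CallCFunctionN(return_type, types, function, values);
    }

    int main() {
      MachineType t;
      Node fn, a0, a1;
      // Equivalent of a CallCFunction2-style call site:
      Node* result = CallCFunction(t, &fn, {{t, &a0}, {t, &a1}});
      assert(result != nullptr);
    }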
diff --git a/deps/v8/src/compiler/code-generator.cc b/deps/v8/src/compiler/code-generator.cc
index 3723a98ebe..66232aa06f 100644
--- a/deps/v8/src/compiler/code-generator.cc
+++ b/deps/v8/src/compiler/code-generator.cc
@@ -60,7 +60,8 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
osr_pc_offset_(-1),
optimized_out_literal_id_(-1),
source_position_table_builder_(code->zone(),
- info->SourcePositionRecordingMode()) {
+ info->SourcePositionRecordingMode()),
+ result_(kSuccess) {
for (int i = 0; i < code->InstructionBlockCount(); ++i) {
new (&labels_[i]) Label;
}
@@ -74,8 +75,7 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) {
frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
}
-
-Handle<Code> CodeGenerator::GenerateCode() {
+void CodeGenerator::AssembleCode() {
CompilationInfo* info = this->info();
// Open a frame scope to indicate that there is a frame on the stack. The
@@ -99,8 +99,9 @@ Handle<Code> CodeGenerator::GenerateCode() {
DCHECK_EQ(0u, deoptimization_literals_.size());
for (CompilationInfo::InlinedFunctionHolder& inlined :
info->inlined_functions()) {
- if (!inlined.shared_info.is_identical_to(info->shared_info())) {
- int index = DefineDeoptimizationLiteral(inlined.shared_info);
+ if (!inlined.shared_info.equals(info->shared_info())) {
+ int index = DefineDeoptimizationLiteral(
+ DeoptimizationLiteral(inlined.shared_info));
inlined.RegisterInlinedFunctionId(index);
}
}
@@ -110,8 +111,9 @@ Handle<Code> CodeGenerator::GenerateCode() {
// functions. This ensures unoptimized code is kept alive by optimized code.
for (const CompilationInfo::InlinedFunctionHolder& inlined :
info->inlined_functions()) {
- if (!inlined.shared_info.is_identical_to(info->shared_info())) {
- DefineDeoptimizationLiteral(inlined.inlined_code_object_root);
+ if (!inlined.shared_info.equals(info->shared_info())) {
+ DefineDeoptimizationLiteral(
+ DeoptimizationLiteral(inlined.inlined_code_object_root));
}
}
@@ -173,14 +175,13 @@ Handle<Code> CodeGenerator::GenerateCode() {
}
}
- CodeGenResult result;
if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
ConstantPoolUnavailableScope constant_pool_unavailable(masm());
- result = AssembleBlock(block);
+ result_ = AssembleBlock(block);
} else {
- result = AssembleBlock(block);
+ result_ = AssembleBlock(block);
}
- if (result != kSuccess) return Handle<Code>();
+ if (result_ != kSuccess) return;
unwinding_info_writer_.EndInstructionBlock(block);
}
}
@@ -226,9 +227,15 @@ Handle<Code> CodeGenerator::GenerateCode() {
unwinding_info_writer_.Finish(masm()->pc_offset());
safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());
+ result_ = kSuccess;
+}
+
+Handle<Code> CodeGenerator::FinalizeCode() {
+ if (result_ != kSuccess) return Handle<Code>();
Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
- masm(), unwinding_info_writer_.eh_frame_writer(), info, Handle<Object>());
+ masm(), unwinding_info_writer_.eh_frame_writer(), info(),
+ Handle<Object>());
result->set_is_turbofanned(true);
result->set_stack_slots(frame()->GetTotalFrameSlotCount());
result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
@@ -253,7 +260,7 @@ Handle<Code> CodeGenerator::GenerateCode() {
PopulateDeoptimizationData(result);
// Ensure there is space for lazy deoptimization in the relocation info.
- if (info->ShouldEnsureSpaceForLazyDeopt()) {
+ if (info()->ShouldEnsureSpaceForLazyDeopt()) {
Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(result);
}
@@ -299,12 +306,9 @@ bool CodeGenerator::IsMaterializableFromRoot(
const CallDescriptor* incoming_descriptor =
linkage()->GetIncomingDescriptor();
if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
- RootIndexMap map(isolate());
- int root_index = map.Lookup(*object);
- if (root_index != RootIndexMap::kInvalidRootIndex) {
- *index_return = static_cast<Heap::RootListIndex>(root_index);
- return true;
- }
+ Heap* heap = isolate()->heap();
+ return heap->IsRootHandle(object, index_return) &&
+ heap->RootCanBeTreatedAsConstant(*index_return);
}
return false;
}
@@ -578,13 +582,11 @@ void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
static_cast<int>(deoptimization_literals_.size()), TENURED);
- {
- AllowDeferredHandleDereference copy_handles;
- for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
- literals->set(i, *deoptimization_literals_[i]);
- }
- data->SetLiteralArray(*literals);
+ for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
+ Handle<Object> object = deoptimization_literals_[i].Reify(isolate());
+ literals->set(i, *object);
}
+ data->SetLiteralArray(*literals);
Handle<PodArray<InliningPosition>> inl_pos = CreateInliningPositions(info);
data->SetInliningPositions(*inl_pos);
@@ -659,11 +661,10 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
}
}
-
-int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
+int CodeGenerator::DefineDeoptimizationLiteral(DeoptimizationLiteral literal) {
int result = static_cast<int>(deoptimization_literals_.size());
for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
- if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+ if (deoptimization_literals_[i] == literal) return i;
}
deoptimization_literals_.push_back(literal);
return result;
@@ -725,8 +726,8 @@ void CodeGenerator::TranslateStateValueDescriptor(
DCHECK(desc->IsOptimizedOut());
if (translation != nullptr) {
if (optimized_out_literal_id_ == -1) {
- optimized_out_literal_id_ =
- DefineDeoptimizationLiteral(isolate()->factory()->optimized_out());
+ optimized_out_literal_id_ = DefineDeoptimizationLiteral(
+ DeoptimizationLiteral(isolate()->factory()->optimized_out()));
}
translation->StoreLiteral(optimized_out_literal_id_);
}
@@ -793,7 +794,8 @@ void CodeGenerator::BuildTranslationForFrameStateDescriptor(
}
shared_info = info()->shared_info();
}
- int shared_info_id = DefineDeoptimizationLiteral(shared_info);
+ int shared_info_id =
+ DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info));
switch (descriptor->type()) {
case FrameStateType::kJavaScriptFunction:
@@ -909,22 +911,23 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
CHECK(op->IsImmediate());
InstructionOperandConverter converter(this, instr);
Constant constant = converter.ToConstant(op);
- Handle<Object> constant_object;
+ DeoptimizationLiteral literal;
switch (constant.type()) {
case Constant::kInt32:
if (type.representation() == MachineRepresentation::kTagged) {
// When pointers are 4 bytes, we can use int32 constants to represent
// Smis.
DCHECK_EQ(4, kPointerSize);
- constant_object =
- handle(reinterpret_cast<Smi*>(constant.ToInt32()), isolate());
- DCHECK(constant_object->IsSmi());
+ Smi* smi = reinterpret_cast<Smi*>(constant.ToInt32());
+ DCHECK(smi->IsSmi());
+ literal = DeoptimizationLiteral(smi->value());
} else if (type.representation() == MachineRepresentation::kBit) {
if (constant.ToInt32() == 0) {
- constant_object = isolate()->factory()->false_value();
+ literal =
+ DeoptimizationLiteral(isolate()->factory()->false_value());
} else {
DCHECK_EQ(1, constant.ToInt32());
- constant_object = isolate()->factory()->true_value();
+ literal = DeoptimizationLiteral(isolate()->factory()->true_value());
}
} else {
// TODO(jarin,bmeurer): We currently pass in raw pointers to the
@@ -936,11 +939,10 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
DCHECK(type.representation() != MachineRepresentation::kNone ||
constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
if (type == MachineType::Uint32()) {
- constant_object =
- isolate()->factory()->NewNumberFromUint(constant.ToInt32());
+ literal = DeoptimizationLiteral(
+ static_cast<uint32_t>(constant.ToInt32()));
} else {
- constant_object =
- isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+ literal = DeoptimizationLiteral(constant.ToInt32());
}
}
break;
@@ -952,31 +954,33 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
DCHECK(type.representation() == MachineRepresentation::kWord64 ||
type.representation() == MachineRepresentation::kTagged);
DCHECK_EQ(8, kPointerSize);
- constant_object =
- handle(reinterpret_cast<Smi*>(constant.ToInt64()), isolate());
- DCHECK(constant_object->IsSmi());
+ {
+ Smi* smi = reinterpret_cast<Smi*>(constant.ToInt64());
+ DCHECK(smi->IsSmi());
+ literal = DeoptimizationLiteral(smi->value());
+ }
break;
case Constant::kFloat32:
DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
type.representation() == MachineRepresentation::kTagged);
- constant_object = isolate()->factory()->NewNumber(constant.ToFloat32());
+ literal = DeoptimizationLiteral(constant.ToFloat32());
break;
case Constant::kFloat64:
DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
type.representation() == MachineRepresentation::kTagged);
- constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
+ literal = DeoptimizationLiteral(constant.ToFloat64());
break;
case Constant::kHeapObject:
DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
- constant_object = constant.ToHeapObject();
+ literal = DeoptimizationLiteral(constant.ToHeapObject());
break;
default:
UNREACHABLE();
}
- if (constant_object.is_identical_to(info()->closure())) {
+ if (literal.object().equals(info()->closure())) {
translation->StoreJSFrameFunction();
} else {
- int literal_id = DefineDeoptimizationLiteral(constant_object);
+ int literal_id = DefineDeoptimizationLiteral(literal);
translation->StoreLiteral(literal_id);
}
}
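The split of GenerateCode into AssembleCode and FinalizeCode, with the new result_ member carrying success between the phases, gives callers a two-phase protocol: assemble first, then finalize and check for a null handle. A sketch of that calling convention, where Handle and Code are simplified stand-ins rather than V8's real classes:

    #include <cassert>

    struct Code {};

    template <typename T>
    struct Handle {
      Handle() : ptr(nullptr) {}
      explicit Handle(T* p) : ptr(p) {}
      bool is_null() const { return ptr == nullptr; }
      T* ptr;
    };

    class CodeGenerator {
     public:
      // Phase 1: emit instructions, recording whether assembly succeeded.
      void AssembleCode() { result_ok_ = true; /* ... assemble blocks ... */ }

      // Phase 2: materialize a code object only if phase 1 succeeded;
      // a null handle signals failure to the caller.
      Handle<Code> FinalizeCode() {
        if (!result_ok_) return Handle<Code>();
        static Code code;
        return Handle<Code>(&code);
      }

     private:
      bool result_ok_ = false;
    };

    int main() {
      CodeGenerator cg;
      cg.AssembleCode();
      Handle<Code> code = cg.FinalizeCode();
      assert(!code.is_null());
    }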
diff --git a/deps/v8/src/compiler/code-generator.h b/deps/v8/src/compiler/code-generator.h
index b4873ff2d8..5d879a28a5 100644
--- a/deps/v8/src/compiler/code-generator.h
+++ b/deps/v8/src/compiler/code-generator.h
@@ -48,6 +48,31 @@ class InstructionOperandIterator {
size_t pos_;
};
+// Either a non-null Handle<Object> or a double.
+class DeoptimizationLiteral {
+ public:
+ DeoptimizationLiteral() : object_(), number_(0) {}
+ explicit DeoptimizationLiteral(Handle<Object> object)
+ : object_(object), number_(0) {
+ DCHECK(!object_.is_null());
+ }
+ explicit DeoptimizationLiteral(double number) : object_(), number_(number) {}
+
+ Handle<Object> object() const { return object_; }
+
+ bool operator==(const DeoptimizationLiteral& other) const {
+ return object_.equals(other.object_) &&
+ bit_cast<uint64_t>(number_) == bit_cast<uint64_t>(other.number_);
+ }
+
+ Handle<Object> Reify(Isolate* isolate) const {
+ return object_.is_null() ? isolate->factory()->NewNumber(number_) : object_;
+ }
+
+ private:
+ Handle<Object> object_;
+ double number_;
+};
// Generates native code for a sequence of instructions.
class CodeGenerator final : public GapResolver::Assembler {
@@ -55,8 +80,11 @@ class CodeGenerator final : public GapResolver::Assembler {
explicit CodeGenerator(Frame* frame, Linkage* linkage,
InstructionSequence* code, CompilationInfo* info);
- // Generate native code.
- Handle<Code> GenerateCode();
+ // Generate native code. After calling AssembleCode, call FinalizeCode to
+ // produce the actual code object. If an error occurs during either phase,
+ // FinalizeCode returns a null handle.
+ void AssembleCode();
+ Handle<Code> FinalizeCode();
InstructionSequence* code() const { return code_; }
FrameAccessState* frame_access_state() const { return frame_access_state_; }
@@ -208,7 +236,7 @@ class CodeGenerator final : public GapResolver::Assembler {
void RecordCallPosition(Instruction* instr);
void PopulateDeoptimizationData(Handle<Code> code);
- int DefineDeoptimizationLiteral(Handle<Object> literal);
+ int DefineDeoptimizationLiteral(DeoptimizationLiteral literal);
DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
size_t frame_state_offset);
DeoptimizeKind GetDeoptimizationKind(int deoptimization_id) const;
@@ -283,7 +311,7 @@ class CodeGenerator final : public GapResolver::Assembler {
ZoneVector<HandlerInfo> handlers_;
ZoneDeque<DeoptimizationExit*> deoptimization_exits_;
ZoneDeque<DeoptimizationState*> deoptimization_states_;
- ZoneDeque<Handle<Object>> deoptimization_literals_;
+ ZoneDeque<DeoptimizationLiteral> deoptimization_literals_;
size_t inlined_function_count_;
TranslationBuffer translations_;
int last_lazy_deopt_pc_;
@@ -292,6 +320,7 @@ class CodeGenerator final : public GapResolver::Assembler {
int osr_pc_offset_;
int optimized_out_literal_id_;
SourcePositionTableBuilder source_position_table_builder_;
+ CodeGenResult result_;
};
} // namespace compiler
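With deoptimization literals stored as DeoptimizationLiteral values, deduplication compares by value (operator==) rather than by handle identity, and plain numbers are only reified into heap objects when the deoptimization data is populated. A small sketch of that define-then-reify flow, with the heap handle replaced by an opaque object id for illustration:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Simplified literal: either a double or an (opaque) object id,
    // mirroring the object-or-number design above.
    class Literal {
     public:
      explicit Literal(double number) : object_id_(-1), number_(number) {}
      explicit Literal(int object_id) : object_id_(object_id), number_(0) {}

      bool operator==(const Literal& other) const {
        // Compare doubles bit-for-bit, as the bit_cast comparison above
        // does, so that e.g. 0.0 and -0.0 stay distinct.
        uint64_t a, b;
        std::memcpy(&a, &number_, sizeof a);
        std::memcpy(&b, &other.number_, sizeof b);
        return object_id_ == other.object_id_ && a == b;
      }

     private:
      int object_id_;
      double number_;
    };

    // Return the index of an equal literal, appending it if new.
    int DefineLiteral(std::vector<Literal>& literals, Literal literal) {
      for (size_t i = 0; i < literals.size(); ++i) {
        if (literals[i] == literal) return static_cast<int>(i);
      }
      literals.push_back(literal);
      return static_cast<int>(literals.size()) - 1;
    }

    int main() {
      std::vector<Literal> pool;
      assert(DefineLiteral(pool, Literal(1.5)) == 0);
      assert(DefineLiteral(pool, Literal(7)) == 1);
      assert(DefineLiteral(pool, Literal(1.5)) == 0);  // deduplicated by value
    }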
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index 0b98d575b1..f87c0755b8 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -307,26 +307,6 @@ int OsrValueIndexOf(Operator const* op) {
return OpParameter<int>(op);
}
-size_t hash_value(OsrGuardType type) { return static_cast<size_t>(type); }
-
-std::ostream& operator<<(std::ostream& os, OsrGuardType type) {
- switch (type) {
- case OsrGuardType::kUninitialized:
- return os << "Uninitialized";
- case OsrGuardType::kSignedSmall:
- return os << "SignedSmall";
- case OsrGuardType::kAny:
- return os << "Any";
- }
- UNREACHABLE();
- return os;
-}
-
-OsrGuardType OsrGuardTypeOf(Operator const* op) {
- DCHECK_EQ(IrOpcode::kOsrGuard, op->opcode());
- return OpParameter<OsrGuardType>(op);
-}
-
SparseInputMask SparseInputMaskOf(Operator const* op) {
DCHECK(op->opcode() == IrOpcode::kStateValues ||
op->opcode() == IrOpcode::kTypedStateValues);
@@ -1010,14 +990,6 @@ const Operator* CommonOperatorBuilder::OsrValue(int index) {
index); // parameter
}
-const Operator* CommonOperatorBuilder::OsrGuard(OsrGuardType type) {
- return new (zone()) Operator1<OsrGuardType>( // --
- IrOpcode::kOsrGuard, Operator::kNoThrow, // opcode
- "OsrGuard", // name
- 1, 1, 1, 1, 1, 0, // counts
- type); // parameter
-}
-
const Operator* CommonOperatorBuilder::Int32Constant(int32_t value) {
return new (zone()) Operator1<int32_t>( // --
IrOpcode::kInt32Constant, Operator::kPure, // opcode
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index d54bcc5311..2b51a814fe 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -287,11 +287,6 @@ Type* TypeGuardTypeOf(Operator const*) WARN_UNUSED_RESULT;
int OsrValueIndexOf(Operator const*);
-enum class OsrGuardType { kUninitialized, kSignedSmall, kAny };
-size_t hash_value(OsrGuardType type);
-std::ostream& operator<<(std::ostream&, OsrGuardType);
-OsrGuardType OsrGuardTypeOf(Operator const*);
-
SparseInputMask SparseInputMaskOf(Operator const*);
ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
@@ -337,7 +332,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* OsrNormalEntry();
const Operator* OsrLoopEntry();
const Operator* OsrValue(int index);
- const Operator* OsrGuard(OsrGuardType type);
const Operator* Int32Constant(int32_t);
const Operator* Int64Constant(int64_t);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index 0e48932c8d..6a75e8cff2 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -26,7 +26,8 @@ EffectControlLinearizer::EffectControlLinearizer(
schedule_(schedule),
temp_zone_(temp_zone),
source_positions_(source_positions),
- graph_assembler_(js_graph, nullptr, nullptr, temp_zone) {}
+ graph_assembler_(js_graph, nullptr, nullptr, temp_zone),
+ frame_state_zapper_(nullptr) {}
Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
CommonOperatorBuilder* EffectControlLinearizer::common() const {
@@ -429,6 +430,7 @@ void EffectControlLinearizer::Run() {
if (block_effects.For(block->PredecessorAt(i), block)
.current_frame_state != frame_state) {
frame_state = nullptr;
+ frame_state_zapper_ = graph()->end();
break;
}
}
@@ -502,6 +504,7 @@ void EffectControlLinearizer::ProcessNode(Node* node, Node** frame_state,
if (region_observability_ == RegionObservability::kObservable &&
!node->op()->HasProperty(Operator::kNoWrite)) {
*frame_state = nullptr;
+ frame_state_zapper_ = node;
}
// Remove the end markers of 'atomic' allocation region because the
@@ -681,6 +684,11 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
result = LowerCheckedFloat64ToInt32(node, frame_state);
break;
case IrOpcode::kCheckedTaggedSignedToInt32:
+ if (frame_state == nullptr) {
+ V8_Fatal(__FILE__, __LINE__, "No frame state (zapped by #%d: %s)",
+ frame_state_zapper_->id(),
+ frame_state_zapper_->op()->mnemonic());
+ }
result = LowerCheckedTaggedSignedToInt32(node, frame_state);
break;
case IrOpcode::kCheckedTaggedToInt32:
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index a1eb03cd11..bc18ff8162 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -155,6 +155,7 @@ class V8_EXPORT_PRIVATE EffectControlLinearizer {
RegionObservability region_observability_ = RegionObservability::kObservable;
SourcePositionTable* source_positions_;
GraphAssembler graph_assembler_;
+ Node* frame_state_zapper_; // For tracking down compiler::Node::New crashes.
};
} // namespace compiler
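The new frame_state_zapper_ field records the last node that invalidated the cached frame state, so the V8_Fatal path above can name the culprit when a lowering needs a frame state that is gone. A generic sketch of this record-the-invalidator diagnostic pattern (types are illustrative, not V8's):

    #include <cstdio>
    #include <cstdlib>

    struct Node {
      int id;
      const char* mnemonic;
    };

    Node* frame_state = nullptr;  // cached value
    Node* zapper = nullptr;       // who last cleared it

    void Invalidate(Node* by) {
      frame_state = nullptr;
      zapper = by;  // remember the invalidator for later diagnostics
    }

    Node* RequireFrameState() {
      if (frame_state == nullptr) {
        std::fprintf(stderr, "No frame state (zapped by #%d: %s)\n",
                     zapper ? zapper->id : -1,
                     zapper ? zapper->mnemonic : "<none>");
        std::abort();
      }
      return frame_state;
    }

    int main() {
      Node store{42, "Store"};
      Node fs{7, "FrameState"};
      frame_state = &fs;
      RequireFrameState();  // fine
      Invalidate(&store);   // an observable write clears the cached state
      // RequireFrameState() would now abort, naming node #42: Store.
    }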
diff --git a/deps/v8/src/compiler/escape-analysis.cc b/deps/v8/src/compiler/escape-analysis.cc
index 75a73ffce9..52935e0041 100644
--- a/deps/v8/src/compiler/escape-analysis.cc
+++ b/deps/v8/src/compiler/escape-analysis.cc
@@ -168,6 +168,9 @@ class VirtualObject : public ZoneObject {
bool IsCreatedPhi(size_t offset) { return phi_[offset]; }
void SetField(size_t offset, Node* node, bool created_phi = false) {
+ TRACE(" VirtualObject(%p)[%zu] changes from #%i to #%i\n",
+ static_cast<void*>(this), offset,
+ fields_[offset] ? fields_[offset]->id() : -1, node ? node->id() : -1);
fields_[offset] = node;
phi_[offset] = created_phi;
}
@@ -234,6 +237,8 @@ class VirtualObject : public ZoneObject {
DEFINE_OPERATORS_FOR_FLAGS(VirtualObject::StatusFlags)
bool VirtualObject::UpdateFrom(const VirtualObject& other) {
+ TRACE("%p.UpdateFrom(%p)\n", static_cast<void*>(this),
+ static_cast<const void*>(&other));
bool changed = status_ != other.status_;
status_ = other.status_;
phi_ = other.phi_;
@@ -425,19 +430,6 @@ bool IsEquivalentPhi(Node* node1, Node* node2) {
return true;
}
-bool IsEquivalentPhi(Node* phi, ZoneVector<Node*>& inputs) {
- if (phi->opcode() != IrOpcode::kPhi) return false;
- if (static_cast<size_t>(phi->op()->ValueInputCount()) != inputs.size()) {
- return false;
- }
- for (size_t i = 0; i < inputs.size(); ++i) {
- Node* input = NodeProperties::GetValueInput(phi, static_cast<int>(i));
- if (!IsEquivalentPhi(input, inputs[i])) {
- return false;
- }
- }
- return true;
-}
} // namespace
bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
@@ -446,19 +438,16 @@ bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
int value_input_count = static_cast<int>(cache->fields().size());
Node* rep = GetField(i);
if (!rep || !IsCreatedPhi(i)) {
- Type* phi_type = Type::None();
for (Node* input : cache->fields()) {
CHECK_NOT_NULL(input);
CHECK(!input->IsDead());
- Type* input_type = NodeProperties::GetType(input);
- phi_type = Type::Union(phi_type, input_type, graph->zone());
}
Node* control = NodeProperties::GetControlInput(at);
cache->fields().push_back(control);
Node* phi = graph->NewNode(
common->Phi(MachineRepresentation::kTagged, value_input_count),
value_input_count + 1, &cache->fields().front());
- NodeProperties::SetType(phi, phi_type);
+ NodeProperties::SetType(phi, Type::Any());
SetField(i, phi, true);
#ifdef DEBUG
@@ -1269,6 +1258,11 @@ void EscapeAnalysis::ForwardVirtualState(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
DCHECK_NOT_NULL(virtual_states_[effect->id()]);
if (virtual_states_[node->id()]) {
+ TRACE("Updating virtual state %p at %s#%d from virtual state %p at %s#%d\n",
+ static_cast<void*>(virtual_states_[node->id()]),
+ node->op()->mnemonic(), node->id(),
+ static_cast<void*>(virtual_states_[effect->id()]),
+ effect->op()->mnemonic(), effect->id());
virtual_states_[node->id()]->UpdateFrom(virtual_states_[effect->id()],
zone());
} else {
@@ -1452,6 +1446,7 @@ bool EscapeAnalysis::CompareVirtualObjects(Node* left, Node* right) {
namespace {
+#ifdef DEBUG
bool IsOffsetForFieldAccessCorrect(const FieldAccess& access) {
#if V8_TARGET_LITTLE_ENDIAN
return (access.offset % kPointerSize) == 0;
@@ -1461,6 +1456,7 @@ bool IsOffsetForFieldAccessCorrect(const FieldAccess& access) {
kPointerSize) == 0;
#endif
}
+#endif
int OffsetForFieldAccess(Node* node) {
FieldAccess access = FieldAccessOf(node->op());
@@ -1478,48 +1474,6 @@ int OffsetForElementAccess(Node* node, int index) {
} // namespace
-void EscapeAnalysis::ProcessLoadFromPhi(int offset, Node* from, Node* load,
- VirtualState* state) {
- TRACE("Load #%d from phi #%d", load->id(), from->id());
-
- cache_->fields().clear();
- for (int i = 0; i < load->op()->ValueInputCount(); ++i) {
- Node* input = NodeProperties::GetValueInput(load, i);
- cache_->fields().push_back(input);
- }
-
- cache_->LoadVirtualObjectsForFieldsFrom(state,
- status_analysis_->GetAliasMap());
- if (cache_->objects().size() == cache_->fields().size()) {
- cache_->GetFields(offset);
- if (cache_->fields().size() == cache_->objects().size()) {
- Node* rep = replacement(load);
- if (!rep || !IsEquivalentPhi(rep, cache_->fields())) {
- int value_input_count = static_cast<int>(cache_->fields().size());
- Type* phi_type = Type::None();
- for (Node* input : cache_->fields()) {
- Type* input_type = NodeProperties::GetType(input);
- phi_type = Type::Union(phi_type, input_type, graph()->zone());
- }
- cache_->fields().push_back(NodeProperties::GetControlInput(from));
- Node* phi = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, value_input_count),
- value_input_count + 1, &cache_->fields().front());
- NodeProperties::SetType(phi, phi_type);
- status_analysis_->ResizeStatusVector();
- SetReplacement(load, phi);
- TRACE(" got phi created.\n");
- } else {
- TRACE(" has already phi #%d.\n", rep->id());
- }
- } else {
- TRACE(" has incomplete field info.\n");
- }
- } else {
- TRACE(" has incomplete virtual object info.\n");
- }
-}
-
void EscapeAnalysis::ProcessLoadField(Node* node) {
DCHECK_EQ(node->opcode(), IrOpcode::kLoadField);
ForwardVirtualState(node);
@@ -1548,11 +1502,6 @@ void EscapeAnalysis::ProcessLoadField(Node* node) {
}
// Record that the load has this alias.
UpdateReplacement(state, node, value);
- } else if (from->opcode() == IrOpcode::kPhi &&
- IsOffsetForFieldAccessCorrect(FieldAccessOf(node->op()))) {
- int offset = OffsetForFieldAccess(node);
- // Only binary phis are supported for now.
- ProcessLoadFromPhi(offset, from, node, state);
} else {
UpdateReplacement(state, node, nullptr);
}
@@ -1620,9 +1569,6 @@ void EscapeAnalysis::ProcessLoadElement(Node* node) {
}
// Record that the load has this alias.
UpdateReplacement(state, node, value);
- } else if (from->opcode() == IrOpcode::kPhi) {
- int offset = OffsetForElementAccess(node, index.Value());
- ProcessLoadFromPhi(offset, from, node, state);
} else {
UpdateReplacement(state, node, nullptr);
}
@@ -1670,8 +1616,8 @@ void EscapeAnalysis::ProcessStoreField(Node* node) {
FieldAccessOf(node->op()).offset == Name::kHashFieldOffset);
val = slot_not_analyzed_;
}
+ object = CopyForModificationAt(object, state, node);
if (object->GetField(offset) != val) {
- object = CopyForModificationAt(object, state, node);
object->SetField(offset, val);
}
}
@@ -1694,8 +1640,8 @@ void EscapeAnalysis::ProcessStoreElement(Node* node) {
int offset = OffsetForElementAccess(node, index.Value());
if (static_cast<size_t>(offset) >= object->field_count()) return;
Node* val = ResolveReplacement(NodeProperties::GetValueInput(node, 2));
+ object = CopyForModificationAt(object, state, node);
if (object->GetField(offset) != val) {
- object = CopyForModificationAt(object, state, node);
object->SetField(offset, val);
}
}
@@ -1710,8 +1656,8 @@ void EscapeAnalysis::ProcessStoreElement(Node* node) {
}
if (VirtualObject* object = GetVirtualObject(state, to)) {
if (!object->IsTracked()) return;
+ object = CopyForModificationAt(object, state, node);
if (!object->AllFieldsClear()) {
- object = CopyForModificationAt(object, state, node);
object->ClearAllFields();
TRACE("Cleared all fields of @%d:#%d\n",
status_analysis_->GetAlias(object->id()), object->id());
diff --git a/deps/v8/src/compiler/escape-analysis.h b/deps/v8/src/compiler/escape-analysis.h
index e5e8aa362a..a136e568da 100644
--- a/deps/v8/src/compiler/escape-analysis.h
+++ b/deps/v8/src/compiler/escape-analysis.h
@@ -53,8 +53,6 @@ class V8_EXPORT_PRIVATE EscapeAnalysis {
void ProcessCall(Node* node);
void ProcessStart(Node* node);
bool ProcessEffectPhi(Node* node);
- void ProcessLoadFromPhi(int offset, Node* from, Node* node,
- VirtualState* states);
void ForwardVirtualState(Node* node);
VirtualState* CopyForModificationAt(VirtualState* state, Node* node);
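The store hunks above move CopyForModificationAt ahead of the field comparison, so the state always takes ownership of its virtual object before inspecting or mutating it. The sketch below shows a loosely analogous copy-before-modify discipline using shared_ptr; V8 uses per-state virtual objects and zone allocation rather than reference counting, so this is an illustration of the idea only:

    #include <cassert>
    #include <memory>
    #include <vector>

    struct VirtualObject {
      std::vector<int> fields;
    };

    using ObjectRef = std::shared_ptr<VirtualObject>;

    // Clone the shared object for this state *before* reading or writing
    // its fields, mirroring the reordered CopyForModificationAt calls.
    void StoreField(ObjectRef& object, size_t offset, int value) {
      if (object.use_count() != 1) {
        object = std::make_shared<VirtualObject>(*object);
      }
      if (object->fields[offset] != value) object->fields[offset] = value;
    }

    int main() {
      ObjectRef a = std::make_shared<VirtualObject>(VirtualObject{{1, 2, 3}});
      ObjectRef b = a;  // two states share one virtual object
      StoreField(b, 0, 9);
      assert(a->fields[0] == 1);  // a's view is unaffected
      assert(b->fields[0] == 9);
    }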
diff --git a/deps/v8/src/compiler/frame.h b/deps/v8/src/compiler/frame.h
index a4d6829cfa..8a6c18951a 100644
--- a/deps/v8/src/compiler/frame.h
+++ b/deps/v8/src/compiler/frame.h
@@ -111,9 +111,18 @@ class Frame : public ZoneObject {
frame_slot_count_ += count;
}
- int AllocateSpillSlot(int width) {
+ int AllocateSpillSlot(int width, int alignment = 0) {
int frame_slot_count_before = frame_slot_count_;
- AllocateAlignedFrameSlots(width);
+ if (alignment <= kPointerSize) {
+ AllocateAlignedFrameSlots(width);
+ } else {
+ // Allocate extra space for the spill slot so that the start of the
+ // slot can be properly aligned to the requested alignment while
+ // still leaving enough room to hold all of the data.
+ AllocateAlignedFrameSlots(width + alignment - kPointerSize);
+ }
spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
return frame_slot_count_ - 1;
}
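The new alignment parameter over-allocates by alignment - kPointerSize so that an aligned start always fits inside the reserved range: frame slots are pointer-aligned, so in the worst case the reservation begins alignment - kPointerSize bytes short of an aligned boundary. A worked sketch of that arithmetic, expressed in bytes and assuming kPointerSize is 8:

    #include <cassert>
    #include <cstdio>

    constexpr int kPointerSize = 8;  // assumption for this sketch

    // Bytes to reserve so that a width-byte slot can be placed at an
    // alignment-aligned address somewhere inside the reservation.
    int BytesToReserve(int width, int alignment) {
      if (alignment <= kPointerSize) return width;
      // Worst case the reservation starts kPointerSize short of an
      // aligned address, so pad by alignment - kPointerSize.
      return width + alignment - kPointerSize;
    }

    int main() {
      assert(BytesToReserve(8, 8) == 8);     // pointer alignment: no padding
      assert(BytesToReserve(16, 16) == 24);  // one extra slot of padding
      std::printf("16-byte slot at 16-byte alignment reserves %d bytes\n",
                  BytesToReserve(16, 16));
    }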
diff --git a/deps/v8/src/compiler/graph-reducer.cc b/deps/v8/src/compiler/graph-reducer.cc
index 117e569ad8..cf4d9154e4 100644
--- a/deps/v8/src/compiler/graph-reducer.cc
+++ b/deps/v8/src/compiler/graph-reducer.cc
@@ -56,7 +56,7 @@ void GraphReducer::ReduceNode(Node* node) {
ReduceTop();
} else if (!revisit_.empty()) {
// If the stack becomes empty, revisit any nodes in the revisit queue.
- Node* const node = revisit_.top();
+ Node* const node = revisit_.front();
revisit_.pop();
if (state_.Get(node) == State::kRevisit) {
// state can change while in queue.
@@ -146,6 +146,10 @@ void GraphReducer::ReduceTop() {
// Check if the reduction is an in-place update of the {node}.
Node* const replacement = reduction.replacement();
if (replacement == node) {
+ if (FLAG_trace_turbo_reduction) {
+ OFStream os(stdout);
+ os << "- In-place update of " << *replacement << std::endl;
+ }
// In-place update of {node}, may need to recurse on an input.
Node::Inputs node_inputs = node->inputs();
for (int i = 0; i < node_inputs.count(); ++i) {
@@ -240,8 +244,6 @@ void GraphReducer::ReplaceWithValue(Node* node, Node* value, Node* effect,
DCHECK_NOT_NULL(control);
edge.UpdateTo(control);
Revisit(user);
- // TODO(jarin) Check that the node cannot throw (otherwise, it
- // would have to be connected via IfSuccess/IfException).
}
} else if (NodeProperties::IsEffectEdge(edge)) {
DCHECK_NOT_NULL(effect);
diff --git a/deps/v8/src/compiler/graph-reducer.h b/deps/v8/src/compiler/graph-reducer.h
index b95cf9df2d..d271881872 100644
--- a/deps/v8/src/compiler/graph-reducer.h
+++ b/deps/v8/src/compiler/graph-reducer.h
@@ -174,7 +174,7 @@ class V8_EXPORT_PRIVATE GraphReducer
Node* const dead_;
NodeMarker<State> state_;
ZoneVector<Reducer*> reducers_;
- ZoneStack<Node*> revisit_;
+ ZoneQueue<Node*> revisit_;
ZoneStack<NodeState> stack_;
DISALLOW_COPY_AND_ASSIGN(GraphReducer);
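Changing revisit_ from ZoneStack to ZoneQueue turns revisits from LIFO into FIFO order, which is why top() becomes front() in ReduceNode above. A tiny standalone illustration of the traversal-order difference using the standard containers:

    #include <iostream>
    #include <queue>
    #include <stack>

    int main() {
      std::stack<int> lifo;  // old ZoneStack-style behavior
      std::queue<int> fifo;  // new ZoneQueue-style behavior
      for (int node : {1, 2, 3}) {
        lifo.push(node);
        fifo.push(node);
      }
      std::cout << "stack revisit order: ";
      while (!lifo.empty()) { std::cout << lifo.top() << ' '; lifo.pop(); }
      std::cout << "\nqueue revisit order: ";
      while (!fifo.empty()) { std::cout << fifo.front() << ' '; fifo.pop(); }
      std::cout << '\n';  // prints 3 2 1 vs 1 2 3
    }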
diff --git a/deps/v8/src/compiler/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
index 9dbf19c3f5..dabdab3810 100644
--- a/deps/v8/src/compiler/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/ia32/code-generator-ia32.cc
@@ -780,7 +780,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Label binop; \
__ bind(&binop); \
__ mov_inst(eax, i.MemoryOperand(1)); \
- __ mov_inst(i.TempRegister(0), Operand(eax)); \
+ __ Move(i.TempRegister(0), eax); \
__ bin_inst(i.TempRegister(0), i.InputRegister(0)); \
__ lock(); \
__ cmpxchg_inst(i.MemoryOperand(1), i.TempRegister(0)); \
diff --git a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
index 6fd1ad5656..dccfced9e1 100644
--- a/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
@@ -212,6 +212,14 @@ void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
} // namespace
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -1864,11 +1872,7 @@ void InstructionSelector::VisitAtomicBinaryOperation(
AddressingMode addressing_mode;
InstructionOperand inputs[3];
size_t input_count = 0;
- if (type == MachineType::Int8() || type == MachineType::Uint8()) {
- inputs[input_count++] = g.UseByteRegister(value);
- } else {
- inputs[input_count++] = g.UseUniqueRegister(value);
- }
+ inputs[input_count++] = g.UseUniqueRegister(value);
inputs[input_count++] = g.UseUniqueRegister(base);
if (g.CanBeImmediate(index)) {
inputs[input_count++] = g.UseImmediate(index);
@@ -1879,7 +1883,11 @@ void InstructionSelector::VisitAtomicBinaryOperation(
}
outputs[0] = g.DefineAsFixed(node, eax);
InstructionOperand temp[1];
- temp[0] = g.TempRegister();
+ if (type == MachineType::Int8() || type == MachineType::Uint8()) {
+ temp[0] = g.UseByteRegister(node);
+ } else {
+ temp[0] = g.TempRegister();
+ }
InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
Emit(code, 1, outputs, input_count, inputs, 1, temp);
}
diff --git a/deps/v8/src/compiler/instruction-selector.cc b/deps/v8/src/compiler/instruction-selector.cc
index a9b935d5b6..1d07799511 100644
--- a/deps/v8/src/compiler/instruction-selector.cc
+++ b/deps/v8/src/compiler/instruction-selector.cc
@@ -513,6 +513,11 @@ size_t InstructionSelector::AddOperandToStateValueDescriptor(
switch (input->opcode()) {
case IrOpcode::kArgumentsElementsState: {
values->PushArgumentsElements(IsRestOf(input->op()));
+ // The elements backing store of an arguments object participates in the
+ // duplicate object counting, but can itself never appear duplicated.
+ DCHECK_EQ(StateObjectDeduplicator::kNotDuplicated,
+ deduplicator->GetObjectId(input));
+ deduplicator->InsertObject(input);
return 0;
}
case IrOpcode::kArgumentsLengthState: {
@@ -921,9 +926,14 @@ void InstructionSelector::VisitControl(BasicBlock* block) {
if (block->SuccessorCount() > 1) {
for (BasicBlock* const successor : block->successors()) {
for (Node* const node : *successor) {
- // If this CHECK fails, you might have specified merged variables
- // for a label with only one predecessor.
- CHECK(!IrOpcode::IsPhiOpcode(node->opcode()));
+ if (IrOpcode::IsPhiOpcode(node->opcode())) {
+ std::ostringstream str;
+ str << "You might have specified merged variables for a label with "
+ << "only one predecessor." << std::endl
+ << "# Current Block: " << *successor << std::endl
+ << "# Node: " << *node;
+ FATAL(str.str().c_str());
+ }
}
}
}
@@ -1500,10 +1510,12 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Neg(node);
case IrOpcode::kF32x4RecipApprox:
return MarkAsSimd128(node), VisitF32x4RecipApprox(node);
- case IrOpcode::kF32x4RecipRefine:
- return MarkAsSimd128(node), VisitF32x4RecipRefine(node);
+ case IrOpcode::kF32x4RecipSqrtApprox:
+ return MarkAsSimd128(node), VisitF32x4RecipSqrtApprox(node);
case IrOpcode::kF32x4Add:
return MarkAsSimd128(node), VisitF32x4Add(node);
+ case IrOpcode::kF32x4AddHoriz:
+ return MarkAsSimd128(node), VisitF32x4AddHoriz(node);
case IrOpcode::kF32x4Sub:
return MarkAsSimd128(node), VisitF32x4Sub(node);
case IrOpcode::kF32x4Mul:
@@ -1512,10 +1524,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitF32x4Min(node);
case IrOpcode::kF32x4Max:
return MarkAsSimd128(node), VisitF32x4Max(node);
- case IrOpcode::kF32x4RecipSqrtApprox:
- return MarkAsSimd128(node), VisitF32x4RecipSqrtApprox(node);
- case IrOpcode::kF32x4RecipSqrtRefine:
- return MarkAsSimd128(node), VisitF32x4RecipSqrtRefine(node);
case IrOpcode::kF32x4Eq:
return MarkAsSimd1x4(node), VisitF32x4Eq(node);
case IrOpcode::kF32x4Ne:
@@ -1544,6 +1552,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI32x4ShrS(node);
case IrOpcode::kI32x4Add:
return MarkAsSimd128(node), VisitI32x4Add(node);
+ case IrOpcode::kI32x4AddHoriz:
+ return MarkAsSimd128(node), VisitI32x4AddHoriz(node);
case IrOpcode::kI32x4Sub:
return MarkAsSimd128(node), VisitI32x4Sub(node);
case IrOpcode::kI32x4Mul:
@@ -1598,6 +1608,8 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitI16x8Add(node);
case IrOpcode::kI16x8AddSaturateS:
return MarkAsSimd128(node), VisitI16x8AddSaturateS(node);
+ case IrOpcode::kI16x8AddHoriz:
+ return MarkAsSimd128(node), VisitI16x8AddHoriz(node);
case IrOpcode::kI16x8Sub:
return MarkAsSimd128(node), VisitI16x8Sub(node);
case IrOpcode::kI16x8SubSaturateS:
@@ -1698,10 +1710,16 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsSimd128(node), VisitS128Xor(node);
case IrOpcode::kS128Not:
return MarkAsSimd128(node), VisitS128Not(node);
+ case IrOpcode::kS32x4Shuffle:
+ return MarkAsSimd128(node), VisitS32x4Shuffle(node);
case IrOpcode::kS32x4Select:
return MarkAsSimd128(node), VisitS32x4Select(node);
+ case IrOpcode::kS16x8Shuffle:
+ return MarkAsSimd128(node), VisitS16x8Shuffle(node);
case IrOpcode::kS16x8Select:
return MarkAsSimd128(node), VisitS16x8Select(node);
+ case IrOpcode::kS8x16Shuffle:
+ return MarkAsSimd128(node), VisitS8x16Shuffle(node);
case IrOpcode::kS8x16Select:
return MarkAsSimd128(node), VisitS8x16Select(node);
case IrOpcode::kS1x4Zero:
@@ -1887,14 +1905,6 @@ void InstructionSelector::EmitLookupSwitch(const SwitchInfo& sw,
Emit(kArchLookupSwitch, 0, nullptr, input_count, inputs, 0, nullptr);
}
-void InstructionSelector::VisitStackSlot(Node* node) {
- int size = StackSlotSizeOf(node->op());
- int slot = frame_->AllocateSpillSlot(size);
- OperandGenerator g(this);
-
- Emit(kArchStackSlot, g.DefineAsRegister(node),
- sequence()->AddImmediate(Constant(slot)), 0, nullptr);
-}
void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
EmitIdentity(node);
@@ -2088,9 +2098,7 @@ void InstructionSelector::VisitF32x4SConvertI32x4(Node* node) {
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
-#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitF32x4Abs(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Neg(Node* node) { UNIMPLEMENTED(); }
@@ -2099,12 +2107,14 @@ void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitF32x4RecipSqrtRefine(Node* node) {
- UNIMPLEMENTED();
-}
-
void InstructionSelector::VisitF32x4Add(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitF32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitF32x4Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Mul(Node* node) { UNIMPLEMENTED(); }
@@ -2115,8 +2125,6 @@ void InstructionSelector::VisitF32x4Min(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4RecipApprox(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitF32x4RecipRefine(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitF32x4Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
@@ -2124,7 +2132,7 @@ void InstructionSelector::VisitF32x4Ne(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Lt(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Le(Node* node) { UNIMPLEMENTED(); }
-#endif // V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_IA32 && \
!V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
@@ -2164,11 +2172,21 @@ void InstructionSelector::VisitI32x4ShrU(Node* node) { UNIMPLEMENTED(); }
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
// !V8_TARGET_ARCH_MIPS64
-#if !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
+void InstructionSelector::VisitI32x4AddHoriz(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
+
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
UNIMPLEMENTED();
}
+void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
+ UNIMPLEMENTED();
+}
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI32x4SConvertI16x8Low(Node* node) {
UNIMPLEMENTED();
}
@@ -2177,51 +2195,52 @@ void InstructionSelector::VisitI32x4SConvertI16x8High(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI32x4LtS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
+ UNIMPLEMENTED();
+}
-void InstructionSelector::VisitI32x4LeS(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
+ UNIMPLEMENTED();
+}
-void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
+void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI32x4UConvertI16x8Low(Node* node) {
+void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI32x4UConvertI16x8High(Node* node) {
+void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4LtS(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI32x4LeS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4LtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI32x4LeU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8SConvertI8x16Low(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8SConvertI8x16High(Node* node) {
- UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI16x8Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8ShrS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8SConvertI32x4(Node* node) {
- UNIMPLEMENTED();
-}
+void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8Add(Node* node) { UNIMPLEMENTED(); }
@@ -2234,7 +2253,15 @@ void InstructionSelector::VisitI16x8Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitI16x8AddHoriz(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitI16x8Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8MinS(Node* node) { UNIMPLEMENTED(); }
@@ -2245,70 +2272,85 @@ void InstructionSelector::VisitI16x8Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8Ne(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8LtS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8LeS(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8ShrU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI16x8Neg(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
+void InstructionSelector::VisitI16x8UConvertI8x16Low(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
+void InstructionSelector::VisitI16x8UConvertI8x16High(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM
-void InstructionSelector::VisitI16x8MinU(Node* node) { UNIMPLEMENTED(); }
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI16x8LtS(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI16x8MaxU(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI16x8LeS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8LtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI16x8LeU(Node* node) { UNIMPLEMENTED(); }
-void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16Neg(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Shl(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16ShrS(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
+void InstructionSelector::VisitI8x16Splat(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) { UNIMPLEMENTED(); }
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+
+#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16SConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16Add(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16AddSaturateS(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16Sub(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16SubSaturateS(Node* node) {
UNIMPLEMENTED();
}
-void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
-
void InstructionSelector::VisitI8x16MinS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
@@ -2316,6 +2358,10 @@ void InstructionSelector::VisitI8x16MaxS(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Eq(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16Ne(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitI8x16Mul(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16LtS(Node* node) { UNIMPLEMENTED(); }
@@ -2326,7 +2372,9 @@ void InstructionSelector::VisitI8x16ShrU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
UNIMPLEMENTED();
}
+#endif // !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16AddSaturateU(Node* node) {
UNIMPLEMENTED();
}
@@ -2338,11 +2386,15 @@ void InstructionSelector::VisitI8x16SubSaturateU(Node* node) {
void InstructionSelector::VisitI8x16MinU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16MaxU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitI8x16LtU(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI8x16LeU(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_ARM
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitS128And(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
@@ -2350,7 +2402,7 @@ void InstructionSelector::VisitS128Or(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Xor(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS128Not(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_ARM
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
!V8_TARGET_ARCH_MIPS64
@@ -2371,10 +2423,30 @@ void InstructionSelector::VisitS32x4Select(Node* node) { UNIMPLEMENTED(); }
// !V8_TARGET_ARCH_MIPS64
#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitS32x4Shuffle(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitS16x8Shuffle(Node* node) { UNIMPLEMENTED(); }
+
+#endif // !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitS16x8Select(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitS8x16Shuffle(Node* node) { UNIMPLEMENTED(); }
+
+#endif // !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \
+ !V8_TARGET_ARCH_MIPS64
void InstructionSelector::VisitS8x16Select(Node* node) { UNIMPLEMENTED(); }
+#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS &&
+ // !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_ARM
void InstructionSelector::VisitS1x4And(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitS1x4Or(Node* node) { UNIMPLEMENTED(); }
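A note on the churn above: instruction-selector.cc keeps a generic UNIMPLEMENTED() stub for every SIMD visitor, fenced so it is compiled only on targets that lack a native lowering. Porting an op to a new backend therefore means tightening the fence (adding the architecture to the exclusion list) rather than deleting code, which is why this hunk mostly moves stubs between #if blocks. A minimal sketch of the shape, with a hypothetical op name:

    #if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64
    // Generic fallback: selecting this op on any remaining target is a bug,
    // so the stub aborts via UNIMPLEMENTED(). ARM and x64 would provide real
    // lowerings in their instruction-selector-<arch>.cc files.
    void InstructionSelector::VisitF32x4HypotheticalOp(Node* node) {
      UNIMPLEMENTED();
    }
    #endif  // !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_X64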
diff --git a/deps/v8/src/compiler/instruction.h b/deps/v8/src/compiler/instruction.h
index bbcd03d3ec..5cb28627de 100644
--- a/deps/v8/src/compiler/instruction.h
+++ b/deps/v8/src/compiler/instruction.h
@@ -1109,12 +1109,12 @@ class V8_EXPORT_PRIVATE Constant final {
private:
Type type_;
- int64_t value_;
#if V8_TARGET_ARCH_32_BIT
RelocInfo::Mode rmode_ = RelocInfo::NONE32;
#else
RelocInfo::Mode rmode_ = RelocInfo::NONE64;
#endif
+ int64_t value_;
};
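The one-line move above is a struct-packing fix rather than a behavioral change: with the 8-byte value_ sitting between two 4-byte enum fields, a 64-bit ABI pads both before value_ and after rmode_; putting the two 4-byte fields first closes both holes. A self-contained illustration with int32_t stand-ins for the enums (sizes assume a typical LP64 ABI; this snippet is illustrative, not from the patch):

    #include <cstdint>
    #include <cstdio>

    struct Before { int32_t type; int64_t value; int32_t rmode; };  // 4 + pad4 + 8 + 4 + pad4
    struct After  { int32_t type; int32_t rmode; int64_t value; };  // 4 + 4 + 8

    int main() {
      // Prints "24 16" on a typical LP64 target.
      std::printf("%zu %zu\n", sizeof(Before), sizeof(After));
    }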
diff --git a/deps/v8/src/compiler/js-builtin-reducer.cc b/deps/v8/src/compiler/js-builtin-reducer.cc
index bea8f18b63..9ca0c63eb9 100644
--- a/deps/v8/src/compiler/js-builtin-reducer.cc
+++ b/deps/v8/src/compiler/js-builtin-reducer.cc
@@ -114,8 +114,10 @@ MaybeHandle<Map> GetMapWitness(Node* node) {
ZoneHandleSet<Map> maps;
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
- if (NodeProperties::InferReceiverMaps(receiver, effect, &maps)) {
- if (maps.size() == 1) return MaybeHandle<Map>(maps[0]);
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+ if (result == NodeProperties::kReliableReceiverMaps && maps.size() == 1) {
+ return maps[0];
}
return MaybeHandle<Map>();
}
@@ -734,11 +736,23 @@ Reduction JSBuiltinReducer::ReduceArrayIsArray(Node* node) {
return Replace(value);
}
Node* value = NodeProperties::GetValueInput(node, 2);
+ Type* value_type = NodeProperties::GetType(value);
Node* context = NodeProperties::GetContextInput(node);
Node* frame_state = NodeProperties::GetFrameStateInput(node);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
+ // Constant-fold based on {value} type.
+ if (value_type->Is(Type::Array())) {
+ Node* value = jsgraph()->TrueConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ } else if (!value_type->Maybe(Type::ArrayOrProxy())) {
+ Node* value = jsgraph()->FalseConstant();
+ ReplaceWithValue(node, value);
+ return Replace(value);
+ }
+
int count = 0;
Node* values[5];
Node* effects[5];
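The constant fold added above has three outcomes, and only proxies force the slow path: per ES2015 IsArray, a JSProxy's answer depends on its runtime target, so it cannot be decided from the static type. A toy, self-contained analog of the decision (the enum and function are illustrative, not V8 API):

    #include <cassert>

    enum class Ty { kArray, kProxy, kOther };

    // +1: fold to true; 0: fold to false; -1: keep the runtime check.
    int FoldIsArray(Ty t) {
      if (t == Ty::kArray) return 1;   // value_type->Is(Type::Array())
      if (t == Ty::kOther) return 0;   // !value_type->Maybe(Type::ArrayOrProxy())
      return -1;                       // may be a proxy: ask at runtime
    }

    int main() {
      assert(FoldIsArray(Ty::kArray) == 1);
      assert(FoldIsArray(Ty::kOther) == 0);
      assert(FoldIsArray(Ty::kProxy) == -1);
      return 0;
    }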
@@ -829,11 +843,11 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
Node* receiver = NodeProperties::GetValueInput(node, 1);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
- // TODO(turbofan): Extend this to also handle fast (holey) double elements
+ // TODO(turbofan): Extend this to also handle fast holey double elements
// once we got the hole NaN mess sorted out in TurboFan/V8.
if (GetMapWitness(node).ToHandle(&receiver_map) &&
CanInlineArrayResizeOperation(receiver_map) &&
- IsFastSmiOrObjectElementsKind(receiver_map->elements_kind())) {
+ receiver_map->elements_kind() != FAST_HOLEY_DOUBLE_ELEMENTS) {
// Install code dependencies on the {receiver} prototype maps and the
// global array protector cell.
dependencies()->AssumePropertyCell(factory()->array_protector());
@@ -859,15 +873,20 @@ Reduction JSBuiltinReducer::ReduceArrayPop(Node* node) {
Node* efalse = effect;
Node* vfalse;
{
+ // TODO(tebbi): We should trim the backing store if the capacity is too
+ // big, as implemented in elements.cc:ElementsAccessorBase::SetLengthImpl.
+
// Load the elements backing store from the {receiver}.
Node* elements = efalse = graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
receiver, efalse, if_false);
// Ensure that we aren't popping from a copy-on-write backing store.
- elements = efalse =
- graph()->NewNode(simplified()->EnsureWritableFastElements(), receiver,
- elements, efalse, if_false);
+ if (IsFastSmiOrObjectElementsKind(receiver_map->elements_kind())) {
+ elements = efalse =
+ graph()->NewNode(simplified()->EnsureWritableFastElements(),
+ receiver, elements, efalse, if_false);
+ }
// Compute the new {length}.
length = graph()->NewNode(simplified()->NumberSubtract(), length,
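The relaxed kind check above works because copy-on-write sharing exists only for FixedArray backing stores: a FixedDoubleArray is never COW, so the EnsureWritableFastElements call is now correctly limited to the Smi/Object element kinds, while the newly admitted packed-double case skips it. Holey-double arrays remain excluded until the hole-NaN representation issue in the TODO is sorted out.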
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index f0febc4d26..1e1d3a92ab 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -12,6 +12,7 @@
#include "src/compiler/node-matchers.h"
#include "src/compiler/simplified-operator.h"
#include "src/feedback-vector-inl.h"
+#include "src/ic/call-optimization.h"
#include "src/objects-inl.h"
namespace v8 {
@@ -123,9 +124,16 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
Node* arg_array = NodeProperties::GetValueInput(node, 3);
if (arg_array->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
for (Edge edge : arg_array->use_edges()) {
- if (edge.from()->opcode() == IrOpcode::kStateValues) continue;
+ Node* const user = edge.from();
+ if (user == node) continue;
+ // Ignore uses as frame state's locals or parameters.
+ if (user->opcode() == IrOpcode::kStateValues) continue;
+ // Ignore uses as frame state's accumulator.
+ if (user->opcode() == IrOpcode::kFrameState &&
+ user->InputAt(2) == arg_array) {
+ continue;
+ }
if (!NodeProperties::IsValueEdge(edge)) continue;
- if (edge.from() == node) continue;
return NoChange();
}
// Check if the arguments can be handled in the fast case (i.e. we don't
@@ -165,7 +173,7 @@ Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
node->RemoveInput(0); // Function.prototype.apply
node->RemoveInput(2); // arguments
NodeProperties::ChangeOp(node, javascript()->CallForwardVarargs(
- start_index, p.tail_call_mode()));
+ 2, start_index, p.tail_call_mode()));
return Changed(node);
}
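The widened use-scan above is the load-bearing part of this hunk: uses of the JSCreateArguments node that only feed deoptimization bookkeeping — StateValues nodes (frame-state locals and parameters) or the accumulator slot, input 2 of a FrameState — can never materialize the arguments object, so they no longer block the reduction. Restated as a predicate over the same node APIs (a sketch, not code from the patch):

    // Returns true iff this use forces the arguments object to materialize:
    // a value use other than the call being reduced or pure frame-state
    // bookkeeping (StateValues, or the FrameState accumulator at input 2).
    bool UseBlocksReduction(Node* node, Node* arg_array, Edge edge) {
      Node* const user = edge.from();
      if (user == node) return false;
      if (user->opcode() == IrOpcode::kStateValues) return false;
      if (user->opcode() == IrOpcode::kFrameState &&
          user->InputAt(2) == arg_array) {
        return false;
      }
      return NodeProperties::IsValueEdge(edge);
    }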
// Get to the actual frame state from which to extract the arguments;
@@ -272,94 +280,41 @@ Reduction JSCallReducer::ReduceFunctionPrototypeHasInstance(Node* node) {
return Changed(node);
}
-namespace {
-
-bool CanInlineApiCall(Isolate* isolate, Node* node,
- Handle<FunctionTemplateInfo> function_template_info) {
- DCHECK(node->opcode() == IrOpcode::kJSCall);
- if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
- if (function_template_info->call_code()->IsUndefined(isolate)) {
- return false;
- }
- CallParameters const& params = CallParametersOf(node->op());
- // CallApiCallbackStub expects the target in a register, so we count it out,
- // and counts the receiver as an implicit argument, so we count the receiver
- // out too.
- int const argc = static_cast<int>(params.arity()) - 2;
- if (argc > CallApiCallbackStub::kArgMax || !params.feedback().IsValid()) {
- return false;
- }
- HeapObjectMatcher receiver(NodeProperties::GetValueInput(node, 1));
- if (!receiver.HasValue()) {
- return false;
- }
- return receiver.Value()->IsUndefined(isolate) ||
- (receiver.Value()->map()->IsJSObjectMap() &&
- !receiver.Value()->map()->is_access_check_needed());
-}
-
-} // namespace
-
-JSCallReducer::HolderLookup JSCallReducer::LookupHolder(
- Handle<JSObject> object,
- Handle<FunctionTemplateInfo> function_template_info,
- Handle<JSObject>* holder) {
- DCHECK(object->map()->IsJSObjectMap());
- Handle<Map> object_map(object->map());
- Handle<FunctionTemplateInfo> expected_receiver_type;
- if (!function_template_info->signature()->IsUndefined(isolate())) {
- expected_receiver_type =
- handle(FunctionTemplateInfo::cast(function_template_info->signature()));
- }
- if (expected_receiver_type.is_null() ||
- expected_receiver_type->IsTemplateFor(*object_map)) {
- *holder = Handle<JSObject>::null();
- return kHolderIsReceiver;
- }
- while (object_map->has_hidden_prototype()) {
- Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
- object_map = handle(prototype->map());
- if (expected_receiver_type->IsTemplateFor(*object_map)) {
- *holder = prototype;
- return kHolderFound;
- }
- }
- return kHolderNotFound;
-}
-
-// ES6 section B.2.2.1.1 get Object.prototype.__proto__
-Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
- DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
- Node* receiver = NodeProperties::GetValueInput(node, 1);
+Reduction JSCallReducer::ReduceObjectGetPrototype(Node* node, Node* object) {
Node* effect = NodeProperties::GetEffectInput(node);
- // Try to determine the {receiver} map.
- ZoneHandleSet<Map> receiver_maps;
+ // Try to determine the {object} map.
+ ZoneHandleSet<Map> object_maps;
NodeProperties::InferReceiverMapsResult result =
- NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ NodeProperties::InferReceiverMaps(object, effect, &object_maps);
if (result != NodeProperties::kNoReceiverMaps) {
Handle<Map> candidate_map(
- receiver_maps[0]->GetPrototypeChainRootMap(isolate()));
+ object_maps[0]->GetPrototypeChainRootMap(isolate()));
Handle<Object> candidate_prototype(candidate_map->prototype(), isolate());
+ // We cannot deal with primitives here.
+ if (candidate_map->IsPrimitiveMap()) return NoChange();
+
// Check if we can constant-fold the {candidate_prototype}.
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- Handle<Map> const receiver_map(
- receiver_maps[i]->GetPrototypeChainRootMap(isolate()));
- if (receiver_map->IsJSProxyMap() ||
- receiver_map->has_hidden_prototype() ||
- receiver_map->is_access_check_needed() ||
- receiver_map->prototype() != *candidate_prototype) {
+ for (size_t i = 0; i < object_maps.size(); ++i) {
+ Handle<Map> const object_map(
+ object_maps[i]->GetPrototypeChainRootMap(isolate()));
+ if (object_map->IsSpecialReceiverMap() ||
+ object_map->has_hidden_prototype() ||
+ object_map->prototype() != *candidate_prototype) {
+ // We exclude special receivers, like JSProxy or API objects that
+ // might require access checks here; we also don't want to deal
+ // with hidden prototypes at this point.
return NoChange();
}
if (result == NodeProperties::kUnreliableReceiverMaps &&
- !receiver_map->is_stable()) {
+ !object_map->is_stable()) {
return NoChange();
}
}
if (result == NodeProperties::kUnreliableReceiverMaps) {
- for (size_t i = 0; i < receiver_maps.size(); ++i) {
- dependencies()->AssumeMapStable(receiver_maps[i]);
+ for (size_t i = 0; i < object_maps.size(); ++i) {
+ dependencies()->AssumeMapStable(object_maps[i]);
}
}
Node* value = jsgraph()->Constant(candidate_prototype);
@@ -370,65 +325,116 @@ Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
return NoChange();
}
+// ES6 section 19.1.2.11 Object.getPrototypeOf ( O )
+Reduction JSCallReducer::ReduceObjectGetPrototypeOf(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* object = (node->op()->ValueInputCount() >= 3)
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ return ReduceObjectGetPrototype(node, object);
+}
+
+// ES6 section B.2.2.1.1 get Object.prototype.__proto__
+Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* receiver = NodeProperties::GetValueInput(node, 1);
+ return ReduceObjectGetPrototype(node, receiver);
+}
+
+// ES6 section 26.1.7 Reflect.getPrototypeOf ( target )
+Reduction JSCallReducer::ReduceReflectGetPrototypeOf(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ Node* target = (node->op()->ValueInputCount() >= 3)
+ ? NodeProperties::GetValueInput(node, 2)
+ : jsgraph()->UndefinedConstant();
+ return ReduceObjectGetPrototype(node, target);
+}
+
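Both new entry points above use the same defaulting idiom: a JSCall node always carries at least two value inputs (target and receiver), so a call like Object.getPrototypeOf() with no argument has ValueInputCount() == 2 and the absent operand is materialized as the undefined constant, matching the spec's treatment of missing parameters, before delegating to the shared ReduceObjectGetPrototype.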
Reduction JSCallReducer::ReduceCallApiFunction(
- Node* node, Node* target,
- Handle<FunctionTemplateInfo> function_template_info) {
- Isolate* isolate = this->isolate();
- CHECK(!isolate->serializer_enabled());
- HeapObjectMatcher m(target);
- DCHECK(m.HasValue() && m.Value()->IsJSFunction());
- if (!CanInlineApiCall(isolate, node, function_template_info)) {
- return NoChange();
- }
- Handle<CallHandlerInfo> call_handler_info(
- handle(CallHandlerInfo::cast(function_template_info->call_code())));
- Handle<Object> data(call_handler_info->data(), isolate);
+ Node* node, Handle<FunctionTemplateInfo> function_template_info) {
+ DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+ CallParameters const& p = CallParametersOf(node->op());
+ int const argc = static_cast<int>(p.arity()) - 2;
+ Node* receiver = (p.convert_mode() == ConvertReceiverMode::kNullOrUndefined)
+ ? jsgraph()->HeapConstant(global_proxy())
+ : NodeProperties::GetValueInput(node, 1);
+ Node* effect = NodeProperties::GetEffectInput(node);
- Node* receiver_node = NodeProperties::GetValueInput(node, 1);
- CallParameters const& params = CallParametersOf(node->op());
+  // CallApiCallbackStub expects the target in a register, so we count it
+  // out; it also counts the receiver as an implicit argument, so we count
+  // the receiver out too.
+ if (argc > CallApiCallbackStub::kArgMax) return NoChange();
- Handle<HeapObject> receiver = HeapObjectMatcher(receiver_node).Value();
- bool const receiver_is_undefined = receiver->IsUndefined(isolate);
- if (receiver_is_undefined) {
- receiver = handle(Handle<JSFunction>::cast(m.Value())->global_proxy());
- } else {
- DCHECK(receiver->map()->IsJSObjectMap() &&
- !receiver->map()->is_access_check_needed());
+ // Infer the {receiver} maps, and check if we can inline the API function
+ // callback based on those.
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return NoChange();
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ Handle<Map> receiver_map = receiver_maps[i];
+ if (!receiver_map->IsJSObjectMap() ||
+ (!function_template_info->accept_any_receiver() &&
+ receiver_map->is_access_check_needed())) {
+ return NoChange();
+ }
+ // In case of unreliable {receiver} information, the {receiver_maps}
+ // must all be stable in order to consume the information.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ if (!receiver_map->is_stable()) return NoChange();
+ }
}
- Handle<JSObject> holder;
- HolderLookup lookup = LookupHolder(Handle<JSObject>::cast(receiver),
- function_template_info, &holder);
- if (lookup == kHolderNotFound) return NoChange();
- if (receiver_is_undefined) {
- receiver_node = jsgraph()->HeapConstant(receiver);
- NodeProperties::ReplaceValueInput(node, receiver_node, 1);
+ // See if we can constant-fold the compatible receiver checks.
+ CallOptimization call_optimization(function_template_info);
+ if (!call_optimization.is_simple_api_call()) return NoChange();
+ CallOptimization::HolderLookup lookup;
+ Handle<JSObject> api_holder =
+ call_optimization.LookupHolderOfExpectedType(receiver_maps[0], &lookup);
+ if (lookup == CallOptimization::kHolderNotFound) return NoChange();
+ for (size_t i = 1; i < receiver_maps.size(); ++i) {
+ CallOptimization::HolderLookup lookupi;
+ Handle<JSObject> holder = call_optimization.LookupHolderOfExpectedType(
+ receiver_maps[i], &lookupi);
+ if (lookup != lookupi) return NoChange();
+ if (!api_holder.is_identical_to(holder)) return NoChange();
}
- Node* holder_node =
- lookup == kHolderFound ? jsgraph()->HeapConstant(holder) : receiver_node;
-
- Zone* zone = graph()->zone();
- // Same as CanInlineApiCall: exclude the target (which goes in a register) and
- // the receiver (which is implicitly counted by CallApiCallbackStub) from the
- // arguments count.
- int const argc = static_cast<int>(params.arity() - 2);
- CallApiCallbackStub stub(isolate, argc, data->IsUndefined(isolate), false);
+
+ // Install stability dependencies for unreliable {receiver_maps}.
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ dependencies()->AssumeMapStable(receiver_maps[i]);
+ }
+ }
+
+ // CallApiCallbackStub's register arguments: code, target, call data, holder,
+ // function address.
+ // TODO(turbofan): Consider introducing a JSCallApiCallback operator for
+ // this and lower it during JSGenericLowering, and unify this with the
+ // JSNativeContextSpecialization::InlineApiCall method a bit.
+ Handle<CallHandlerInfo> call_handler_info(
+ CallHandlerInfo::cast(function_template_info->call_code()), isolate());
+ Handle<Object> data(call_handler_info->data(), isolate());
+ CallApiCallbackStub stub(isolate(), argc, false);
CallInterfaceDescriptor cid = stub.GetCallInterfaceDescriptor();
CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
- isolate, zone, cid,
+ isolate(), graph()->zone(), cid,
cid.GetStackParameterCount() + argc + 1 /* implicit receiver */,
CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
MachineType::AnyTagged(), 1);
ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
+ Node* holder = lookup == CallOptimization::kHolderFound
+ ? jsgraph()->HeapConstant(api_holder)
+ : receiver;
ExternalReference function_reference(
- &api_function, ExternalReference::DIRECT_API_CALL, isolate);
-
- // CallApiCallbackStub's register arguments: code, target, call data, holder,
- // function address.
- node->InsertInput(zone, 0, jsgraph()->HeapConstant(stub.GetCode()));
- node->InsertInput(zone, 2, jsgraph()->Constant(data));
- node->InsertInput(zone, 3, holder_node);
- node->InsertInput(zone, 4, jsgraph()->ExternalConstant(function_reference));
+ &api_function, ExternalReference::DIRECT_API_CALL, isolate());
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(stub.GetCode()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(data));
+ node->InsertInput(graph()->zone(), 3, holder);
+ node->InsertInput(graph()->zone(), 4,
+ jsgraph()->ExternalConstant(function_reference));
+ node->ReplaceInput(5, receiver);
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
return Changed(node);
}
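Worked through, the arity bookkeeping above: for a call f(a, b) the JSCall node's value inputs are (target, receiver, a, b), so p.arity() == 4 and argc == 4 - 2 == 2, because CallApiCallbackStub takes the target in a register and counts the receiver implicitly. The holder loop is the other safety condition: since a single constant holder is baked into the call, every inferred receiver map must produce both the same HolderLookup result and an identical holder object, otherwise the reduction bails out.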
@@ -448,62 +454,96 @@ Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
// of spread (except for value uses in frame states).
if (spread->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
for (Edge edge : spread->use_edges()) {
- if (edge.from()->opcode() == IrOpcode::kStateValues) continue;
+ Node* const user = edge.from();
+ if (user == node) continue;
+ // Ignore uses as frame state's locals or parameters.
+ if (user->opcode() == IrOpcode::kStateValues) continue;
+ // Ignore uses as frame state's accumulator.
+ if (user->opcode() == IrOpcode::kFrameState && user->InputAt(2) == spread) {
+ continue;
+ }
if (!NodeProperties::IsValueEdge(edge)) continue;
- if (edge.from() == node) continue;
return NoChange();
}
// Get to the actual frame state from which to extract the arguments;
// we can only optimize this in case the {node} was already inlined into
// some other function (and same for the {spread}).
- CreateArgumentsType type = CreateArgumentsTypeOf(spread->op());
+ CreateArgumentsType const type = CreateArgumentsTypeOf(spread->op());
Node* frame_state = NodeProperties::GetFrameStateInput(spread);
- Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
- if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
- FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
- if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
- // Need to take the parameters from the arguments adaptor.
- frame_state = outer_state;
- }
FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
int start_index = 0;
+  // Determine the formal parameter count.
+ Handle<SharedFunctionInfo> shared;
+ if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+ int formal_parameter_count = shared->internal_formal_parameter_count();
if (type == CreateArgumentsType::kMappedArguments) {
- // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- if (shared->internal_formal_parameter_count() != 0) return NoChange();
+ // Mapped arguments (sloppy mode) that are aliased can only be handled
+    // here if there's no side-effect between the {node} and the {spread}.
+ // TODO(turbofan): Further relax this constraint.
+ if (formal_parameter_count != 0) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ while (effect != spread) {
+ if (effect->op()->EffectInputCount() != 1 ||
+ !(effect->op()->properties() & Operator::kNoWrite)) {
+ return NoChange();
+ }
+ effect = NodeProperties::GetEffectInput(effect);
+ }
+ }
} else if (type == CreateArgumentsType::kRestParameter) {
- Handle<SharedFunctionInfo> shared;
- if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
- start_index = shared->internal_formal_parameter_count();
+ start_index = formal_parameter_count;
// Only check the array iterator protector when we have a rest object.
if (!isolate()->IsArrayIteratorLookupChainIntact()) return NoChange();
- // Add a code dependency on the array iterator protector.
- dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
}
+ // Install appropriate code dependencies.
dependencies()->AssumeMapStable(
isolate()->initial_array_iterator_prototype_map());
-
+ if (type == CreateArgumentsType::kRestParameter) {
+ dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
+ }
+ // Remove the spread input from the {node}.
node->RemoveInput(arity--);
-
+  // Check if we are spreading to inlined arguments or to the arguments of
+ // the outermost function.
+ Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+ if (outer_state->opcode() != IrOpcode::kFrameState) {
+ Operator const* op =
+ (node->opcode() == IrOpcode::kJSCallWithSpread)
+ ? javascript()->CallForwardVarargs(arity + 1, start_index,
+ TailCallMode::kDisallow)
+ : javascript()->ConstructForwardVarargs(arity + 2, start_index);
+ NodeProperties::ChangeOp(node, op);
+ return Changed(node);
+ }
+ // Get to the actual frame state from which to extract the arguments;
+ // we can only optimize this in case the {node} was already inlined into
+  // some other function (and same for the {spread}).
+ FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
+ if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
+ // Need to take the parameters from the arguments adaptor.
+ frame_state = outer_state;
+ }
// Add the actual parameters to the {node}, skipping the receiver.
Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
- for (int i = start_index + 1; i < state_info.parameter_count(); ++i) {
+ for (int i = start_index + 1; i < parameters->InputCount(); ++i) {
node->InsertInput(graph()->zone(), static_cast<int>(++arity),
parameters->InputAt(i));
}
+ // TODO(turbofan): Collect call counts on spread call/construct and thread it
+ // through here.
if (node->opcode() == IrOpcode::kJSCallWithSpread) {
- NodeProperties::ChangeOp(
- node, javascript()->Call(arity + 1, 7, VectorSlotPair()));
+ NodeProperties::ChangeOp(node, javascript()->Call(arity + 1));
+ Reduction const r = ReduceJSCall(node);
+ return r.Changed() ? r : Changed(node);
} else {
- NodeProperties::ChangeOp(
- node, javascript()->Construct(arity + 2, 7, VectorSlotPair()));
+ NodeProperties::ChangeOp(node, javascript()->Construct(arity + 2));
+ Reduction const r = ReduceJSConstruct(node);
+ return r.Changed() ? r : Changed(node);
}
- return Changed(node);
}
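Two relaxations land in this rewrite of ReduceSpreadCall. First, aliased mapped (sloppy-mode) arguments are now accepted when nothing between the call and the JSCreateArguments node can write to them, verified by walking the effect chain; restated with the hunk's own names:

    // Bail out unless every effect between {node} and {spread} is a
    // single-effect-input, non-writing operator, i.e. nothing on the path
    // can mutate the aliased mapped arguments.
    for (Node* effect = NodeProperties::GetEffectInput(node); effect != spread;
         effect = NodeProperties::GetEffectInput(effect)) {
      if (effect->op()->EffectInputCount() != 1 ||
          !(effect->op()->properties() & Operator::kNoWrite)) {
        return NoChange();
      }
    }

Second, when the outer frame state is not itself a FrameState the function was not inlined, so instead of giving up, the call is lowered to CallForwardVarargs (or ConstructForwardVarargs), which forwards the caller's actual stack arguments at runtime; only the inlined case still splices the parameters out of the frame state.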
namespace {
@@ -570,8 +610,12 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceFunctionPrototypeHasInstance(node);
case Builtins::kNumberConstructor:
return ReduceNumberConstructor(node);
+ case Builtins::kObjectGetPrototypeOf:
+ return ReduceObjectGetPrototypeOf(node);
case Builtins::kObjectPrototypeGetProto:
return ReduceObjectPrototypeGetProto(node);
+ case Builtins::kReflectGetPrototypeOf:
+ return ReduceReflectGetPrototypeOf(node);
default:
break;
}
@@ -581,10 +625,10 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceArrayConstructor(node);
}
- if (shared->IsApiFunction()) {
- return ReduceCallApiFunction(
- node, target,
- handle(FunctionTemplateInfo::cast(shared->function_data())));
+ if (!FLAG_runtime_stats && shared->IsApiFunction()) {
+ Handle<FunctionTemplateInfo> function_template_info(
+ FunctionTemplateInfo::cast(shared->function_data()), isolate());
+ return ReduceCallApiFunction(node, function_template_info);
}
} else if (m.Value()->IsJSBoundFunction()) {
Handle<JSBoundFunction> function =
@@ -835,6 +879,11 @@ Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
Factory* JSCallReducer::factory() const { return isolate()->factory(); }
+Handle<JSGlobalProxy> JSCallReducer::global_proxy() const {
+ return handle(JSGlobalProxy::cast(native_context()->global_proxy()),
+ isolate());
+}
+
CommonOperatorBuilder* JSCallReducer::common() const {
return jsgraph()->common();
}
diff --git a/deps/v8/src/compiler/js-call-reducer.h b/deps/v8/src/compiler/js-call-reducer.h
index 29ca61c100..31326084cc 100644
--- a/deps/v8/src/compiler/js-call-reducer.h
+++ b/deps/v8/src/compiler/js-call-reducer.h
@@ -41,30 +41,27 @@ class JSCallReducer final : public AdvancedReducer {
Reduction ReduceArrayConstructor(Node* node);
Reduction ReduceBooleanConstructor(Node* node);
Reduction ReduceCallApiFunction(
- Node* node, Node* target,
- Handle<FunctionTemplateInfo> function_template_info);
+ Node* node, Handle<FunctionTemplateInfo> function_template_info);
Reduction ReduceNumberConstructor(Node* node);
Reduction ReduceFunctionPrototypeApply(Node* node);
Reduction ReduceFunctionPrototypeCall(Node* node);
Reduction ReduceFunctionPrototypeHasInstance(Node* node);
+ Reduction ReduceObjectGetPrototype(Node* node, Node* object);
+ Reduction ReduceObjectGetPrototypeOf(Node* node);
Reduction ReduceObjectPrototypeGetProto(Node* node);
+ Reduction ReduceReflectGetPrototypeOf(Node* node);
Reduction ReduceSpreadCall(Node* node, int arity);
Reduction ReduceJSConstruct(Node* node);
Reduction ReduceJSConstructWithSpread(Node* node);
Reduction ReduceJSCall(Node* node);
Reduction ReduceJSCallWithSpread(Node* node);
- enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
-
- HolderLookup LookupHolder(Handle<JSObject> object,
- Handle<FunctionTemplateInfo> function_template_info,
- Handle<JSObject>* holder);
-
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
Isolate* isolate() const;
Factory* factory() const;
Handle<Context> native_context() const { return native_context_; }
+ Handle<JSGlobalProxy> global_proxy() const;
CommonOperatorBuilder* common() const;
JSOperatorBuilder* javascript() const;
SimplifiedOperatorBuilder* simplified() const;
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 0deb7cb38b..c9548ffd1c 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -83,6 +83,45 @@ Reduction JSContextSpecialization::SimplifyJSStoreContext(Node* node,
return Changed(node);
}
+namespace {
+
+bool IsContextParameter(Node* node) {
+ DCHECK_EQ(IrOpcode::kParameter, node->opcode());
+ Node* const start = NodeProperties::GetValueInput(node, 0);
+ DCHECK_EQ(IrOpcode::kStart, start->opcode());
+ int const index = ParameterIndexOf(node->op());
+ // The context is always the last parameter to a JavaScript function, and
+ // {Parameter} indices start at -1, so value outputs of {Start} look like
+ // this: closure, receiver, param0, ..., paramN, context.
+ return index == start->op()->ValueOutputCount() - 2;
+}
+
+// Given a context {node} and the {distance} from that context to the target
+// context (which we want to read from or store to), try to return a
+// specialization context. If successful, update {distance} to whatever
+// distance remains from the specialization context.
+MaybeHandle<Context> GetSpecializationContext(Node* node, size_t* distance,
+ Maybe<OuterContext> maybe_outer) {
+ switch (node->opcode()) {
+ case IrOpcode::kHeapConstant:
+ return Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
+ case IrOpcode::kParameter: {
+ OuterContext outer;
+ if (maybe_outer.To(&outer) && IsContextParameter(node) &&
+ *distance >= outer.distance) {
+ *distance -= outer.distance;
+ return outer.context;
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ return MaybeHandle<Context>();
+}
+
+} // anonymous namespace
+
Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
@@ -90,14 +129,13 @@ Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
size_t depth = access.depth();
// First walk up the context chain in the graph as far as possible.
- Node* outer = NodeProperties::GetOuterContext(node, &depth);
+ Node* context = NodeProperties::GetOuterContext(node, &depth);
Handle<Context> concrete;
- if (!NodeProperties::GetSpecializationContext(outer, context())
- .ToHandle(&concrete)) {
+ if (!GetSpecializationContext(context, &depth, outer()).ToHandle(&concrete)) {
// We do not have a concrete context object, so we can only partially reduce
// the load by folding-in the outer context node.
- return SimplifyJSLoadContext(node, outer, depth);
+ return SimplifyJSLoadContext(node, context, depth);
}
// Now walk up the concrete context chain for the remaining depth.
@@ -139,14 +177,13 @@ Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
// First walk up the context chain in the graph until we reduce the depth to 0
// or hit a node that does not have a CreateXYZContext operator.
- Node* outer = NodeProperties::GetOuterContext(node, &depth);
+ Node* context = NodeProperties::GetOuterContext(node, &depth);
Handle<Context> concrete;
- if (!NodeProperties::GetSpecializationContext(outer, context())
- .ToHandle(&concrete)) {
+ if (!GetSpecializationContext(context, &depth, outer()).ToHandle(&concrete)) {
// We do not have a concrete context object, so we can only partially reduce
// the load by folding-in the outer context node.
- return SimplifyJSStoreContext(node, outer, depth);
+ return SimplifyJSStoreContext(node, context, depth);
}
// Now walk up the concrete context chain for the remaining depth.
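Worked example for IsContextParameter above: a function with two formal parameters gives Start the value outputs (closure, receiver, p0, p1, context), so ValueOutputCount() == 5; since Parameter indices begin at -1 for the closure, the context parameter sits at index 5 - 2 == 3, which is exactly what the predicate tests.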
diff --git a/deps/v8/src/compiler/js-context-specialization.h b/deps/v8/src/compiler/js-context-specialization.h
index a38aca80bb..0cf2bc1e54 100644
--- a/deps/v8/src/compiler/js-context-specialization.h
+++ b/deps/v8/src/compiler/js-context-specialization.h
@@ -15,17 +15,29 @@ namespace compiler {
class JSGraph;
class JSOperatorBuilder;
+// Pair of a context and its distance from some point of reference.
+struct OuterContext {
+ OuterContext() : context(), distance() {}
+ OuterContext(Handle<Context> context_, size_t distance_)
+ : context(context_), distance(distance_) {}
+ Handle<Context> context;
+ size_t distance;
+};
// Specializes a given JSGraph to a given context, potentially constant folding
// some {LoadContext} nodes or strength reducing some {StoreContext} nodes.
+// Additionally, constant-folds the function parameter if {closure} is given.
+//
+// The context can be the incoming function context or any outer context
+// thereof, as indicated by {outer}'s {distance}.
class JSContextSpecialization final : public AdvancedReducer {
public:
JSContextSpecialization(Editor* editor, JSGraph* jsgraph,
- MaybeHandle<Context> context,
+ Maybe<OuterContext> outer,
MaybeHandle<JSFunction> closure)
: AdvancedReducer(editor),
jsgraph_(jsgraph),
- context_(context),
+ outer_(outer),
closure_(closure) {}
Reduction Reduce(Node* node) final;
@@ -43,11 +55,11 @@ class JSContextSpecialization final : public AdvancedReducer {
Isolate* isolate() const;
JSOperatorBuilder* javascript() const;
JSGraph* jsgraph() const { return jsgraph_; }
- MaybeHandle<Context> context() const { return context_; }
+ Maybe<OuterContext> outer() const { return outer_; }
MaybeHandle<JSFunction> closure() const { return closure_; }
JSGraph* const jsgraph_;
- MaybeHandle<Context> context_;
+ Maybe<OuterContext> outer_;
MaybeHandle<JSFunction> closure_;
DISALLOW_COPY_AND_ASSIGN(JSContextSpecialization);
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 432b5c620b..57eedfada2 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -228,6 +228,8 @@ Reduction JSCreateLowering::Reduce(Node* node) {
return ReduceJSCreateCatchContext(node);
case IrOpcode::kJSCreateBlockContext:
return ReduceJSCreateBlockContext(node);
+ case IrOpcode::kJSCreateGeneratorObject:
+ return ReduceJSCreateGeneratorObject(node);
default:
break;
}
@@ -548,6 +550,71 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
return NoChange();
}
+Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSCreateGeneratorObject, node->opcode());
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+ Node* const receiver = NodeProperties::GetValueInput(node, 1);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Type* const closure_type = NodeProperties::GetType(closure);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ // Extract constructor and original constructor function.
+ if (closure_type->IsHeapConstant()) {
+ DCHECK(closure_type->AsHeapConstant()->Value()->IsJSFunction());
+ Handle<JSFunction> js_function =
+ Handle<JSFunction>::cast(closure_type->AsHeapConstant()->Value());
+ JSFunction::EnsureHasInitialMap(js_function);
+ Handle<Map> initial_map(js_function->initial_map());
+ initial_map->CompleteInobjectSlackTracking();
+ DCHECK(initial_map->instance_type() == JS_GENERATOR_OBJECT_TYPE ||
+ initial_map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE);
+
+ // Add a dependency on the {initial_map} to make sure that this code is
+ // deoptimized whenever the {initial_map} of the {original_constructor}
+ // changes.
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+
+ DCHECK(js_function->shared()->HasBytecodeArray());
+ int size = js_function->shared()->bytecode_array()->register_count();
+ Node* elements = effect = AllocateElements(
+ effect, control, FAST_HOLEY_ELEMENTS, size, NOT_TENURED);
+
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(initial_map->instance_size());
+ Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
+ Node* undefined = jsgraph()->UndefinedConstant();
+ a.Store(AccessBuilder::ForMap(), initial_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
+ a.Store(AccessBuilder::ForJSGeneratorObjectContext(), context);
+ a.Store(AccessBuilder::ForJSGeneratorObjectFunction(), closure);
+ a.Store(AccessBuilder::ForJSGeneratorObjectReceiver(), receiver);
+ a.Store(AccessBuilder::ForJSGeneratorObjectInputOrDebugPos(), undefined);
+ a.Store(AccessBuilder::ForJSGeneratorObjectResumeMode(),
+ jsgraph()->Constant(JSGeneratorObject::kNext));
+ a.Store(AccessBuilder::ForJSGeneratorObjectContinuation(),
+ jsgraph()->Constant(JSGeneratorObject::kGeneratorExecuting));
+ a.Store(AccessBuilder::ForJSGeneratorObjectRegisterFile(), elements);
+
+ if (initial_map->instance_type() == JS_ASYNC_GENERATOR_OBJECT_TYPE) {
+ a.Store(AccessBuilder::ForJSAsyncGeneratorObjectQueue(), undefined);
+ a.Store(AccessBuilder::ForJSAsyncGeneratorObjectAwaitInputOrDebugPos(),
+ undefined);
+ a.Store(AccessBuilder::ForJSAsyncGeneratorObjectAwaitedPromise(),
+ undefined);
+ }
+
+ // Handle in-object properties, too.
+ for (int i = 0; i < initial_map->GetInObjectProperties(); ++i) {
+ a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
+ undefined);
+ }
+ a.FinishAndChange(node);
+ return Changed(node);
+ }
+ return NoChange();
+}
+
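The inlined allocation above mirrors the Builtins::kCreateGeneratorObject path that the generic lowering (further down in this patch) still uses: the register file is a FixedArray sized to the function's bytecode register_count() — hence the HasBytecodeArray check — and the AssumeInitialMapCantChange dependency deoptimizes this code if the constructor's initial map is ever replaced, which would invalidate the baked-in instance size and in-object property layout.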
Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
int capacity,
Handle<AllocationSite> site) {
@@ -594,10 +661,73 @@ Reduction JSCreateLowering::ReduceNewArray(Node* node, Node* length,
return Changed(node);
}
+Reduction JSCreateLowering::ReduceNewArray(Node* node,
+ std::vector<Node*> values,
+ Handle<AllocationSite> site) {
+ DCHECK_EQ(IrOpcode::kJSCreateArray, node->opcode());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Extract transition and tenuring feedback from the {site} and add
+ // appropriate code dependencies on the {site} if deoptimization is
+ // enabled.
+ PretenureFlag pretenure = site->GetPretenureMode();
+ ElementsKind elements_kind = site->GetElementsKind();
+ DCHECK(IsFastElementsKind(elements_kind));
+ dependencies()->AssumeTenuringDecision(site);
+ dependencies()->AssumeTransitionStable(site);
+
+ // Check {values} based on the {elements_kind}. These checks are guarded
+ // by the {elements_kind} feedback on the {site}, so it's safe to just
+ // deoptimize in this case.
+ if (IsFastSmiElementsKind(elements_kind)) {
+ for (auto& value : values) {
+ if (!NodeProperties::GetType(value)->Is(Type::SignedSmall())) {
+ value = effect =
+ graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
+ }
+ }
+ } else if (IsFastDoubleElementsKind(elements_kind)) {
+ for (auto& value : values) {
+ if (!NodeProperties::GetType(value)->Is(Type::Number())) {
+ value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
+ effect, control);
+ }
+ // Make sure we do not store signaling NaNs into double arrays.
+ value = graph()->NewNode(simplified()->NumberSilenceNaN(), value);
+ }
+ }
+
+ // Retrieve the initial map for the array.
+ int const array_map_index = Context::ArrayMapIndex(elements_kind);
+ Node* js_array_map = jsgraph()->HeapConstant(
+ handle(Map::cast(native_context()->get(array_map_index)), isolate()));
+
+ // Setup elements, properties and length.
+ Node* elements = effect =
+ AllocateElements(effect, control, elements_kind, values, pretenure);
+ Node* properties = jsgraph()->EmptyFixedArrayConstant();
+ Node* length = jsgraph()->Constant(static_cast<int>(values.size()));
+
+ // Perform the allocation of the actual JSArray object.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.Allocate(JSArray::kSize, pretenure);
+ a.Store(AccessBuilder::ForMap(), js_array_map);
+ a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+ a.Store(AccessBuilder::ForJSObjectElements(), elements);
+ a.Store(AccessBuilder::ForJSArrayLength(elements_kind), length);
+ RelaxControls(node);
+ a.FinishAndChange(node);
+ return Changed(node);
+}
+
Reduction JSCreateLowering::ReduceNewArrayToStubCall(
Node* node, Handle<AllocationSite> site) {
CreateArrayParameters const& p = CreateArrayParametersOf(node->op());
int const arity = static_cast<int>(p.arity());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Node* new_target = NodeProperties::GetValueInput(node, 1);
+ Type* new_target_type = NodeProperties::GetType(new_target);
ElementsKind elements_kind = site->GetElementsKind();
AllocationSiteOverrideMode override_mode =
@@ -605,12 +735,19 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
+ // The Array constructor can only trigger an observable side-effect
+ // if the new.target may be a proxy.
+ Operator::Properties const properties =
+ (new_target != target || new_target_type->Maybe(Type::Proxy()))
+ ? Operator::kNoDeopt
+ : Operator::kNoDeopt | Operator::kNoWrite;
+
if (arity == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), elements_kind,
override_mode);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 1,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNeedsFrameState, properties);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(0));
@@ -628,7 +765,7 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
override_mode);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNeedsFrameState, properties);
node->ReplaceInput(0, jsgraph()->HeapConstant(stub.GetCode()));
node->InsertInput(graph()->zone(), 2, jsgraph()->HeapConstant(site));
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(1));
@@ -655,7 +792,7 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
override_mode);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNeedsFrameState, properties);
Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
node->InputAt(1),
@@ -678,7 +815,7 @@ Reduction JSCreateLowering::ReduceNewArrayToStubCall(
isolate(), GetHoleyElementsKind(elements_kind), override_mode);
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
isolate(), graph()->zone(), stub.GetCallInterfaceDescriptor(), 2,
- CallDescriptor::kNeedsFrameState);
+ CallDescriptor::kNeedsFrameState, properties);
Node* inputs[] = {jsgraph()->HeapConstant(stub.GetCode()),
node->InputAt(1),
@@ -745,12 +882,25 @@ Reduction JSCreateLowering::ReduceJSCreateArray(Node* node) {
} else if (p.arity() == 1) {
Node* length = NodeProperties::GetValueInput(node, 2);
Type* length_type = NodeProperties::GetType(length);
+ if (!length_type->Maybe(Type::Number())) {
+ // Handle the single argument case, where we know that the value
+ // cannot be a valid Array length.
+ return ReduceNewArray(node, {length}, site);
+ }
if (length_type->Is(Type::SignedSmall()) && length_type->Min() >= 0 &&
length_type->Max() <= kElementLoopUnrollLimit &&
length_type->Min() == length_type->Max()) {
int capacity = static_cast<int>(length_type->Max());
return ReduceNewArray(node, length, capacity, site);
}
+ } else if (p.arity() <= JSArray::kInitialMaxFastElementArray) {
+ std::vector<Node*> values;
+ values.reserve(p.arity());
+ for (size_t i = 0; i < p.arity(); ++i) {
+ values.push_back(
+ NodeProperties::GetValueInput(node, static_cast<int>(2 + i)));
+ }
+ return ReduceNewArray(node, values, site);
}
}
@@ -1115,6 +1265,31 @@ Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
return a.Finish();
}
+Node* JSCreateLowering::AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind,
+ std::vector<Node*> const& values,
+ PretenureFlag pretenure) {
+ int const capacity = static_cast<int>(values.size());
+ DCHECK_LE(1, capacity);
+ DCHECK_LE(capacity, JSArray::kInitialMaxFastElementArray);
+
+ Handle<Map> elements_map = IsFastDoubleElementsKind(elements_kind)
+ ? factory()->fixed_double_array_map()
+ : factory()->fixed_array_map();
+ ElementAccess access = IsFastDoubleElementsKind(elements_kind)
+ ? AccessBuilder::ForFixedDoubleArrayElement()
+ : AccessBuilder::ForFixedArrayElement();
+
+ // Actually allocate the backing store.
+ AllocationBuilder a(jsgraph(), effect, control);
+ a.AllocateArray(capacity, elements_map, pretenure);
+ for (int i = 0; i < capacity; ++i) {
+ Node* index = jsgraph()->Constant(i);
+ a.Store(access, index, values[i]);
+ }
+ return a.Finish();
+}
+
Node* JSCreateLowering::AllocateFastLiteral(
Node* effect, Node* control, Handle<JSObject> boilerplate,
AllocationSiteUsageContext* site_context) {
@@ -1206,7 +1381,7 @@ Node* JSCreateLowering::AllocateFastLiteral(
// Actually allocate and initialize the object.
AllocationBuilder builder(jsgraph(), effect, control);
builder.Allocate(boilerplate_map->instance_size(), pretenure,
- Type::OtherObject());
+ Type::For(boilerplate_map));
builder.Store(AccessBuilder::ForMap(), boilerplate_map);
builder.Store(AccessBuilder::ForJSObjectProperties(), properties);
builder.Store(AccessBuilder::ForJSObjectElements(), elements);
diff --git a/deps/v8/src/compiler/js-create-lowering.h b/deps/v8/src/compiler/js-create-lowering.h
index eea75d3842..d03464d39d 100644
--- a/deps/v8/src/compiler/js-create-lowering.h
+++ b/deps/v8/src/compiler/js-create-lowering.h
@@ -57,8 +57,11 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Reduction ReduceJSCreateWithContext(Node* node);
Reduction ReduceJSCreateCatchContext(Node* node);
Reduction ReduceJSCreateBlockContext(Node* node);
+ Reduction ReduceJSCreateGeneratorObject(Node* node);
Reduction ReduceNewArray(Node* node, Node* length, int capacity,
Handle<AllocationSite> site);
+ Reduction ReduceNewArray(Node* node, std::vector<Node*> values,
+ Handle<AllocationSite> site);
Node* AllocateArguments(Node* effect, Node* control, Node* frame_state);
Node* AllocateRestArguments(Node* effect, Node* control, Node* frame_state,
@@ -69,6 +72,10 @@ class V8_EXPORT_PRIVATE JSCreateLowering final
Node* AllocateElements(Node* effect, Node* control,
ElementsKind elements_kind, int capacity,
PretenureFlag pretenure);
+ Node* AllocateElements(Node* effect, Node* control,
+ ElementsKind elements_kind,
+ std::vector<Node*> const& values,
+ PretenureFlag pretenure);
Node* AllocateFastLiteral(Node* effect, Node* control,
Handle<JSObject> boilerplate,
AllocationSiteUsageContext* site_context);
diff --git a/deps/v8/src/compiler/js-frame-specialization.cc b/deps/v8/src/compiler/js-frame-specialization.cc
index 73e1b7dd24..d4f6822de6 100644
--- a/deps/v8/src/compiler/js-frame-specialization.cc
+++ b/deps/v8/src/compiler/js-frame-specialization.cc
@@ -16,8 +16,6 @@ Reduction JSFrameSpecialization::Reduce(Node* node) {
switch (node->opcode()) {
case IrOpcode::kOsrValue:
return ReduceOsrValue(node);
- case IrOpcode::kOsrGuard:
- return ReduceOsrGuard(node);
case IrOpcode::kParameter:
return ReduceParameter(node);
default:
@@ -47,13 +45,6 @@ Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
return Replace(jsgraph()->Constant(value));
}
-Reduction JSFrameSpecialization::ReduceOsrGuard(Node* node) {
- DCHECK_EQ(IrOpcode::kOsrGuard, node->opcode());
- ReplaceWithValue(node, node->InputAt(0),
- NodeProperties::GetEffectInput(node));
- return Changed(node);
-}
-
Reduction JSFrameSpecialization::ReduceParameter(Node* node) {
DCHECK_EQ(IrOpcode::kParameter, node->opcode());
Handle<Object> value;
diff --git a/deps/v8/src/compiler/js-frame-specialization.h b/deps/v8/src/compiler/js-frame-specialization.h
index daf699265c..f268b3ac5b 100644
--- a/deps/v8/src/compiler/js-frame-specialization.h
+++ b/deps/v8/src/compiler/js-frame-specialization.h
@@ -29,7 +29,6 @@ class JSFrameSpecialization final : public AdvancedReducer {
private:
Reduction ReduceOsrValue(Node* node);
- Reduction ReduceOsrGuard(Node* node);
Reduction ReduceParameter(Node* node);
Isolate* isolate() const;
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index 2b333c06c5..ea5a4a4627 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -309,10 +309,10 @@ void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
}
void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
- LanguageMode language_mode = OpParameter<LanguageMode>(node);
- ReplaceWithRuntimeCall(node, is_strict(language_mode)
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kDeleteProperty);
+ ReplaceWithStubCall(node, callable, flags);
}
void JSGenericLowering::LowerJSGetSuperConstructor(Node* node) {
@@ -423,6 +423,13 @@ void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
}
}
+void JSGenericLowering::LowerJSCreateGeneratorObject(Node* node) {
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable =
+ Builtins::CallableFor(isolate(), Builtins::kCreateGeneratorObject);
+ node->RemoveInput(4); // control
+ ReplaceWithStubCall(node, callable, flags);
+}
void JSGenericLowering::LowerJSCreateIterResultObject(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kCreateIterResultObject);
@@ -464,8 +471,7 @@ void JSGenericLowering::LowerJSCreateLiteralObject(Node* node) {
if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
p.length() <=
ConstructorBuiltins::kMaximumClonedShallowObjectProperties) {
- Callable callable =
- CodeFactory::FastCloneShallowObject(isolate(), p.length());
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate());
ReplaceWithStubCall(node, callable, flags);
} else {
ReplaceWithRuntimeCall(node, Runtime::kCreateObjectLiteral);
@@ -516,6 +522,28 @@ void JSGenericLowering::LowerJSCreateScriptContext(Node* node) {
ReplaceWithRuntimeCall(node, Runtime::kNewScriptContext);
}
+void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
+ ConstructForwardVarargsParameters p =
+ ConstructForwardVarargsParametersOf(node->op());
+ int const arg_count = static_cast<int>(p.arity() - 2);
+ CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::ConstructForwardVarargs(isolate());
+ CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+ Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* start_index = jsgraph()->Uint32Constant(p.start_index());
+ Node* new_target = node->InputAt(arg_count + 1);
+ Node* receiver = jsgraph()->UndefinedConstant();
+ node->RemoveInput(arg_count + 1); // Drop new target.
+ node->InsertInput(zone(), 0, stub_code);
+ node->InsertInput(zone(), 2, new_target);
+ node->InsertInput(zone(), 3, stub_arity);
+ node->InsertInput(zone(), 4, start_index);
+ node->InsertInput(zone(), 5, receiver);
+ NodeProperties::ChangeOp(node, common()->Call(desc));
+}
+
void JSGenericLowering::LowerJSConstruct(Node* node) {
ConstructParameters const& p = ConstructParametersOf(node->op());
int const arg_count = static_cast<int>(p.arity() - 2);
@@ -556,17 +584,20 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
CallForwardVarargsParameters p = CallForwardVarargsParametersOf(node->op());
- Callable callable = CodeFactory::CallForwardVarargs(isolate());
+ int const arg_count = static_cast<int>(p.arity() - 2);
CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+ Callable callable = CodeFactory::CallForwardVarargs(isolate());
if (p.tail_call_mode() == TailCallMode::kAllow) {
flags |= CallDescriptor::kSupportsTailCalls;
}
CallDescriptor* desc = Linkage::GetStubCallDescriptor(
- isolate(), zone(), callable.descriptor(), 1, flags);
+ isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
+ Node* stub_arity = jsgraph()->Int32Constant(arg_count);
Node* start_index = jsgraph()->Uint32Constant(p.start_index());
node->InsertInput(zone(), 0, stub_code);
- node->InsertInput(zone(), 2, start_index);
+ node->InsertInput(zone(), 2, stub_arity);
+ node->InsertInput(zone(), 3, start_index);
NodeProperties::ChangeOp(node, common()->Call(desc));
}
diff --git a/deps/v8/src/compiler/js-graph.cc b/deps/v8/src/compiler/js-graph.cc
index b51623aca2..93706acf5a 100644
--- a/deps/v8/src/compiler/js-graph.cc
+++ b/deps/v8/src/compiler/js-graph.cc
@@ -280,6 +280,14 @@ Node* JSGraph::EmptyStateValues() {
0, SparseInputMask::Dense())));
}
+Node* JSGraph::SingleDeadTypedStateValues() {
+ return CACHED(kSingleDeadTypedStateValues,
+ graph()->NewNode(common()->TypedStateValues(
+ new (graph()->zone()->New(sizeof(ZoneVector<MachineType>)))
+ ZoneVector<MachineType>(0, graph()->zone()),
+ SparseInputMask(SparseInputMask::kEndMarker << 1))));
+}
+
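// How the mask reads (a sketch inferred from this usage): SparseInputMask is
// consumed LSB-first, where a 1 bit takes the next real input, a 0 bit stands
// for an optimized-out value, and the topmost set bit is the end marker. So
// kEndMarker << 1 == 0b10 encodes exactly one dead input and no real inputs,
// which is why the ZoneVector of MachineTypes above is created with length 0.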
Node* JSGraph::Dead() {
return CACHED(kDead, graph()->NewNode(common()->Dead()));
}
diff --git a/deps/v8/src/compiler/js-graph.h b/deps/v8/src/compiler/js-graph.h
index 8f81555cb2..4b3ed4856a 100644
--- a/deps/v8/src/compiler/js-graph.h
+++ b/deps/v8/src/compiler/js-graph.h
@@ -142,6 +142,10 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
// values for a certain part of the frame state.
Node* EmptyStateValues();
+ // Typed state values with a single dead input. This is useful to represent
+ // a dead accumulator.
+ Node* SingleDeadTypedStateValues();
+
// Create a control node that serves as dependency for dead nodes.
Node* Dead();
@@ -181,6 +185,7 @@ class V8_EXPORT_PRIVATE JSGraph : public NON_EXPORTED_BASE(ZoneObject) {
kOneConstant,
kNaNConstant,
kEmptyStateValues,
+ kSingleDeadTypedStateValues,
kDead,
kNumCachedNodes // Must remain last.
};
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 9774de28e5..4335e96c61 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -65,6 +65,15 @@ bool CanInlineFunction(Handle<SharedFunctionInfo> shared) {
return true;
}
+bool IsSmallInlineFunction(Handle<SharedFunctionInfo> shared) {
+ // Don't forcibly inline functions that haven't been compiled yet.
+ if (shared->ast_node_count() == 0) return false;
+
+ // Forcibly inline small functions.
+ if (shared->ast_node_count() <= FLAG_max_inlined_nodes_small) return true;
+ return false;
+}
+
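// The new fast path is gated by two thresholds (flag names taken from this
// patch; defaults are whatever the build defines):
//   FLAG_max_inlined_nodes_small     per-candidate AST-node-count limit
//   FLAG_max_inlined_nodes_absolute  hard cap on cumulative inlined nodes
// Roughly: if every function behind a call site is "small" and the cumulative
// budget is not exhausted, inline immediately instead of queueing the
// candidate for the frequency-ordered pass in Finalize().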
} // namespace
Reduction JSInliningHeuristic::Reduce(Node* node) {
@@ -91,7 +100,7 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
}
// Functions marked with %SetForceInlineFlag are immediately inlined.
- bool can_inline = false, force_inline = true;
+ bool can_inline = false, force_inline = true, small_inline = true;
for (int i = 0; i < candidate.num_functions; ++i) {
Handle<SharedFunctionInfo> shared =
candidate.functions[i].is_null()
@@ -100,11 +109,15 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
if (!shared->force_inline()) {
force_inline = false;
}
- if (CanInlineFunction(shared)) {
+ candidate.can_inline_function[i] = CanInlineFunction(shared);
+ if (candidate.can_inline_function[i]) {
can_inline = true;
}
+ if (!IsSmallInlineFunction(shared)) {
+ small_inline = false;
+ }
}
- if (force_inline) return InlineCandidate(candidate);
+ if (force_inline) return InlineCandidate(candidate, true);
if (!can_inline) return NoChange();
// Stop inlining once the maximum allowed level is reached.
@@ -141,11 +154,27 @@ Reduction JSInliningHeuristic::Reduce(Node* node) {
case kRestrictedInlining:
return NoChange();
case kStressInlining:
- return InlineCandidate(candidate);
+ return InlineCandidate(candidate, false);
case kGeneralInlining:
break;
}
+ // Don't consider a {candidate} whose frequency is below the
+ // threshold, i.e. a call site that is only hit once every N
+ // invocations of the caller.
+ if (candidate.frequency.IsKnown() &&
+ candidate.frequency.value() < FLAG_min_inlining_frequency) {
+ return NoChange();
+ }
+
+ // Forcibly inline small functions here. In the case of polymorphic inlining,
+ // small_inline is set only when all of the functions are small.
+ if (small_inline && cumulative_count_ <= FLAG_max_inlined_nodes_absolute) {
+ TRACE("Inlining small function(s) at call site #%d:%s\n", node->id(),
+ node->op()->mnemonic());
+ return InlineCandidate(candidate, true);
+ }
+
// In the general case we remember the candidate for later.
candidates_.insert(candidate);
return NoChange();
@@ -164,19 +193,16 @@ void JSInliningHeuristic::Finalize() {
auto i = candidates_.begin();
Candidate candidate = *i;
candidates_.erase(i);
- // Only include candidates that we've successfully called before.
- // The candidate list is sorted, so we can exit at the first occurance of
- // frequency 0 in the list.
- if (candidate.frequency <= 0.0) return;
// Make sure we don't try to inline dead candidate nodes.
if (!candidate.node->IsDead()) {
- Reduction const reduction = InlineCandidate(candidate);
+ Reduction const reduction = InlineCandidate(candidate, false);
if (reduction.Changed()) return;
}
}
}
-Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
+Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate,
+ bool force_inline) {
int const num_calls = candidate.num_functions;
Node* const node = candidate.node;
if (num_calls == 1) {
@@ -268,12 +294,16 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
for (int i = 0; i < num_calls; ++i) {
Handle<JSFunction> function = candidate.functions[i];
Node* node = calls[i];
- Reduction const reduction = inliner_.ReduceJSCall(node);
- if (reduction.Changed()) {
- // Killing the call node is not strictly necessary, but it is safer to
- // make sure we do not resurrect the node.
- node->Kill();
- cumulative_count_ += function->shared()->ast_node_count();
+ if (force_inline ||
+ (candidate.can_inline_function[i] &&
+ cumulative_count_ < FLAG_max_inlined_nodes_cumulative)) {
+ Reduction const reduction = inliner_.ReduceJSCall(node);
+ if (reduction.Changed()) {
+ // Killing the call node is not strictly necessary, but it is safer to
+ // make sure we do not resurrect the node.
+ node->Kill();
+ cumulative_count_ += function->shared()->ast_node_count();
+ }
}
}
@@ -282,9 +312,19 @@ Reduction JSInliningHeuristic::InlineCandidate(Candidate const& candidate) {
bool JSInliningHeuristic::CandidateCompare::operator()(
const Candidate& left, const Candidate& right) const {
- if (left.frequency > right.frequency) {
+ if (right.frequency.IsUnknown()) {
+ if (left.frequency.IsUnknown()) {
+ // If left and right are both unknown then the ordering is indeterminate,
+ // which breaks strict weak ordering requirements, so we fall back to the
+ // node id as a tie breaker.
+ return left.node->id() > right.node->id();
+ }
+ return true;
+ } else if (left.frequency.IsUnknown()) {
+ return false;
+ } else if (left.frequency.value() > right.frequency.value()) {
return true;
- } else if (left.frequency < right.frequency) {
+ } else if (left.frequency.value() < right.frequency.value()) {
return false;
} else {
return left.node->id() > right.node->id();
@@ -292,10 +332,12 @@ bool JSInliningHeuristic::CandidateCompare::operator()(
}
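// Why the IsUnknown() cases matter (sketch): std::set requires a strict weak
// ordering, and the frequency is now NaN-backed when unknown. A naive float
// comparison is false both ways for any pair involving NaN, so every
// unknown-frequency candidate would fall into one equivalence class with all
// other candidates and the ordering invariants would break. The rewrite
// instead sorts unknown-frequency candidates after all known ones and breaks
// remaining ties by node id, e.g. {A: unknown}, {B: 0.5}, {C: unknown}
// orders as B first, then A and C by descending node id.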
void JSInliningHeuristic::PrintCandidates() {
- PrintF("Candidates for inlining (size=%zu):\n", candidates_.size());
+ OFStream os(stdout);
+ os << "Candidates for inlining (size=" << candidates_.size() << "):\n";
for (const Candidate& candidate : candidates_) {
- PrintF(" #%d:%s, frequency:%g\n", candidate.node->id(),
- candidate.node->op()->mnemonic(), candidate.frequency);
+ os << " #" << candidate.node->id() << ":"
+ << candidate.node->op()->mnemonic()
+ << ", frequency: " << candidate.frequency << std::endl;
for (int i = 0; i < candidate.num_functions; ++i) {
Handle<SharedFunctionInfo> shared =
candidate.functions[i].is_null()
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.h b/deps/v8/src/compiler/js-inlining-heuristic.h
index b834cb0a06..0f5f9f87c1 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.h
+++ b/deps/v8/src/compiler/js-inlining-heuristic.h
@@ -37,14 +37,17 @@ class JSInliningHeuristic final : public AdvancedReducer {
struct Candidate {
Handle<JSFunction> functions[kMaxCallPolymorphism];
+ // In the case of polymorphic inlining, this tells whether each of the
+ // functions can be inlined.
+ bool can_inline_function[kMaxCallPolymorphism];
// TODO(2206): For now polymorphic inlining is treated orthogonally to
// inlining based on SharedFunctionInfo. This should be unified and the
// above array should be switched to SharedFunctionInfo instead. Currently
// we use {num_functions == 1 && functions[0].is_null()} as an indicator.
Handle<SharedFunctionInfo> shared_info;
int num_functions;
- Node* node = nullptr; // The call site at which to inline.
- float frequency = 0.0f; // Relative frequency of this call site.
+ Node* node = nullptr; // The call site at which to inline.
+ CallFrequency frequency; // Relative frequency of this call site.
};
// Comparator for candidates.
@@ -57,7 +60,7 @@ class JSInliningHeuristic final : public AdvancedReducer {
// Dumps candidates to console.
void PrintCandidates();
- Reduction InlineCandidate(Candidate const& candidate);
+ Reduction InlineCandidate(Candidate const& candidate, bool force_inline);
CommonOperatorBuilder* common() const;
Graph* graph() const;
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index af24b703d3..9b260e3533 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -66,7 +66,7 @@ class JSCallAccessor {
return call_->op()->ValueInputCount() - 2;
}
- float frequency() const {
+ CallFrequency frequency() const {
return (call_->opcode() == IrOpcode::kJSCall)
? CallParametersOf(call_->op()).frequency()
: ConstructParametersOf(call_->op()).frequency();
@@ -335,10 +335,11 @@ bool NeedsImplicitReceiver(Handle<SharedFunctionInfo> shared_info) {
DisallowHeapAllocation no_gc;
Isolate* const isolate = shared_info->GetIsolate();
Code* const construct_stub = shared_info->construct_stub();
- return construct_stub != *isolate->builtins()->JSBuiltinsConstructStub() &&
- construct_stub !=
- *isolate->builtins()->JSBuiltinsConstructStubForDerived() &&
- construct_stub != *isolate->builtins()->JSConstructStubApi();
+ if (construct_stub == *isolate->builtins()->JSConstructStubGeneric()) {
+ return !IsDerivedConstructor(shared_info->kind());
+ } else {
+ return false;
+ }
}
bool IsNonConstructible(Handle<SharedFunctionInfo> shared_info) {
@@ -486,18 +487,6 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
return NoChange();
}
- // TODO(706642): Don't inline derived class constructors for now, as the
- // inlining logic doesn't deal properly with derived class constructors
- // that return a primitive, i.e. it's not in sync with what the Parser
- // and the JSConstructSub does.
- if (node->opcode() == IrOpcode::kJSConstruct &&
- IsDerivedConstructor(shared_info->kind())) {
- TRACE("Not inlining %s into %s because constructor is derived.\n",
- shared_info->DebugName()->ToCString().get(),
- info_->shared_info()->DebugName()->ToCString().get());
- return NoChange();
- }
-
// Class constructors are callable, but [[Call]] will raise an exception.
// See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
if (node->opcode() == IrOpcode::kJSCall &&
@@ -655,21 +644,93 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
uncaught_subcalls.push_back(create); // Adds {IfSuccess} & {IfException}.
NodeProperties::ReplaceControlInput(node, create);
NodeProperties::ReplaceEffectInput(node, create);
- // Insert a check of the return value to determine whether the return
- // value or the implicit receiver should be selected as a result of the
- // call.
- Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), node);
- Node* select =
- graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
- check, node, create);
- NodeProperties::ReplaceUses(node, select, node, node, node);
- // Fix-up inputs that have been mangled by the {ReplaceUses} call above.
- NodeProperties::ReplaceValueInput(select, node, 1); // Fix-up input.
- NodeProperties::ReplaceValueInput(check, node, 0); // Fix-up input.
+ Node* node_success =
+ NodeProperties::FindSuccessfulControlProjection(node);
+ // Placeholder to hold {node}'s value dependencies while {node} is
+ // replaced.
+ Node* dummy = graph()->NewNode(common()->Dead());
+ NodeProperties::ReplaceUses(node, dummy, node, node, node);
+ Node* result;
+ if (FLAG_harmony_restrict_constructor_return &&
+ IsClassConstructor(shared_info->kind())) {
+ Node* is_undefined =
+ graph()->NewNode(simplified()->ReferenceEqual(), node,
+ jsgraph()->UndefinedConstant());
+ Node* branch_is_undefined =
+ graph()->NewNode(common()->Branch(), is_undefined, node_success);
+ Node* branch_is_undefined_true =
+ graph()->NewNode(common()->IfTrue(), branch_is_undefined);
+ Node* branch_is_undefined_false =
+ graph()->NewNode(common()->IfFalse(), branch_is_undefined);
+ Node* is_receiver =
+ graph()->NewNode(simplified()->ObjectIsReceiver(), node);
+ Node* branch_is_receiver = graph()->NewNode(
+ common()->Branch(), is_receiver, branch_is_undefined_false);
+ Node* branch_is_receiver_true =
+ graph()->NewNode(common()->IfTrue(), branch_is_receiver);
+ Node* branch_is_receiver_false =
+ graph()->NewNode(common()->IfFalse(), branch_is_receiver);
+ branch_is_receiver_false =
+ graph()->NewNode(javascript()->CallRuntime(
+ Runtime::kThrowConstructorReturnedNonObject),
+ context, NodeProperties::GetFrameStateInput(node),
+ node, branch_is_receiver_false);
+ uncaught_subcalls.push_back(branch_is_receiver_false);
+ branch_is_receiver_false =
+ graph()->NewNode(common()->Throw(), branch_is_receiver_false,
+ branch_is_receiver_false);
+ NodeProperties::MergeControlToEnd(graph(), common(),
+ branch_is_receiver_false);
+ Node* merge =
+ graph()->NewNode(common()->Merge(2), branch_is_undefined_true,
+ branch_is_receiver_true);
+ result =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ create, node, merge);
+ ReplaceWithValue(node_success, node_success, node_success, merge);
+ // Fix input destroyed by the above {ReplaceWithValue} call.
+ NodeProperties::ReplaceControlInput(branch_is_undefined, node_success,
+ 0);
+ } else {
+ // Insert a check of the return value to determine whether the return
+ // value or the implicit receiver should be selected as a result of the
+ // call.
+ Node* check = graph()->NewNode(simplified()->ObjectIsReceiver(), node);
+ result =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ check, node, create);
+ }
receiver = create; // The implicit receiver.
+ ReplaceWithValue(dummy, result);
+ } else if (IsDerivedConstructor(shared_info->kind())) {
+ Node* node_success =
+ NodeProperties::FindSuccessfulControlProjection(node);
+ Node* is_receiver =
+ graph()->NewNode(simplified()->ObjectIsReceiver(), node);
+ Node* branch_is_receiver =
+ graph()->NewNode(common()->Branch(), is_receiver, node_success);
+ Node* branch_is_receiver_true =
+ graph()->NewNode(common()->IfTrue(), branch_is_receiver);
+ Node* branch_is_receiver_false =
+ graph()->NewNode(common()->IfFalse(), branch_is_receiver);
+ branch_is_receiver_false =
+ graph()->NewNode(javascript()->CallRuntime(
+ Runtime::kThrowConstructorReturnedNonObject),
+ context, NodeProperties::GetFrameStateInput(node),
+ node, branch_is_receiver_false);
+ uncaught_subcalls.push_back(branch_is_receiver_false);
+ branch_is_receiver_false =
+ graph()->NewNode(common()->Throw(), branch_is_receiver_false,
+ branch_is_receiver_false);
+ NodeProperties::MergeControlToEnd(graph(), common(),
+ branch_is_receiver_false);
+
+ ReplaceWithValue(node_success, node_success, node_success,
+ branch_is_receiver_true);
+ // Fix input destroyed by the above {ReplaceWithValue} call.
+ NodeProperties::ReplaceControlInput(branch_is_receiver, node_success, 0);
}
node->ReplaceInput(1, receiver);
-
// Insert a construct stub frame into the chain of frame states. This will
// reconstruct the proper frame when deoptimizing within the constructor.
frame_state = CreateArtificialFrameState(
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.cc b/deps/v8/src/compiler/js-intrinsic-lowering.cc
index a18551c642..b9ee8a4ed6 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.cc
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.cc
@@ -38,6 +38,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceDeoptimizeNow(node);
case Runtime::kInlineGeneratorClose:
return ReduceGeneratorClose(node);
+ case Runtime::kInlineCreateJSGeneratorObject:
+ return ReduceCreateJSGeneratorObject(node);
case Runtime::kInlineGeneratorGetInputOrDebugPos:
return ReduceGeneratorGetInputOrDebugPos(node);
case Runtime::kInlineAsyncGeneratorGetAwaitInputOrDebugPos:
@@ -56,6 +58,18 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
case Runtime::kInlineIsJSProxy:
return ReduceIsInstanceType(node, JS_PROXY_TYPE);
+ case Runtime::kInlineIsJSMap:
+ return ReduceIsInstanceType(node, JS_MAP_TYPE);
+ case Runtime::kInlineIsJSSet:
+ return ReduceIsInstanceType(node, JS_SET_TYPE);
+ case Runtime::kInlineIsJSMapIterator:
+ return ReduceIsInstanceType(node, JS_MAP_ITERATOR_TYPE);
+ case Runtime::kInlineIsJSSetIterator:
+ return ReduceIsInstanceType(node, JS_SET_ITERATOR_TYPE);
+ case Runtime::kInlineIsJSWeakMap:
+ return ReduceIsInstanceType(node, JS_WEAK_MAP_TYPE);
+ case Runtime::kInlineIsJSWeakSet:
+ return ReduceIsInstanceType(node, JS_WEAK_SET_TYPE);
case Runtime::kInlineIsJSReceiver:
return ReduceIsJSReceiver(node);
case Runtime::kInlineIsSmi:
@@ -86,6 +100,8 @@ Reduction JSIntrinsicLowering::Reduce(Node* node) {
case Runtime::kInlineArrayBufferViewGetByteOffset:
return ReduceArrayBufferViewField(
node, AccessBuilder::ForJSArrayBufferViewByteOffset());
+ case Runtime::kInlineArrayBufferViewWasNeutered:
+ return ReduceArrayBufferViewWasNeutered(node);
case Runtime::kInlineMaxSmi:
return ReduceMaxSmi(node);
case Runtime::kInlineTypedArrayGetLength:
@@ -145,6 +161,19 @@ Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
return Changed(node);
}
+Reduction JSIntrinsicLowering::ReduceCreateJSGeneratorObject(Node* node) {
+ Node* const closure = NodeProperties::GetValueInput(node, 0);
+ Node* const receiver = NodeProperties::GetValueInput(node, 1);
+ Node* const context = NodeProperties::GetContextInput(node);
+ Node* const effect = NodeProperties::GetEffectInput(node);
+ Node* const control = NodeProperties::GetControlInput(node);
+ Operator const* const op = javascript()->CreateGeneratorObject();
+ Node* create_generator =
+ graph()->NewNode(op, closure, receiver, context, effect, control);
+ ReplaceWithValue(node, create_generator, create_generator);
+ return Changed(create_generator);
+}
+
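// Pipeline sketch: %_CreateJSGeneratorObject(closure, receiver) first becomes
// the JS-level JSCreateGeneratorObject operator here; generic lowering (see
// the js-generic-lowering.cc hunk above) then turns that into a call to
// Builtins::kCreateGeneratorObject. Keeping a JS-level operator in between
// matters because it is created kEliminatable (see js-operator.cc below), so
// an unused generator object can still be optimized away.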
Reduction JSIntrinsicLowering::ReduceGeneratorClose(Node* node) {
Node* const generator = NodeProperties::GetValueInput(node, 0);
Node* const effect = NodeProperties::GetEffectInput(node);
@@ -336,10 +365,7 @@ Reduction JSIntrinsicLowering::ReduceToString(Node* node) {
Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
size_t const arity = CallRuntimeParametersOf(node->op()).arity();
- NodeProperties::ChangeOp(
- node,
- javascript()->Call(arity, 0.0f, VectorSlotPair(),
- ConvertReceiverMode::kAny, TailCallMode::kDisallow));
+ NodeProperties::ChangeOp(node, javascript()->Call(arity));
return Changed(node);
}
@@ -374,6 +400,22 @@ Reduction JSIntrinsicLowering::ReduceArrayBufferViewField(
return Replace(value);
}
+Reduction JSIntrinsicLowering::ReduceArrayBufferViewWasNeutered(Node* node) {
+ Node* receiver = NodeProperties::GetValueInput(node, 0);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ // Check if the {receiver}s buffer was neutered.
+ Node* receiver_buffer = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+ receiver, effect, control);
+ Node* value = effect = graph()->NewNode(
+ simplified()->ArrayBufferWasNeutered(), receiver_buffer, effect, control);
+
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+}
+
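// What this buys us (sketch): instead of leaving a runtime call behind,
// %_ArrayBufferViewWasNeutered(view) lowers to two simplified operators that
// later phases can hoist or fold:
//   LoadField[JSArrayBufferViewBuffer](view)   -- fetch the backing buffer
//   ArrayBufferWasNeutered(buffer)             -- test its bit field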
Reduction JSIntrinsicLowering::ReduceMaxSmi(Node* node) {
Node* value = jsgraph()->Constant(Smi::kMaxValue);
ReplaceWithValue(node, value);
diff --git a/deps/v8/src/compiler/js-intrinsic-lowering.h b/deps/v8/src/compiler/js-intrinsic-lowering.h
index 2a2baf0930..0f3e84a5e5 100644
--- a/deps/v8/src/compiler/js-intrinsic-lowering.h
+++ b/deps/v8/src/compiler/js-intrinsic-lowering.h
@@ -43,6 +43,7 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
Reduction ReduceCreateIterResultObject(Node* node);
Reduction ReduceDebugIsActive(Node* node);
Reduction ReduceDeoptimizeNow(Node* node);
+ Reduction ReduceCreateJSGeneratorObject(Node* node);
Reduction ReduceGeneratorClose(Node* node);
Reduction ReduceGeneratorGetContext(Node* node);
Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
@@ -68,6 +69,7 @@ class V8_EXPORT_PRIVATE JSIntrinsicLowering final
// TODO(turbofan): typedarray.js support; drop once TypedArrays are
// converted to proper CodeStubAssembler based builtins.
Reduction ReduceArrayBufferViewField(Node* node, FieldAccess const& access);
+ Reduction ReduceArrayBufferViewWasNeutered(Node* node);
Reduction ReduceMaxSmi(Node* node);
Reduction ReduceTypedArrayMaxSizeInHeap(Node* node);
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index 66013b85ca..5a3ccebed1 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -24,21 +24,21 @@ namespace compiler {
namespace {
-bool HasNumberMaps(MapList const& maps) {
+bool HasNumberMaps(MapHandles const& maps) {
for (auto map : maps) {
if (map->instance_type() == HEAP_NUMBER_TYPE) return true;
}
return false;
}
-bool HasOnlyJSArrayMaps(MapList const& maps) {
+bool HasOnlyJSArrayMaps(MapHandles const& maps) {
for (auto map : maps) {
if (!map->IsJSArrayMap()) return false;
}
return true;
}
-bool HasOnlyNumberMaps(MapList const& maps) {
+bool HasOnlyNumberMaps(MapHandles const& maps) {
for (auto map : maps) {
if (map->instance_type() != HEAP_NUMBER_TYPE) return false;
}
@@ -154,11 +154,6 @@ Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
if (function_prototype->IsConstructor()) {
ReplaceWithValue(node, value);
return Replace(value);
- } else {
- node->InsertInput(graph()->zone(), 0, value);
- NodeProperties::ChangeOp(
- node, javascript()->CallRuntime(Runtime::kThrowNotSuperConstructor));
- return Changed(node);
}
}
@@ -247,9 +242,8 @@ Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
node->ReplaceInput(2, object);
node->ReplaceInput(5, effect);
NodeProperties::ChangeOp(
- node,
- javascript()->Call(3, 0.0f, VectorSlotPair(),
- ConvertReceiverMode::kNotNullOrUndefined));
+ node, javascript()->Call(3, CallFrequency(), VectorSlotPair(),
+ ConvertReceiverMode::kNotNullOrUndefined));
// Rewire the value uses of {node} to ToBoolean conversion of the result.
Node* value = graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
@@ -271,10 +265,17 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
DCHECK_EQ(IrOpcode::kJSOrdinaryHasInstance, node->opcode());
Node* constructor = NodeProperties::GetValueInput(node, 0);
Node* object = NodeProperties::GetValueInput(node, 1);
+ Node* context = NodeProperties::GetContextInput(node);
+ Node* frame_state = NodeProperties::GetFrameStateInput(node);
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
- // Check if the {constructor} is a JSBoundFunction.
+ // Check if the {constructor} is known at compile time.
HeapObjectMatcher m(constructor);
- if (m.HasValue() && m.Value()->IsJSBoundFunction()) {
+ if (!m.HasValue()) return NoChange();
+
+ // Check if the {constructor} is a JSBoundFunction.
+ if (m.Value()->IsJSBoundFunction()) {
// OrdinaryHasInstance on bound functions turns into a recursive
// invocation of the instanceof operator again.
// ES6 section 7.3.19 OrdinaryHasInstance (C, O) step 2.
@@ -288,6 +289,160 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
return reduction.Changed() ? reduction : Changed(node);
}
+ // Check if the {constructor} is a JSFunction.
+ if (m.Value()->IsJSFunction()) {
+ // Check if the {function} is a constructor and has an instance "prototype".
+ Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+ if (function->IsConstructor() && function->has_instance_prototype() &&
+ function->prototype()->IsJSReceiver()) {
+ // Ensure that the {function} has a valid initial map, so we can
+ // depend on that for the prototype constant-folding below.
+ JSFunction::EnsureHasInitialMap(function);
+
+ // Install a code dependency on the {function}s initial map.
+ Handle<Map> initial_map(function->initial_map(), isolate());
+ dependencies()->AssumeInitialMapCantChange(initial_map);
+ Handle<JSReceiver> function_prototype =
+ handle(JSReceiver::cast(initial_map->prototype()), isolate());
+
+ // Check if we can constant-fold the prototype chain walk
+ // for the given {object} and the {function_prototype}.
+ InferHasInPrototypeChainResult result =
+ InferHasInPrototypeChain(object, effect, function_prototype);
+ if (result != kMayBeInPrototypeChain) {
+ Node* value = jsgraph()->BooleanConstant(result == kIsInPrototypeChain);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+
+ Node* prototype = jsgraph()->Constant(function_prototype);
+
+ Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), object);
+ Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check0, control);
+
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* etrue0 = effect;
+ Node* vtrue0 = jsgraph()->FalseConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch0);
+
+ // Loop through the {object}s prototype chain looking for the {prototype}.
+ Node* loop = control =
+ graph()->NewNode(common()->Loop(2), control, control);
+ Node* eloop = effect =
+ graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
+ Node* vloop = object =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ object, object, loop);
+
+ // Load the {object} map and instance type.
+ Node* object_map = effect =
+ graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+ object, effect, control);
+ Node* object_instance_type = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
+ object_map, effect, control);
+
+ // Check if the {object} is a special receiver, because for special
+ // receivers, i.e. proxies or API objects that need access checks,
+ // we have to use the %HasInPrototypeChain runtime function instead.
+ Node* check1 = graph()->NewNode(
+ simplified()->NumberLessThanOrEqual(), object_instance_type,
+ jsgraph()->Constant(LAST_SPECIAL_RECEIVER_TYPE));
+ Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+ check1, control);
+
+ control = graph()->NewNode(common()->IfFalse(), branch1);
+
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* etrue1 = effect;
+ Node* vtrue1;
+
+ // Check if the {object} is not a receiver at all.
+ Node* check10 =
+ graph()->NewNode(simplified()->NumberLessThan(), object_instance_type,
+ jsgraph()->Constant(FIRST_JS_RECEIVER_TYPE));
+ Node* branch10 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check10, if_true1);
+
+ // A primitive value cannot match the {prototype} we're looking for.
+ if_true1 = graph()->NewNode(common()->IfTrue(), branch10);
+ vtrue1 = jsgraph()->FalseConstant();
+
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch10);
+ Node* efalse1 = etrue1;
+ Node* vfalse1;
+ {
+ // Slow path, need to call the %HasInPrototypeChain runtime function.
+ vfalse1 = efalse1 = if_false1 = graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kHasInPrototypeChain), object,
+ prototype, context, frame_state, efalse1, if_false1);
+
+ // Replace any potential {IfException} uses of {node} to catch
+ // exceptions from this %HasInPrototypeChain runtime call instead.
+ Node* on_exception = nullptr;
+ if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
+ NodeProperties::ReplaceControlInput(on_exception, vfalse1);
+ NodeProperties::ReplaceEffectInput(on_exception, efalse1);
+ if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
+ Revisit(on_exception);
+ }
+ }
+
+ // Load the {object} prototype.
+ Node* object_prototype = effect = graph()->NewNode(
+ simplified()->LoadField(AccessBuilder::ForMapPrototype()), object_map,
+ effect, control);
+
+ // Check if we reached the end of {object}s prototype chain.
+ Node* check2 =
+ graph()->NewNode(simplified()->ReferenceEqual(), object_prototype,
+ jsgraph()->NullConstant());
+ Node* branch2 = graph()->NewNode(common()->Branch(), check2, control);
+
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* etrue2 = effect;
+ Node* vtrue2 = jsgraph()->FalseConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch2);
+
+ // Check if we reached the {prototype}.
+ Node* check3 = graph()->NewNode(simplified()->ReferenceEqual(),
+ object_prototype, prototype);
+ Node* branch3 = graph()->NewNode(common()->Branch(), check3, control);
+
+ Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
+ Node* etrue3 = effect;
+ Node* vtrue3 = jsgraph()->TrueConstant();
+
+ control = graph()->NewNode(common()->IfFalse(), branch3);
+
+ // Close the loop.
+ vloop->ReplaceInput(1, object_prototype);
+ eloop->ReplaceInput(1, effect);
+ loop->ReplaceInput(1, control);
+
+ control = graph()->NewNode(common()->Merge(5), if_true0, if_true1,
+ if_true2, if_true3, if_false1);
+ effect = graph()->NewNode(common()->EffectPhi(5), etrue0, etrue1, etrue2,
+ etrue3, efalse1, control);
+
+ // Morph the {node} into an appropriate Phi.
+ ReplaceWithValue(node, node, effect, control);
+ node->ReplaceInput(0, vtrue0);
+ node->ReplaceInput(1, vtrue1);
+ node->ReplaceInput(2, vtrue2);
+ node->ReplaceInput(3, vtrue3);
+ node->ReplaceInput(4, vfalse1);
+ node->ReplaceInput(5, control);
+ node->TrimInputCount(6);
+ NodeProperties::ChangeOp(
+ node, common()->Phi(MachineRepresentation::kTagged, 5));
+ return Changed(node);
+ }
+ }
+
return NoChange();
}
@@ -551,9 +706,8 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
}
Reduction JSNativeContextSpecialization::ReduceNamedAccess(
- Node* node, Node* value, MapHandleList const& receiver_maps,
- Handle<Name> name, AccessMode access_mode, LanguageMode language_mode,
- Node* index) {
+ Node* node, Node* value, MapHandles const& receiver_maps, Handle<Name> name,
+ AccessMode access_mode, LanguageMode language_mode, Node* index) {
DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
node->opcode() == IrOpcode::kJSStoreNamed ||
node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -568,8 +722,8 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
// Check if we have an access o.x or o.x=v where o is the current
// native contexts' global proxy, and turn that into a direct access
// to the current native contexts' global object instead.
- if (receiver_maps.length() == 1) {
- Handle<Map> receiver_map = receiver_maps.first();
+ if (receiver_maps.size() == 1) {
+ Handle<Map> receiver_map = receiver_maps.front();
if (receiver_map->IsJSGlobalProxyMap()) {
Object* maybe_constructor = receiver_map->GetConstructor();
// Detached global proxies have |null| as their constructor.
@@ -686,7 +840,7 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccess(
Node* this_control = fallthrough_control;
// Perform map check on {receiver}.
- MapList const& receiver_maps = access_info.receiver_maps();
+ MapHandles const& receiver_maps = access_info.receiver_maps();
{
// Emit a (sequence of) map checks for other {receiver}s.
ZoneVector<Node*> this_controls(zone());
@@ -801,10 +955,10 @@ Reduction JSNativeContextSpecialization::ReduceNamedAccessFromNexus(
}
// Extract receiver maps from the IC using the {nexus}.
- MapHandleList receiver_maps;
+ MapHandles receiver_maps;
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
return NoChange();
- } else if (receiver_maps.length() == 0) {
+ } else if (receiver_maps.empty()) {
if (flags() & kBailoutOnUninitialized) {
return ReduceSoftDeoptimize(
node,
@@ -831,11 +985,11 @@ Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
p.name().is_identical_to(factory()->prototype_string())) {
// Optimize "prototype" property of functions.
Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
- if (function->has_initial_map()) {
+ if (function->IsConstructor()) {
// We need to add a code dependency on the initial map of the
// {function} in order to be notified about changes to the
- // "prototype" of {function}, so it doesn't make sense to
- // continue unless deoptimization is enabled.
+ // "prototype" of {function}.
+ JSFunction::EnsureHasInitialMap(function);
Handle<Map> initial_map(function->initial_map(), isolate());
dependencies()->AssumeInitialMapCantChange(initial_map);
Handle<Object> prototype(function->prototype(), isolate());
@@ -892,7 +1046,7 @@ Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
}
Reduction JSNativeContextSpecialization::ReduceElementAccess(
- Node* node, Node* index, Node* value, MapHandleList const& receiver_maps,
+ Node* node, Node* index, Node* value, MapHandles const& receiver_maps,
AccessMode access_mode, LanguageMode language_mode,
KeyedAccessStoreMode store_mode) {
DCHECK(node->opcode() == IrOpcode::kJSLoadProperty ||
@@ -1056,7 +1210,7 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
receiver, this_effect, this_control);
// Perform map check(s) on {receiver}.
- MapList const& receiver_maps = access_info.receiver_maps();
+ MapHandles const& receiver_maps = access_info.receiver_maps();
if (j == access_infos.size() - 1) {
// Last map check on the fallthrough control path, do a
// conditional eager deoptimization exit here.
@@ -1189,10 +1343,10 @@ Reduction JSNativeContextSpecialization::ReduceKeyedAccess(
}
// Extract receiver maps from the {nexus}.
- MapHandleList receiver_maps;
+ MapHandles receiver_maps;
if (!ExtractReceiverMaps(receiver, effect, nexus, &receiver_maps)) {
return NoChange();
- } else if (receiver_maps.length() == 0) {
+ } else if (receiver_maps.empty()) {
if (flags() & kBailoutOnUninitialized) {
return ReduceSoftDeoptimize(
node,
@@ -1347,7 +1501,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Introduce the call to the getter function.
if (access_info.constant()->IsJSFunction()) {
value = effect = control = graph()->NewNode(
- javascript()->Call(2, 0.0f, VectorSlotPair(),
+ javascript()->Call(2, CallFrequency(), VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, context, frame_state0, effect, control);
} else {
@@ -1383,7 +1537,7 @@ JSNativeContextSpecialization::BuildPropertyAccess(
// Introduce the call to the setter function.
if (access_info.constant()->IsJSFunction()) {
effect = control = graph()->NewNode(
- javascript()->Call(3, 0.0f, VectorSlotPair(),
+ javascript()->Call(3, CallFrequency(), VectorSlotPair(),
ConvertReceiverMode::kNotNullOrUndefined),
target, receiver, value, context, frame_state0, effect, control);
} else {
@@ -1756,7 +1910,7 @@ JSNativeContextSpecialization::BuildElementAccess(
// TODO(bmeurer): We currently specialize based on elements kind. We should
// also be able to properly support strings and other JSObjects here.
ElementsKind elements_kind = access_info.elements_kind();
- MapList const& receiver_maps = access_info.receiver_maps();
+ MapHandles const& receiver_maps = access_info.receiver_maps();
if (IsFixedTypedArrayElementsKind(elements_kind)) {
Node* buffer;
@@ -2059,7 +2213,7 @@ JSNativeContextSpecialization::InlineApiCall(
int const argc = value == nullptr ? 0 : 1;
// The stub always expects the receiver as the first param on the stack.
CallApiCallbackStub stub(
- isolate(), argc, call_data_object->IsUndefined(isolate()),
+ isolate(), argc,
true /* FunctionTemplateInfo doesn't have an associated context. */);
CallInterfaceDescriptor call_interface_descriptor =
stub.GetCallInterfaceDescriptor();
@@ -2127,7 +2281,7 @@ Node* JSNativeContextSpecialization::BuildCheckHeapObject(Node* receiver,
Node* JSNativeContextSpecialization::BuildCheckMaps(
Node* receiver, Node* effect, Node* control,
- std::vector<Handle<Map>> const& receiver_maps) {
+ MapHandles const& receiver_maps) {
HeapObjectMatcher m(receiver);
if (m.HasValue()) {
Handle<Map> receiver_map(m.Value()->map(), isolate());
@@ -2154,6 +2308,15 @@ Node* JSNativeContextSpecialization::BuildCheckMaps(
Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
Handle<Map> map, Node* properties, Node* effect, Node* control) {
+ // TODO(bmeurer/jkummerow): Property deletions can undo map transitions
+ // while keeping the backing store around, meaning that even though the
+ // map might believe that objects have no unused property fields, there
+ // might actually be some. It would be nice to not create a new backing
+ // store in that case (i.e. when properties->length() >= new_length).
+ // However, introducing branches and Phi nodes here would make it more
+ // difficult for escape analysis to get rid of the backing stores used
+ // for intermediate states of chains of property additions. That makes
+ // it unclear what the best approach is here.
DCHECK_EQ(0, map->unused_property_fields());
// Compute the length of the old {properties} and the new properties.
int length = map->NextFreePropertyIndex() - map->GetInObjectProperties();
@@ -2192,7 +2355,7 @@ Node* JSNativeContextSpecialization::BuildExtendPropertiesBackingStore(
}
void JSNativeContextSpecialization::AssumePrototypesStable(
- std::vector<Handle<Map>> const& receiver_maps, Handle<JSObject> holder) {
+ MapHandles const& receiver_maps, Handle<JSObject> holder) {
// Determine actual holder and perform prototype chain checks.
for (auto map : receiver_maps) {
// Perform the implicit ToObject for primitives here.
@@ -2207,7 +2370,7 @@ void JSNativeContextSpecialization::AssumePrototypesStable(
}
bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
- std::vector<Handle<Map>> const& receiver_maps) {
+ MapHandles const& receiver_maps) {
// Check if the array prototype chain is intact.
if (!isolate()->IsFastArrayConstructorPrototypeChainIntact()) return false;
@@ -2241,10 +2404,61 @@ bool JSNativeContextSpecialization::CanTreatHoleAsUndefined(
return true;
}
+JSNativeContextSpecialization::InferHasInPrototypeChainResult
+JSNativeContextSpecialization::InferHasInPrototypeChain(
+ Node* receiver, Node* effect, Handle<JSReceiver> prototype) {
+ ZoneHandleSet<Map> receiver_maps;
+ NodeProperties::InferReceiverMapsResult result =
+ NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+ if (result == NodeProperties::kNoReceiverMaps) return kMayBeInPrototypeChain;
+
+ // Check if either all or none of the {receiver_maps} have the given
+ // {prototype} in their prototype chain.
+ bool all = true;
+ bool none = true;
+ for (size_t i = 0; i < receiver_maps.size(); ++i) {
+ Handle<Map> receiver_map = receiver_maps[i];
+ if (receiver_map->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+ return kMayBeInPrototypeChain;
+ }
+ if (result == NodeProperties::kUnreliableReceiverMaps) {
+ // In case of an unreliable {result} we need to ensure that all
+ // {receiver_maps} are stable, because otherwise we cannot trust
+ // the {receiver_maps} information, since arbitrary side-effects
+ // may have happened.
+ if (!receiver_map->is_stable()) {
+ return kMayBeInPrototypeChain;
+ }
+ }
+ for (PrototypeIterator j(receiver_map);; j.Advance()) {
+ if (j.IsAtEnd()) {
+ all = false;
+ break;
+ }
+ Handle<JSReceiver> const current =
+ PrototypeIterator::GetCurrent<JSReceiver>(j);
+ if (current.is_identical_to(prototype)) {
+ none = false;
+ break;
+ }
+ if (!current->map()->is_stable() ||
+ current->map()->instance_type() <= LAST_SPECIAL_RECEIVER_TYPE) {
+ return kMayBeInPrototypeChain;
+ }
+ }
+ }
+ DCHECK_IMPLIES(all, !none);
+ DCHECK_IMPLIES(none, !all);
+
+ if (all) return kIsInPrototypeChain;
+ if (none) return kIsNotInPrototypeChain;
+ return kMayBeInPrototypeChain;
+}
+
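// Worked example (sketch): with inferred receiver maps {M1, M2}, both stable,
// and target prototype P:
//   M1's chain: ... -> P -> ... -> null   P found, so none := false
//   M2's chain: ... -> null, no P         chain ended, so all := false
// Mixed results leave both flags false and the function answers
// kMayBeInPrototypeChain, in which case the caller emits the generic
// prototype-walk loop instead of constant-folding the check.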
bool JSNativeContextSpecialization::ExtractReceiverMaps(
Node* receiver, Node* effect, FeedbackNexus const& nexus,
- MapHandleList* receiver_maps) {
- DCHECK_EQ(0, receiver_maps->length());
+ MapHandles* receiver_maps) {
+ DCHECK_EQ(0, receiver_maps->size());
// See if we can infer a concrete type for the {receiver}.
if (InferReceiverMaps(receiver, effect, receiver_maps)) {
// We can assume that the {receiver} still has the inferred {receiver_maps}.
@@ -2255,11 +2469,12 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
// Try to filter impossible candidates based on inferred root map.
Handle<Map> receiver_map;
if (InferReceiverRootMap(receiver).ToHandle(&receiver_map)) {
- for (int i = receiver_maps->length(); --i >= 0;) {
- if (receiver_maps->at(i)->FindRootMap() != *receiver_map) {
- receiver_maps->Remove(i);
- }
- }
+ receiver_maps->erase(
+ std::remove_if(receiver_maps->begin(), receiver_maps->end(),
+ [receiver_map](const Handle<Map>& map) {
+ return map->FindRootMap() != *receiver_map;
+ }),
+ receiver_maps->end());
}
return true;
}
@@ -2267,13 +2482,13 @@ bool JSNativeContextSpecialization::ExtractReceiverMaps(
}
bool JSNativeContextSpecialization::InferReceiverMaps(
- Node* receiver, Node* effect, MapHandleList* receiver_maps) {
+ Node* receiver, Node* effect, MapHandles* receiver_maps) {
ZoneHandleSet<Map> maps;
NodeProperties::InferReceiverMapsResult result =
NodeProperties::InferReceiverMaps(receiver, effect, &maps);
if (result == NodeProperties::kReliableReceiverMaps) {
for (size_t i = 0; i < maps.size(); ++i) {
- receiver_maps->Add(maps[i]);
+ receiver_maps->push_back(maps[i]);
}
return true;
} else if (result == NodeProperties::kUnreliableReceiverMaps) {
@@ -2283,7 +2498,7 @@ bool JSNativeContextSpecialization::InferReceiverMaps(
if (!maps[i]->is_stable()) return false;
}
for (size_t i = 0; i < maps.size(); ++i) {
- receiver_maps->Add(maps[i]);
+ receiver_maps->push_back(maps[i]);
}
return true;
}
diff --git a/deps/v8/src/compiler/js-native-context-specialization.h b/deps/v8/src/compiler/js-native-context-specialization.h
index cd1b3349ad..2f9df08f81 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.h
+++ b/deps/v8/src/compiler/js-native-context-specialization.h
@@ -8,7 +8,7 @@
#include "src/base/flags.h"
#include "src/compiler/graph-reducer.h"
#include "src/deoptimize-reason.h"
-#include "src/feedback-vector.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
@@ -16,6 +16,7 @@ namespace internal {
// Forward declarations.
class CompilationDependencies;
class Factory;
+class FeedbackNexus;
namespace compiler {
@@ -67,7 +68,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
Reduction ReduceJSStoreDataPropertyInLiteral(Node* node);
Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
- MapHandleList const& receiver_maps,
+ MapHandles const& receiver_maps,
AccessMode access_mode,
LanguageMode language_mode,
KeyedAccessStoreMode store_mode);
@@ -82,7 +83,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
AccessMode access_mode,
LanguageMode language_mode);
Reduction ReduceNamedAccess(Node* node, Node* value,
- MapHandleList const& receiver_maps,
+ MapHandles const& receiver_maps,
Handle<Name> name, AccessMode access_mode,
LanguageMode language_mode,
Node* index = nullptr);
@@ -130,7 +131,7 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
// Construct an appropriate map check.
Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
- std::vector<Handle<Map>> const& maps);
+ MapHandles const& maps);
// Construct appropriate subgraph to extend properties backing store.
Node* BuildExtendPropertiesBackingStore(Handle<Map> map, Node* properties,
@@ -138,26 +139,37 @@ class JSNativeContextSpecialization final : public AdvancedReducer {
// Adds stability dependencies on all prototypes of every class in
// {receiver_type} up to (and including) the {holder}.
- void AssumePrototypesStable(std::vector<Handle<Map>> const& receiver_maps,
+ void AssumePrototypesStable(MapHandles const& receiver_maps,
Handle<JSObject> holder);
// Checks if we can turn the hole into undefined when loading an element
// from an object with one of the {receiver_maps}; sets up appropriate
// code dependencies and might use the array protector cell.
- bool CanTreatHoleAsUndefined(std::vector<Handle<Map>> const& receiver_maps);
+ bool CanTreatHoleAsUndefined(MapHandles const& receiver_maps);
+
+ // Checks if we know at compile time that the {receiver} either definitely
+ // has the {prototype} in its prototype chain, or the {receiver} definitely
+ // doesn't have the {prototype} in its prototype chain.
+ enum InferHasInPrototypeChainResult {
+ kIsInPrototypeChain,
+ kIsNotInPrototypeChain,
+ kMayBeInPrototypeChain
+ };
+ InferHasInPrototypeChainResult InferHasInPrototypeChain(
+ Node* receiver, Node* effect, Handle<JSReceiver> prototype);
// Extract receiver maps from {nexus} and filter based on {receiver} if
// possible.
bool ExtractReceiverMaps(Node* receiver, Node* effect,
FeedbackNexus const& nexus,
- MapHandleList* receiver_maps);
+ MapHandles* receiver_maps);
// Try to infer maps for the given {receiver} at the current {effect}.
// If maps are returned then you can be sure that the {receiver} definitely
// has one of the returned maps at this point in the program (identified
// by {effect}).
bool InferReceiverMaps(Node* receiver, Node* effect,
- MapHandleList* receiver_maps);
+ MapHandles* receiver_maps);
// Try to infer a root map for the {receiver} independent of the current
// program location.
MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
diff --git a/deps/v8/src/compiler/js-operator.cc b/deps/v8/src/compiler/js-operator.cc
index 74156b086d..b8156a23f4 100644
--- a/deps/v8/src/compiler/js-operator.cc
+++ b/deps/v8/src/compiler/js-operator.cc
@@ -17,6 +17,11 @@ namespace v8 {
namespace internal {
namespace compiler {
+std::ostream& operator<<(std::ostream& os, CallFrequency f) {
+ if (f.IsUnknown()) return os << "unknown";
+ return os << f.value();
+}
+
VectorSlotPair::VectorSlotPair() {}
@@ -52,6 +57,17 @@ ToBooleanHints ToBooleanHintsOf(Operator const* op) {
return OpParameter<ToBooleanHints>(op);
}
+std::ostream& operator<<(std::ostream& os,
+ ConstructForwardVarargsParameters const& p) {
+ return os << p.arity() << ", " << p.start_index();
+}
+
+ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf(
+ Operator const* op) {
+ DCHECK_EQ(IrOpcode::kJSConstructForwardVarargs, op->opcode());
+ return OpParameter<ConstructForwardVarargsParameters>(op);
+}
+
bool operator==(ConstructParameters const& lhs,
ConstructParameters const& rhs) {
return lhs.arity() == rhs.arity() && lhs.frequency() == rhs.frequency() &&
@@ -113,7 +129,8 @@ const CallParameters& CallParametersOf(const Operator* op) {
std::ostream& operator<<(std::ostream& os,
CallForwardVarargsParameters const& p) {
- return os << p.start_index() << ", " << p.tail_call_mode();
+ return os << p.arity() << ", " << p.start_index() << ", "
+ << p.tail_call_mode();
}
CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
@@ -738,16 +755,16 @@ const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
}
const Operator* JSOperatorBuilder::CallForwardVarargs(
- uint32_t start_index, TailCallMode tail_call_mode) {
- CallForwardVarargsParameters parameters(start_index, tail_call_mode);
+ size_t arity, uint32_t start_index, TailCallMode tail_call_mode) {
+ CallForwardVarargsParameters parameters(arity, start_index, tail_call_mode);
return new (zone()) Operator1<CallForwardVarargsParameters>( // --
IrOpcode::kJSCallForwardVarargs, Operator::kNoProperties, // opcode
"JSCallForwardVarargs", // name
- 2, 1, 1, 1, 1, 2, // counts
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
parameters); // parameter
}
-const Operator* JSOperatorBuilder::Call(size_t arity, float frequency,
+const Operator* JSOperatorBuilder::Call(size_t arity, CallFrequency frequency,
VectorSlotPair const& feedback,
ConvertReceiverMode convert_mode,
TailCallMode tail_call_mode) {
@@ -793,7 +810,18 @@ const Operator* JSOperatorBuilder::CallRuntime(const Runtime::Function* f,
parameters); // parameter
}
-const Operator* JSOperatorBuilder::Construct(uint32_t arity, float frequency,
+const Operator* JSOperatorBuilder::ConstructForwardVarargs(
+ size_t arity, uint32_t start_index) {
+ ConstructForwardVarargsParameters parameters(arity, start_index);
+ return new (zone()) Operator1<ConstructForwardVarargsParameters>( // --
+ IrOpcode::kJSConstructForwardVarargs, Operator::kNoProperties, // opcode
+ "JSConstructForwardVarargs", // name
+ parameters.arity(), 1, 1, 1, 1, 2, // counts
+ parameters); // parameter
+}
+
+const Operator* JSOperatorBuilder::Construct(uint32_t arity,
+ CallFrequency frequency,
VectorSlotPair const& feedback) {
ConstructParameters parameters(arity, frequency, feedback);
return new (zone()) Operator1<ConstructParameters>( // --
@@ -891,14 +919,19 @@ const Operator* JSOperatorBuilder::StoreNamedOwn(
parameters); // parameter
}
-const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
- return new (zone()) Operator1<LanguageMode>( // --
+const Operator* JSOperatorBuilder::DeleteProperty() {
+ return new (zone()) Operator( // --
IrOpcode::kJSDeleteProperty, Operator::kNoProperties, // opcode
"JSDeleteProperty", // name
- 2, 1, 1, 1, 1, 2, // counts
- language_mode); // parameter
+ 3, 1, 1, 1, 1, 2); // counts
}
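// The value-input count grows from 2 to 3 because the language mode stops
// being an operator parameter: the Builtins::kDeleteProperty stub invoked by
// the js-generic-lowering.cc hunk above takes it as an ordinary value input
// next to the receiver and the key (presumably Smi-encoded, matching how the
// bytecode graph builder would materialize it).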
+const Operator* JSOperatorBuilder::CreateGeneratorObject() {
+ return new (zone()) Operator( // --
+ IrOpcode::kJSCreateGeneratorObject, Operator::kEliminatable, // opcode
+ "JSCreateGeneratorObject", // name
+ 2, 1, 1, 1, 1, 0); // counts
+}
const Operator* JSOperatorBuilder::LoadGlobal(const Handle<Name>& name,
const VectorSlotPair& feedback,
diff --git a/deps/v8/src/compiler/js-operator.h b/deps/v8/src/compiler/js-operator.h
index d7b0dfab9b..5ac3b6769e 100644
--- a/deps/v8/src/compiler/js-operator.h
+++ b/deps/v8/src/compiler/js-operator.h
@@ -26,6 +26,37 @@ namespace compiler {
class Operator;
struct JSOperatorGlobalCache;
+// Defines the frequency with which a given Call/Construct site was executed.
+// For some call sites the frequency is not known.
+class CallFrequency final {
+ public:
+ CallFrequency() : value_(std::numeric_limits<float>::quiet_NaN()) {}
+ explicit CallFrequency(float value) : value_(value) {
+ DCHECK(!std::isnan(value));
+ }
+
+ bool IsKnown() const { return !IsUnknown(); }
+ bool IsUnknown() const { return std::isnan(value_); }
+ float value() const {
+ DCHECK(IsKnown());
+ return value_;
+ }
+
+ bool operator==(CallFrequency const& that) const {
+ return bit_cast<uint32_t>(this->value_) == bit_cast<uint32_t>(that.value_);
+ }
+ bool operator!=(CallFrequency const& that) const { return !(*this == that); }
+
+ friend size_t hash_value(CallFrequency f) {
+ return bit_cast<uint32_t>(f.value_);
+ }
+
+ private:
+ float value_;
+};
+
+std::ostream& operator<<(std::ostream&, CallFrequency);
+
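// Usage sketch (values illustrative): CallFrequency folds "unknown" into the
// float itself by reserving NaN, so no separate flag word is needed:
//   CallFrequency unknown;       // IsUnknown() == true, value() would DCHECK
//   CallFrequency hot(64.0f);    // IsKnown() == true, value() == 64.0f
// Note that operator== compares raw bits via bit_cast, so two
// default-constructed (NaN) frequencies compare equal -- the opposite of IEEE
// NaN semantics, and exactly what hashing and operator caching need.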
// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
// is used to access the type feedback for a certain {Node}.
class V8_EXPORT_PRIVATE VectorSlotPair {
@@ -59,22 +90,56 @@ ConvertReceiverMode ConvertReceiverModeOf(Operator const* op);
// The ToBooleanHints are used as parameter by JSToBoolean operators.
ToBooleanHints ToBooleanHintsOf(Operator const* op);
+// Defines the parameters for forwarding varargs to a JavaScript construct
+// call. This is used as a parameter by JSConstructForwardVarargs operators.
+class ConstructForwardVarargsParameters final {
+ public:
+ ConstructForwardVarargsParameters(size_t arity, uint32_t start_index)
+ : bit_field_(ArityField::encode(arity) |
+ StartIndexField::encode(start_index)) {}
+
+ size_t arity() const { return ArityField::decode(bit_field_); }
+ uint32_t start_index() const { return StartIndexField::decode(bit_field_); }
+
+ bool operator==(ConstructForwardVarargsParameters const& that) const {
+ return this->bit_field_ == that.bit_field_;
+ }
+ bool operator!=(ConstructForwardVarargsParameters const& that) const {
+ return !(*this == that);
+ }
+
+ private:
+ friend size_t hash_value(ConstructForwardVarargsParameters const& p) {
+ return p.bit_field_;
+ }
+
+ typedef BitField<size_t, 0, 16> ArityField;
+ typedef BitField<uint32_t, 16, 16> StartIndexField;
+
+ uint32_t const bit_field_;
+};
+
+std::ostream& operator<<(std::ostream&,
+ ConstructForwardVarargsParameters const&);
+
+ConstructForwardVarargsParameters const& ConstructForwardVarargsParametersOf(
+ Operator const*) WARN_UNUSED_RESULT;
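// Packing sketch: both parameters share one 32-bit word, capping each at
// 16 bits (65535) -- far above any realistic argument count:
//   bits  0..15  ArityField       (size_t arity, truncated into the field)
//   bits 16..31  StartIndexField  (uint32_t start_index)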
// Defines the arity and the feedback for a JavaScript constructor call. This is
// used as a parameter by JSConstruct operators.
class ConstructParameters final {
public:
- ConstructParameters(uint32_t arity, float frequency,
+ ConstructParameters(uint32_t arity, CallFrequency frequency,
VectorSlotPair const& feedback)
: arity_(arity), frequency_(frequency), feedback_(feedback) {}
uint32_t arity() const { return arity_; }
- float frequency() const { return frequency_; }
+ CallFrequency frequency() const { return frequency_; }
VectorSlotPair const& feedback() const { return feedback_; }
private:
uint32_t const arity_;
- float const frequency_;
+ CallFrequency const frequency_;
VectorSlotPair const feedback_;
};
@@ -115,11 +180,13 @@ SpreadWithArityParameter const& SpreadWithArityParameterOf(Operator const*);
// is used as parameter by JSCallForwardVarargs operators.
class CallForwardVarargsParameters final {
public:
- CallForwardVarargsParameters(uint32_t start_index,
+ CallForwardVarargsParameters(size_t arity, uint32_t start_index,
TailCallMode tail_call_mode)
- : bit_field_(StartIndexField::encode(start_index) |
+ : bit_field_(ArityField::encode(arity) |
+ StartIndexField::encode(start_index) |
TailCallModeField::encode(tail_call_mode)) {}
+ size_t arity() const { return ArityField::decode(bit_field_); }
uint32_t start_index() const { return StartIndexField::decode(bit_field_); }
TailCallMode tail_call_mode() const {
return TailCallModeField::decode(bit_field_);
@@ -137,8 +204,9 @@ class CallForwardVarargsParameters final {
return p.bit_field_;
}
- typedef BitField<uint32_t, 0, 30> StartIndexField;
- typedef BitField<TailCallMode, 31, 1> TailCallModeField;
+ typedef BitField<size_t, 0, 15> ArityField;
+ typedef BitField<uint32_t, 15, 15> StartIndexField;
+ typedef BitField<TailCallMode, 30, 1> TailCallModeField;
uint32_t const bit_field_;
};
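// Note the repacking: start_index shrinks from 30 bits to 15 to make room
// for the new 15-bit arity, and the tail-call flag moves from bit 31 down to
// bit 30. Forwarding more than 32767 arguments would now overflow the field,
// though other argument-count limits presumably apply long before that.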
@@ -152,8 +220,9 @@ CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
// used as a parameter by JSCall operators.
class CallParameters final {
public:
- CallParameters(size_t arity, float frequency, VectorSlotPair const& feedback,
- TailCallMode tail_call_mode, ConvertReceiverMode convert_mode)
+ CallParameters(size_t arity, CallFrequency frequency,
+ VectorSlotPair const& feedback, TailCallMode tail_call_mode,
+ ConvertReceiverMode convert_mode)
: bit_field_(ArityField::encode(arity) |
ConvertReceiverModeField::encode(convert_mode) |
TailCallModeField::encode(tail_call_mode)),
@@ -161,7 +230,7 @@ class CallParameters final {
feedback_(feedback) {}
size_t arity() const { return ArityField::decode(bit_field_); }
- float frequency() const { return frequency_; }
+ CallFrequency frequency() const { return frequency_; }
ConvertReceiverMode convert_mode() const {
return ConvertReceiverModeField::decode(bit_field_);
}
@@ -187,7 +256,7 @@ class CallParameters final {
typedef BitField<TailCallMode, 31, 1> TailCallModeField;
uint32_t const bit_field_;
- float const frequency_;
+ CallFrequency const frequency_;
VectorSlotPair const feedback_;
};
@@ -633,10 +702,10 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
int literal_flags, int literal_index);
- const Operator* CallForwardVarargs(uint32_t start_index,
+ const Operator* CallForwardVarargs(size_t arity, uint32_t start_index,
TailCallMode tail_call_mode);
const Operator* Call(
- size_t arity, float frequency = 0.0f,
+ size_t arity, CallFrequency frequency = CallFrequency(),
VectorSlotPair const& feedback = VectorSlotPair(),
ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
TailCallMode tail_call_mode = TailCallMode::kDisallow);
@@ -644,8 +713,11 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* CallRuntime(Runtime::FunctionId id);
const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
- const Operator* Construct(uint32_t arity, float frequency,
- VectorSlotPair const& feedback);
+
+ const Operator* ConstructForwardVarargs(size_t arity, uint32_t start_index);
+ const Operator* Construct(uint32_t arity,
+ CallFrequency frequency = CallFrequency(),
+ VectorSlotPair const& feedback = VectorSlotPair());
const Operator* ConstructWithSpread(uint32_t arity);
const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
@@ -662,12 +734,14 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
VectorSlotPair const& feedback);
const Operator* StoreDataPropertyInLiteral(const VectorSlotPair& feedback);
- const Operator* DeleteProperty(LanguageMode language_mode);
+ const Operator* DeleteProperty();
const Operator* HasProperty();
const Operator* GetSuperConstructor();
+ const Operator* CreateGeneratorObject();
+
const Operator* LoadGlobal(const Handle<Name>& name,
const VectorSlotPair& feedback,
TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index 420e68cdf5..64838a1f83 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -488,13 +488,13 @@ JSTypedLowering::JSTypedLowering(Editor* editor,
dependencies_(dependencies),
flags_(flags),
jsgraph_(jsgraph),
- pointer_comparable_type_(Type::Union(
- Type::Oddball(),
- Type::Union(
- Type::SymbolOrReceiver(),
- Type::HeapConstant(factory()->empty_string(), graph()->zone()),
- graph()->zone()),
- graph()->zone())),
+ empty_string_type_(
+ Type::HeapConstant(factory()->empty_string(), graph()->zone())),
+ pointer_comparable_type_(
+ Type::Union(Type::Oddball(),
+ Type::Union(Type::SymbolOrReceiver(), empty_string_type_,
+ graph()->zone()),
+ graph()->zone())),
type_cache_(TypeCache::Get()) {
for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
double min = kMinInt / (1 << k);
@@ -535,6 +535,23 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
if (r.ShouldCreateConsString()) {
return ReduceCreateConsString(node);
}
+ // Eliminate useless concatenation of empty string.
+ if ((flags() & kDeoptimizationEnabled) &&
+ BinaryOperationHintOf(node->op()) == BinaryOperationHint::kString) {
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+ if (r.LeftInputIs(empty_string_type_)) {
+ Node* value = effect = graph()->NewNode(simplified()->CheckString(),
+ r.right(), effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ } else if (r.RightInputIs(empty_string_type_)) {
+ Node* value = effect = graph()->NewNode(simplified()->CheckString(),
+ r.left(), effect, control);
+ ReplaceWithValue(node, value, effect, control);
+ return Replace(value);
+ }
+ }
StringAddFlags flags = STRING_ADD_CHECK_NONE;
if (!r.LeftInputIs(Type::String())) {
flags = STRING_ADD_CONVERT_LEFT;
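The block added above eliminates concatenation with the empty string when the feedback hint says both operands are strings: "" + x and x + "" are the identity on strings, so the JSAdd collapses to a CheckString on the other operand, keeping the deoptimizing check for non-string inputs. A standalone sketch of the rewrite on a toy node type (names are illustrative, not V8's IR):

    #include <string>

    struct Node {
      std::string op;        // "JSAdd", "Constant", "CheckString", ...
      Node* left;
      Node* right;
      std::string constant;  // payload when op == "Constant"
    };

    bool IsEmptyStringConstant(const Node* n) {
      return n != nullptr && n->op == "Constant" && n->constant.empty();
    }

    // Returns a replacement node, or the input when no reduction applies.
    Node* ReduceAdd(Node* add) {
      if (add->op != "JSAdd") return add;
      if (IsEmptyStringConstant(add->left))
        return new Node{"CheckString", add->right, nullptr, ""};
      if (IsEmptyStringConstant(add->right))
        return new Node{"CheckString", add->left, nullptr, ""};
      return add;
    }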
@@ -1328,16 +1345,12 @@ Reduction JSTypedLowering::ReduceJSOrdinaryHasInstance(Node* node) {
Type* constructor_type = NodeProperties::GetType(constructor);
Node* object = NodeProperties::GetValueInput(node, 1);
Type* object_type = NodeProperties::GetType(object);
- Node* context = NodeProperties::GetContextInput(node);
- Node* frame_state = NodeProperties::GetFrameStateInput(node);
- Node* effect = NodeProperties::GetEffectInput(node);
- Node* control = NodeProperties::GetControlInput(node);
// Check if the {constructor} cannot be callable.
// See ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) step 1.
if (!constructor_type->Maybe(Type::Callable())) {
Node* value = jsgraph()->FalseConstant();
- ReplaceWithValue(node, value, effect, control);
+ ReplaceWithValue(node, value);
return Replace(value);
}
@@ -1347,156 +1360,11 @@ Reduction JSTypedLowering::ReduceJSOrdinaryHasInstance(Node* node) {
if (!object_type->Maybe(Type::Receiver()) &&
!constructor_type->Maybe(Type::BoundFunction())) {
Node* value = jsgraph()->FalseConstant();
- ReplaceWithValue(node, value, effect, control);
+ ReplaceWithValue(node, value);
return Replace(value);
}
- // Check if the {constructor} is a (known) JSFunction.
- if (!constructor_type->IsHeapConstant() ||
- !constructor_type->AsHeapConstant()->Value()->IsJSFunction()) {
- return NoChange();
- }
- Handle<JSFunction> function =
- Handle<JSFunction>::cast(constructor_type->AsHeapConstant()->Value());
-
- // Check if the {function} already has an initial map (i.e. the
- // {function} has been used as a constructor at least once).
- if (!function->has_initial_map()) return NoChange();
-
- // Check if the {function}s "prototype" is a JSReceiver.
- if (!function->prototype()->IsJSReceiver()) return NoChange();
-
- // Install a code dependency on the {function}s initial map.
- Handle<Map> initial_map(function->initial_map(), isolate());
- dependencies()->AssumeInitialMapCantChange(initial_map);
-
- Node* prototype =
- jsgraph()->Constant(handle(initial_map->prototype(), isolate()));
-
- Node* check0 = graph()->NewNode(simplified()->ObjectIsSmi(), object);
- Node* branch0 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
-
- Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
- Node* etrue0 = effect;
- Node* vtrue0 = jsgraph()->FalseConstant();
-
- control = graph()->NewNode(common()->IfFalse(), branch0);
-
- // Loop through the {object}s prototype chain looking for the {prototype}.
- Node* loop = control = graph()->NewNode(common()->Loop(2), control, control);
- Node* eloop = effect =
- graph()->NewNode(common()->EffectPhi(2), effect, effect, loop);
- Node* vloop = object = graph()->NewNode(
- common()->Phi(MachineRepresentation::kTagged, 2), object, object, loop);
- // TODO(jarin): This is a very ugly hack to work-around the super-smart
- // implicit typing of the Phi, which goes completely nuts if the {object}
- // is for example a HeapConstant.
- NodeProperties::SetType(vloop, Type::NonInternal());
-
- // Load the {object} map and instance type.
- Node* object_map = effect =
- graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), object,
- effect, control);
- Node* object_instance_type = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapInstanceType()), object_map,
- effect, control);
-
- // Check if the {object} is a special receiver, because for special
- // receivers, i.e. proxies or API objects that need access checks,
- // we have to use the %HasInPrototypeChain runtime function instead.
- Node* check1 = graph()->NewNode(
- simplified()->NumberLessThanOrEqual(), object_instance_type,
- jsgraph()->Constant(LAST_SPECIAL_RECEIVER_TYPE));
- Node* branch1 =
- graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
-
- control = graph()->NewNode(common()->IfFalse(), branch1);
-
- Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
- Node* etrue1 = effect;
- Node* vtrue1;
-
- // Check if the {object} is not a receiver at all.
- Node* check10 =
- graph()->NewNode(simplified()->NumberLessThan(), object_instance_type,
- jsgraph()->Constant(FIRST_JS_RECEIVER_TYPE));
- Node* branch10 =
- graph()->NewNode(common()->Branch(BranchHint::kTrue), check10, if_true1);
-
- // A primitive value cannot match the {prototype} we're looking for.
- if_true1 = graph()->NewNode(common()->IfTrue(), branch10);
- vtrue1 = jsgraph()->FalseConstant();
-
- Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch10);
- Node* efalse1 = etrue1;
- Node* vfalse1;
- {
- // Slow path, need to call the %HasInPrototypeChain runtime function.
- vfalse1 = efalse1 = if_false1 = graph()->NewNode(
- javascript()->CallRuntime(Runtime::kHasInPrototypeChain), object,
- prototype, context, frame_state, efalse1, if_false1);
-
- // Replace any potential {IfException} uses of {node} to catch exceptions
- // from this %HasInPrototypeChain runtime call instead.
- Node* on_exception = nullptr;
- if (NodeProperties::IsExceptionalCall(node, &on_exception)) {
- NodeProperties::ReplaceControlInput(on_exception, vfalse1);
- NodeProperties::ReplaceEffectInput(on_exception, efalse1);
- if_false1 = graph()->NewNode(common()->IfSuccess(), vfalse1);
- Revisit(on_exception);
- }
- }
-
- // Load the {object} prototype.
- Node* object_prototype = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForMapPrototype()), object_map,
- effect, control);
-
- // Check if we reached the end of {object}s prototype chain.
- Node* check2 = graph()->NewNode(simplified()->ReferenceEqual(),
- object_prototype, jsgraph()->NullConstant());
- Node* branch2 = graph()->NewNode(common()->Branch(), check2, control);
-
- Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
- Node* etrue2 = effect;
- Node* vtrue2 = jsgraph()->FalseConstant();
-
- control = graph()->NewNode(common()->IfFalse(), branch2);
-
- // Check if we reached the {prototype}.
- Node* check3 = graph()->NewNode(simplified()->ReferenceEqual(),
- object_prototype, prototype);
- Node* branch3 = graph()->NewNode(common()->Branch(), check3, control);
-
- Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
- Node* etrue3 = effect;
- Node* vtrue3 = jsgraph()->TrueConstant();
-
- control = graph()->NewNode(common()->IfFalse(), branch3);
-
- // Close the loop.
- vloop->ReplaceInput(1, object_prototype);
- eloop->ReplaceInput(1, effect);
- loop->ReplaceInput(1, control);
-
- control = graph()->NewNode(common()->Merge(5), if_true0, if_true1, if_true2,
- if_true3, if_false1);
- effect = graph()->NewNode(common()->EffectPhi(5), etrue0, etrue1, etrue2,
- etrue3, efalse1, control);
-
- // Morph the {node} into an appropriate Phi.
- ReplaceWithValue(node, node, effect, control);
- node->ReplaceInput(0, vtrue0);
- node->ReplaceInput(1, vtrue1);
- node->ReplaceInput(2, vtrue2);
- node->ReplaceInput(3, vtrue3);
- node->ReplaceInput(4, vfalse1);
- node->ReplaceInput(5, control);
- node->TrimInputCount(6);
- NodeProperties::ChangeOp(node,
- common()->Phi(MachineRepresentation::kTagged, 5));
- return Changed(node);
+ return NoChange();
}
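The deleted block above hand-built the prototype-chain walk directly in the graph; after this change the reducer only folds the two trivially-false cases and otherwise reports NoChange. For reference, the algorithm the removed graph encoded is the loop from ES6 7.3.19 OrdinaryHasInstance, sketched standalone with illustrative types:

    // Walk object's prototype chain looking for the constructor's
    // "prototype" value; nullptr terminates the chain.
    struct HeapObject {
      HeapObject* prototype;
    };

    bool HasInPrototypeChain(HeapObject* object, HeapObject* prototype) {
      for (HeapObject* p = object->prototype; p != nullptr; p = p->prototype) {
        if (p == prototype) return true;
      }
      return false;
    }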
Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
@@ -1542,35 +1410,49 @@ Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
return Changed(node);
}
-Reduction JSTypedLowering::ReduceJSLoadModule(Node* node) {
- DCHECK_EQ(IrOpcode::kJSLoadModule, node->opcode());
+Node* JSTypedLowering::BuildGetModuleCell(Node* node) {
+ DCHECK(node->opcode() == IrOpcode::kJSLoadModule ||
+ node->opcode() == IrOpcode::kJSStoreModule);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
int32_t cell_index = OpParameter<int32_t>(node);
Node* module = NodeProperties::GetValueInput(node, 0);
+ Type* module_type = NodeProperties::GetType(module);
+
+ if (module_type->IsHeapConstant()) {
+ Handle<Module> module_constant =
+ Handle<Module>::cast(module_type->AsHeapConstant()->Value());
+ Handle<Cell> cell_constant(module_constant->GetCell(cell_index), isolate());
+ return jsgraph()->HeapConstant(cell_constant);
+ }
- Node* array;
+ FieldAccess field_access;
int index;
if (ModuleDescriptor::GetCellIndexKind(cell_index) ==
ModuleDescriptor::kExport) {
- array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForModuleRegularExports()),
- module, effect, control);
+ field_access = AccessBuilder::ForModuleRegularExports();
index = cell_index - 1;
} else {
DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
ModuleDescriptor::kImport);
- array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForModuleRegularImports()),
- module, effect, control);
+ field_access = AccessBuilder::ForModuleRegularImports();
index = -cell_index - 1;
}
-
- Node* cell = effect = graph()->NewNode(
+ Node* array = effect = graph()->NewNode(simplified()->LoadField(field_access),
+ module, effect, control);
+ return graph()->NewNode(
simplified()->LoadField(AccessBuilder::ForFixedArraySlot(index)), array,
effect, control);
+}
+
+Reduction JSTypedLowering::ReduceJSLoadModule(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSLoadModule, node->opcode());
+ Node* effect = NodeProperties::GetEffectInput(node);
+ Node* control = NodeProperties::GetControlInput(node);
+
+ Node* cell = BuildGetModuleCell(node);
+ if (cell->op()->EffectOutputCount() > 0) effect = cell;
Node* value = effect =
graph()->NewNode(simplified()->LoadField(AccessBuilder::ForCellValue()),
cell, effect, control);
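BuildGetModuleCell centralizes the cell lookup that was previously duplicated in the load and store reducers, and additionally folds the lookup to a HeapConstant when the module itself is a constant. The index arithmetic it shares (cell_index - 1 for exports, -cell_index - 1 for imports) implies the convention sketched below, assuming GetCellIndexKind maps positive indices to regular exports and negative ones to regular imports:

    enum class CellIndexKind { kInvalid, kExport, kImport };

    CellIndexKind GetCellIndexKind(int cell_index) {
      if (cell_index > 0) return CellIndexKind::kExport;
      if (cell_index < 0) return CellIndexKind::kImport;
      return CellIndexKind::kInvalid;
    }

    // Slot into the regular-exports (resp. regular-imports) FixedArray.
    int ArraySlot(int cell_index) {
      return cell_index > 0 ? cell_index - 1 : -cell_index - 1;
    }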
@@ -1583,32 +1465,12 @@ Reduction JSTypedLowering::ReduceJSStoreModule(Node* node) {
DCHECK_EQ(IrOpcode::kJSStoreModule, node->opcode());
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
-
- int32_t cell_index = OpParameter<int32_t>(node);
- Node* module = NodeProperties::GetValueInput(node, 0);
Node* value = NodeProperties::GetValueInput(node, 1);
+ DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(OpParameter<int32_t>(node)),
+ ModuleDescriptor::kExport);
- Node* array;
- int index;
- if (ModuleDescriptor::GetCellIndexKind(cell_index) ==
- ModuleDescriptor::kExport) {
- array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForModuleRegularExports()),
- module, effect, control);
- index = cell_index - 1;
- } else {
- DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
- ModuleDescriptor::kImport);
- array = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForModuleRegularImports()),
- module, effect, control);
- index = -cell_index - 1;
- }
-
- Node* cell = effect = graph()->NewNode(
- simplified()->LoadField(AccessBuilder::ForFixedArraySlot(index)), array,
- effect, control);
-
+ Node* cell = BuildGetModuleCell(node);
+ if (cell->op()->EffectOutputCount() > 0) effect = cell;
effect =
graph()->NewNode(simplified()->StoreField(AccessBuilder::ForCellValue()),
cell, value, effect, control);
@@ -1858,6 +1720,38 @@ bool NeedsArgumentAdaptorFrame(Handle<SharedFunctionInfo> shared, int arity) {
} // namespace
+Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
+ DCHECK_EQ(IrOpcode::kJSConstructForwardVarargs, node->opcode());
+ ConstructForwardVarargsParameters p =
+ ConstructForwardVarargsParametersOf(node->op());
+ DCHECK_LE(2u, p.arity());
+ int const arity = static_cast<int>(p.arity() - 2);
+ int const start_index = static_cast<int>(p.start_index());
+ Node* target = NodeProperties::GetValueInput(node, 0);
+ Type* target_type = NodeProperties::GetType(target);
+ Node* new_target = NodeProperties::GetValueInput(node, arity + 1);
+
+ // Check if {target} is a JSFunction.
+ if (target_type->Is(Type::Function())) {
+ // Patch {node} to an indirect call via ConstructFunctionForwardVarargs.
+ Callable callable = CodeFactory::ConstructFunctionForwardVarargs(isolate());
+ node->RemoveInput(arity + 1);
+ node->InsertInput(graph()->zone(), 0,
+ jsgraph()->HeapConstant(callable.code()));
+ node->InsertInput(graph()->zone(), 2, new_target);
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 4, jsgraph()->Constant(start_index));
+ node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
+ NodeProperties::ChangeOp(
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), arity + 1,
+ CallDescriptor::kNeedsFrameState)));
+ return Changed(node);
+ }
+
+ return NoChange();
+}
+
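This reduction and the analogous call reduction further below both compute arity - 2 from the operator's total arity. The value-input layout that makes this correct, spelled out (it also explains the DCHECK_LE(2u, p.arity()) guards):

    #include <cassert>

    // For a construct node of total arity n, the value inputs are
    //   [0] target, [1..n-2] explicit arguments, [n-1] new_target,
    // so the explicit-argument count is n - 2.
    int ExplicitArgumentCount(int total_arity) {
      assert(total_arity >= 2);  // at least target and new_target
      return total_arity - 2;
    }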
Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
ConstructParameters const& p = ConstructParametersOf(node->op());
@@ -1936,6 +1830,9 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
DCHECK_EQ(IrOpcode::kJSCallForwardVarargs, node->opcode());
CallForwardVarargsParameters p = CallForwardVarargsParametersOf(node->op());
+ DCHECK_LE(2u, p.arity());
+ int const arity = static_cast<int>(p.arity() - 2);
+ int const start_index = static_cast<int>(p.start_index());
Node* target = NodeProperties::GetValueInput(node, 0);
Type* target_type = NodeProperties::GetType(target);
@@ -1951,11 +1848,12 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
Callable callable = CodeFactory::CallFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(p.start_index()));
+ node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(start_index));
NodeProperties::ChangeOp(
- node,
- common()->Call(Linkage::GetStubCallDescriptor(
- isolate(), graph()->zone(), callable.descriptor(), 1, flags)));
+ node, common()->Call(Linkage::GetStubCallDescriptor(
+ isolate(), graph()->zone(), callable.descriptor(), arity + 1,
+ flags)));
return Changed(node);
}
@@ -2331,6 +2229,8 @@ Reduction JSTypedLowering::Reduce(Node* node) {
return ReduceJSStoreModule(node);
case IrOpcode::kJSConvertReceiver:
return ReduceJSConvertReceiver(node);
+ case IrOpcode::kJSConstructForwardVarargs:
+ return ReduceJSConstructForwardVarargs(node);
case IrOpcode::kJSConstruct:
return ReduceJSConstruct(node);
case IrOpcode::kJSCallForwardVarargs:
diff --git a/deps/v8/src/compiler/js-typed-lowering.h b/deps/v8/src/compiler/js-typed-lowering.h
index 98d71c3ed9..0b92a40a5b 100644
--- a/deps/v8/src/compiler/js-typed-lowering.h
+++ b/deps/v8/src/compiler/js-typed-lowering.h
@@ -69,6 +69,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceJSToString(Node* node);
Reduction ReduceJSToObject(Node* node);
Reduction ReduceJSConvertReceiver(Node* node);
+ Reduction ReduceJSConstructForwardVarargs(Node* node);
Reduction ReduceJSConstruct(Node* node);
Reduction ReduceJSCallForwardVarargs(Node* node);
Reduction ReduceJSCall(Node* node);
@@ -88,6 +89,9 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
Reduction ReduceSpeculativeNumberBinop(Node* node);
Reduction ReduceSpeculativeNumberComparison(Node* node);
+ // Helper for ReduceJSLoadModule and ReduceJSStoreModule.
+ Node* BuildGetModuleCell(Node* node);
+
Factory* factory() const;
Graph* graph() const;
JSGraph* jsgraph() const { return jsgraph_; }
@@ -101,6 +105,7 @@ class V8_EXPORT_PRIVATE JSTypedLowering final
CompilationDependencies* dependencies_;
Flags flags_;
JSGraph* jsgraph_;
+ Type* empty_string_type_;
Type* shifted_int32_ranges_[4];
Type* pointer_comparable_type_;
TypeCache const& type_cache_;
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index 81c90d011f..1275f8f6ff 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -176,9 +176,17 @@ bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
case Runtime::kInlineFixedArrayGet:
case Runtime::kInlineFixedArraySet:
case Runtime::kInlineGeneratorClose:
+ case Runtime::kInlineGeneratorGetContext:
case Runtime::kInlineGeneratorGetInputOrDebugPos:
case Runtime::kInlineGeneratorGetResumeMode:
+ case Runtime::kInlineCreateJSGeneratorObject:
case Runtime::kInlineIsArray:
+ case Runtime::kInlineIsJSMap:
+ case Runtime::kInlineIsJSSet:
+ case Runtime::kInlineIsJSMapIterator:
+ case Runtime::kInlineIsJSSetIterator:
+ case Runtime::kInlineIsJSWeakMap:
+ case Runtime::kInlineIsJSWeakSet:
case Runtime::kInlineIsJSReceiver:
case Runtime::kInlineIsRegExp:
case Runtime::kInlineIsSmi:
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
index 2c688a1cb5..b4a5b717e6 100644
--- a/deps/v8/src/compiler/load-elimination.cc
+++ b/deps/v8/src/compiler/load-elimination.cc
@@ -142,7 +142,7 @@ bool IsCompatibleCheck(Node const* a, Node const* b) {
Node* LoadElimination::AbstractChecks::Lookup(Node* node) const {
for (Node* const check : nodes_) {
- if (check && IsCompatibleCheck(check, node)) {
+ if (check && !check->IsDead() && IsCompatibleCheck(check, node)) {
return check;
}
}
@@ -195,13 +195,23 @@ void LoadElimination::AbstractChecks::Print() const {
}
}
-Node* LoadElimination::AbstractElements::Lookup(Node* object,
- Node* index) const {
+namespace {
+
+bool IsCompatible(MachineRepresentation r1, MachineRepresentation r2) {
+ if (r1 == r2) return true;
+ return IsAnyTagged(r1) && IsAnyTagged(r2);
+}
+
+} // namespace
+
+Node* LoadElimination::AbstractElements::Lookup(
+ Node* object, Node* index, MachineRepresentation representation) const {
for (Element const element : elements_) {
if (element.object == nullptr) continue;
DCHECK_NOT_NULL(element.index);
DCHECK_NOT_NULL(element.value);
- if (MustAlias(object, element.object) && MustAlias(index, element.index)) {
+ if (MustAlias(object, element.object) && MustAlias(index, element.index) &&
+ IsCompatible(representation, element.representation)) {
return element.value;
}
}
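Lookup now forwards a cached element value only when the machine representations are compatible: equal representations always match, and the tagged flavors alias one another, but a kFloat64 access must never satisfy a tagged one (or vice versa), since that would forward raw bits under the wrong interpretation. A minimal model of the relation with illustrative enum values:

    #include <cassert>

    enum class Rep { kTagged, kTaggedSigned, kTaggedPointer, kFloat64 };

    bool IsAnyTagged(Rep r) { return r != Rep::kFloat64; }

    bool IsCompatible(Rep r1, Rep r2) {
      return r1 == r2 || (IsAnyTagged(r1) && IsAnyTagged(r2));
    }

    int main() {
      assert(IsCompatible(Rep::kTagged, Rep::kTaggedSigned));
      assert(!IsCompatible(Rep::kTagged, Rep::kFloat64));
    }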
@@ -470,22 +480,26 @@ LoadElimination::AbstractState const* LoadElimination::AbstractState::KillMaps(
return this;
}
-Node* LoadElimination::AbstractState::LookupElement(Node* object,
- Node* index) const {
+Node* LoadElimination::AbstractState::LookupElement(
+ Node* object, Node* index, MachineRepresentation representation) const {
if (this->elements_) {
- return this->elements_->Lookup(object, index);
+ return this->elements_->Lookup(object, index, representation);
}
return nullptr;
}
LoadElimination::AbstractState const*
LoadElimination::AbstractState::AddElement(Node* object, Node* index,
- Node* value, Zone* zone) const {
+ Node* value,
+ MachineRepresentation representation,
+ Zone* zone) const {
AbstractState* that = new (zone) AbstractState(*this);
if (that->elements_) {
- that->elements_ = that->elements_->Extend(object, index, value, zone);
+ that->elements_ =
+ that->elements_->Extend(object, index, value, representation, zone);
} else {
- that->elements_ = new (zone) AbstractElements(object, index, value, zone);
+ that->elements_ =
+ new (zone) AbstractElements(object, index, value, representation, zone);
}
return that;
}
@@ -823,7 +837,8 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
- if (Node* replacement = state->LookupElement(object, index)) {
+ if (Node* replacement = state->LookupElement(
+ object, index, access.machine_type.representation())) {
// Make sure we don't resurrect dead {replacement} nodes.
if (!replacement->IsDead()) {
// We might need to guard the {replacement} if the type of the
@@ -838,7 +853,8 @@ Reduction LoadElimination::ReduceLoadElement(Node* node) {
return Replace(replacement);
}
}
- state = state->AddElement(object, index, node, zone());
+ state = state->AddElement(object, index, node,
+ access.machine_type.representation(), zone());
return UpdateState(node, state);
}
return NoChange();
@@ -852,7 +868,8 @@ Reduction LoadElimination::ReduceStoreElement(Node* node) {
Node* const effect = NodeProperties::GetEffectInput(node);
AbstractState const* state = node_states_.Get(effect);
if (state == nullptr) return NoChange();
- Node* const old_value = state->LookupElement(object, index);
+ Node* const old_value =
+ state->LookupElement(object, index, access.machine_type.representation());
if (old_value == new_value) {
// This store is fully redundant.
return Replace(effect);
@@ -880,7 +897,8 @@ Reduction LoadElimination::ReduceStoreElement(Node* node) {
case MachineRepresentation::kTaggedSigned:
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTagged:
- state = state->AddElement(object, index, new_value, zone());
+ state = state->AddElement(object, index, new_value,
+ access.machine_type.representation(), zone());
break;
}
return UpdateState(node, state);
@@ -1007,8 +1025,15 @@ LoadElimination::AbstractState const* LoadElimination::ComputeLoopState(
!ZoneHandleSet<Map>(transition.target())
.contains(object_maps)) {
state = state->KillMaps(object, zone());
- state = state->KillField(
- object, FieldIndexOf(JSObject::kElementsOffset), zone());
+ switch (transition.mode()) {
+ case ElementsTransition::kFastTransition:
+ break;
+ case ElementsTransition::kSlowTransition:
+ // Kill the elements as well.
+ state = state->KillField(
+ object, FieldIndexOf(JSObject::kElementsOffset), zone());
+ break;
+ }
}
break;
}
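The new switch refines loop-state invalidation for elements-kind transitions: a fast transition changes only the map and keeps the elements backing store, so cached loads of the elements field stay valid, while a slow transition may reallocate the backing store and therefore kills it as well. A tiny model of the rule (names illustrative; the fast/slow reading is an assumption based on how the two cases are handled here):

    enum class TransitionMode { kFastTransition, kSlowTransition };

    struct Invalidation {
      bool kill_maps;
      bool kill_elements_field;
    };

    Invalidation OnElementsTransition(TransitionMode mode) {
      // Maps always change; the elements backing store only changes on
      // the slow path, which may reallocate it.
      return {true, mode == TransitionMode::kSlowTransition};
    }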
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
index cd486a2cd7..5d09aa5124 100644
--- a/deps/v8/src/compiler/load-elimination.h
+++ b/deps/v8/src/compiler/load-elimination.h
@@ -8,6 +8,7 @@
#include "src/base/compiler-specific.h"
#include "src/compiler/graph-reducer.h"
#include "src/globals.h"
+#include "src/machine-type.h"
#include "src/zone/zone-handle-set.h"
namespace v8 {
@@ -78,19 +79,23 @@ class V8_EXPORT_PRIVATE LoadElimination final
elements_[i] = Element();
}
}
- AbstractElements(Node* object, Node* index, Node* value, Zone* zone)
+ AbstractElements(Node* object, Node* index, Node* value,
+ MachineRepresentation representation, Zone* zone)
: AbstractElements(zone) {
- elements_[next_index_++] = Element(object, index, value);
+ elements_[next_index_++] = Element(object, index, value, representation);
}
AbstractElements const* Extend(Node* object, Node* index, Node* value,
+ MachineRepresentation representation,
Zone* zone) const {
AbstractElements* that = new (zone) AbstractElements(*this);
- that->elements_[that->next_index_] = Element(object, index, value);
+ that->elements_[that->next_index_] =
+ Element(object, index, value, representation);
that->next_index_ = (that->next_index_ + 1) % arraysize(elements_);
return that;
}
- Node* Lookup(Node* object, Node* index) const;
+ Node* Lookup(Node* object, Node* index,
+ MachineRepresentation representation) const;
AbstractElements const* Kill(Node* object, Node* index, Zone* zone) const;
bool Equals(AbstractElements const* that) const;
AbstractElements const* Merge(AbstractElements const* that,
@@ -101,12 +106,17 @@ class V8_EXPORT_PRIVATE LoadElimination final
private:
struct Element {
Element() {}
- Element(Node* object, Node* index, Node* value)
- : object(object), index(index), value(value) {}
+ Element(Node* object, Node* index, Node* value,
+ MachineRepresentation representation)
+ : object(object),
+ index(index),
+ value(value),
+ representation(representation) {}
Node* object = nullptr;
Node* index = nullptr;
Node* value = nullptr;
+ MachineRepresentation representation = MachineRepresentation::kNone;
};
Element elements_[kMaxTrackedElements];
@@ -224,10 +234,12 @@ class V8_EXPORT_PRIVATE LoadElimination final
Node* LookupField(Node* object, size_t index) const;
AbstractState const* AddElement(Node* object, Node* index, Node* value,
+ MachineRepresentation representation,
Zone* zone) const;
AbstractState const* KillElement(Node* object, Node* index,
Zone* zone) const;
- Node* LookupElement(Node* object, Node* index) const;
+ Node* LookupElement(Node* object, Node* index,
+ MachineRepresentation representation) const;
AbstractState const* AddCheck(Node* node, Zone* zone) const;
Node* LookupCheck(Node* node) const;
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 2e66b17a9d..96f7dc1a91 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -70,9 +70,25 @@ CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
return OpParameter<CheckedStoreRepresentation>(op);
}
-int StackSlotSizeOf(Operator const* op) {
+bool operator==(StackSlotRepresentation lhs, StackSlotRepresentation rhs) {
+ return lhs.size() == rhs.size() && lhs.alignment() == rhs.alignment();
+}
+
+bool operator!=(StackSlotRepresentation lhs, StackSlotRepresentation rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(StackSlotRepresentation rep) {
+ return base::hash_combine(rep.size(), rep.alignment());
+}
+
+std::ostream& operator<<(std::ostream& os, StackSlotRepresentation rep) {
+ return os << "(" << rep.size() << " : " << rep.alignment() << ")";
+}
+
+StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kStackSlot, op->opcode());
- return OpParameter<int>(op);
+ return OpParameter<StackSlotRepresentation>(op);
}
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
@@ -229,19 +245,14 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(F32x4UConvertI32x4, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Abs, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Neg, Operator::kNoProperties, 1, 0, 1) \
- V(F32x4Sqrt, Operator::kNoProperties, 1, 0, 1) \
V(F32x4RecipApprox, Operator::kNoProperties, 1, 0, 1) \
V(F32x4RecipSqrtApprox, Operator::kNoProperties, 1, 0, 1) \
V(F32x4Add, Operator::kCommutative, 2, 0, 1) \
+ V(F32x4AddHoriz, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Sub, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Mul, Operator::kCommutative, 2, 0, 1) \
- V(F32x4Div, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Min, Operator::kCommutative, 2, 0, 1) \
V(F32x4Max, Operator::kCommutative, 2, 0, 1) \
- V(F32x4MinNum, Operator::kCommutative, 2, 0, 1) \
- V(F32x4MaxNum, Operator::kCommutative, 2, 0, 1) \
- V(F32x4RecipRefine, Operator::kNoProperties, 2, 0, 1) \
- V(F32x4RecipSqrtRefine, Operator::kNoProperties, 2, 0, 1) \
V(F32x4Eq, Operator::kCommutative, 2, 0, 1) \
V(F32x4Ne, Operator::kCommutative, 2, 0, 1) \
V(F32x4Lt, Operator::kNoProperties, 2, 0, 1) \
@@ -252,6 +263,7 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(I32x4SConvertI16x8High, Operator::kNoProperties, 1, 0, 1) \
V(I32x4Neg, Operator::kNoProperties, 1, 0, 1) \
V(I32x4Add, Operator::kCommutative, 2, 0, 1) \
+ V(I32x4AddHoriz, Operator::kNoProperties, 2, 0, 1) \
V(I32x4Sub, Operator::kNoProperties, 2, 0, 1) \
V(I32x4Mul, Operator::kCommutative, 2, 0, 1) \
V(I32x4MinS, Operator::kCommutative, 2, 0, 1) \
@@ -274,6 +286,7 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(I16x8SConvertI32x4, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Add, Operator::kCommutative, 2, 0, 1) \
V(I16x8AddSaturateS, Operator::kCommutative, 2, 0, 1) \
+ V(I16x8AddHoriz, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Sub, Operator::kNoProperties, 2, 0, 1) \
V(I16x8SubSaturateS, Operator::kNoProperties, 2, 0, 1) \
V(I16x8Mul, Operator::kCommutative, 2, 0, 1) \
@@ -426,13 +439,15 @@ MachineType AtomicOpRepresentationOf(Operator const* op) {
V(16x8, 16) \
V(8x16, 8)
-#define STACK_SLOT_CACHED_SIZES_LIST(V) V(4) V(8) V(16)
+#define STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(V) \
+ V(4, 0) V(8, 0) V(16, 0) V(4, 4) V(8, 8) V(16, 16)
-struct StackSlotOperator : public Operator1<int> {
- explicit StackSlotOperator(int size)
- : Operator1<int>(IrOpcode::kStackSlot,
- Operator::kNoDeopt | Operator::kNoThrow, "StackSlot", 0,
- 0, 0, 1, 0, 0, size) {}
+struct StackSlotOperator : public Operator1<StackSlotRepresentation> {
+ explicit StackSlotOperator(int size, int alignment)
+ : Operator1<StackSlotRepresentation>(
+ IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow,
+ "StackSlot", 0, 0, 0, 1, 0, 0,
+ StackSlotRepresentation(size, alignment)) {}
};
struct MachineOperatorGlobalCache {
@@ -499,12 +514,15 @@ struct MachineOperatorGlobalCache {
MACHINE_TYPE_LIST(LOAD)
#undef LOAD
-#define STACKSLOT(Size) \
- struct StackSlotOfSize##Size##Operator final : public StackSlotOperator { \
- StackSlotOfSize##Size##Operator() : StackSlotOperator(Size) {} \
- }; \
- StackSlotOfSize##Size##Operator kStackSlotSize##Size;
- STACK_SLOT_CACHED_SIZES_LIST(STACKSLOT)
+#define STACKSLOT(Size, Alignment) \
+ struct StackSlotOfSize##Size##OfAlignment##Alignment##Operator final \
+ : public StackSlotOperator { \
+ StackSlotOfSize##Size##OfAlignment##Alignment##Operator() \
+ : StackSlotOperator(Size, Alignment) {} \
+ }; \
+ StackSlotOfSize##Size##OfAlignment##Alignment##Operator \
+ kStackSlotOfSize##Size##OfAlignment##Alignment;
+ STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(STACKSLOT)
#undef STACKSLOT
#define STORE(Type) \
@@ -755,21 +773,23 @@ const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
return nullptr;
}
-const Operator* MachineOperatorBuilder::StackSlot(int size) {
+const Operator* MachineOperatorBuilder::StackSlot(int size, int alignment) {
DCHECK_LE(0, size);
-#define CASE_CACHED_SIZE(Size) \
- case Size: \
- return &cache_.kStackSlotSize##Size;
- switch (size) {
- STACK_SLOT_CACHED_SIZES_LIST(CASE_CACHED_SIZE);
- default:
- return new (zone_) StackSlotOperator(size);
+ DCHECK(alignment == 0 || alignment == 4 || alignment == 8 || alignment == 16);
+#define CASE_CACHED_SIZE(Size, Alignment) \
+ if (size == Size && alignment == Alignment) { \
+ return &cache_.kStackSlotOfSize##Size##OfAlignment##Alignment; \
}
+
+ STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST(CASE_CACHED_SIZE)
+
#undef CASE_CACHED_SIZE
+ return new (zone_) StackSlotOperator(size, alignment);
}
-const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
- return StackSlot(1 << ElementSizeLog2Of(rep));
+const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep,
+ int alignment) {
+ return StackSlot(1 << ElementSizeLog2Of(rep), alignment);
}
const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
@@ -1007,16 +1027,29 @@ SIMD_LANE_OP_LIST(SIMD_LANE_OPS)
SIMD_FORMAT_LIST(SIMD_SHIFT_OPS)
#undef SIMD_SHIFT_OPS
-// TODO(bbudge) Add Shuffle, DCHECKs based on format.
-#define SIMD_PERMUTE_OPS(format, bits) \
- const Operator* MachineOperatorBuilder::S##format##Swizzle( \
- uint32_t swizzle) { \
- return new (zone_) \
- Operator1<uint32_t>(IrOpcode::kS##format##Swizzle, Operator::kPure, \
- "Swizzle", 2, 0, 0, 1, 0, 0, swizzle); \
- }
-SIMD_FORMAT_LIST(SIMD_PERMUTE_OPS)
-#undef SIMD_PERMUTE_OPS
+const Operator* MachineOperatorBuilder::S32x4Shuffle(uint8_t shuffle[16]) {
+ uint8_t* array = zone_->NewArray<uint8_t>(4);
+ memcpy(array, shuffle, 4);
+ return new (zone_)
+ Operator1<uint8_t*>(IrOpcode::kS32x4Shuffle, Operator::kPure, "Shuffle",
+ 2, 0, 0, 1, 0, 0, array);
+}
+
+const Operator* MachineOperatorBuilder::S16x8Shuffle(uint8_t shuffle[16]) {
+ uint8_t* array = zone_->NewArray<uint8_t>(8);
+ memcpy(array, shuffle, 8);
+ return new (zone_)
+ Operator1<uint8_t*>(IrOpcode::kS16x8Shuffle, Operator::kPure, "Shuffle",
+ 2, 0, 0, 1, 0, 0, array);
+}
+
+const Operator* MachineOperatorBuilder::S8x16Shuffle(uint8_t shuffle[16]) {
+ uint8_t* array = zone_->NewArray<uint8_t>(16);
+ memcpy(array, shuffle, 16);
+ return new (zone_)
+ Operator1<uint8_t*>(IrOpcode::kS8x16Shuffle, Operator::kPure, "Shuffle",
+ 2, 0, 0, 1, 0, 0, array);
+}
} // namespace compiler
} // namespace internal
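The per-format Swizzle operators (a single uint32_t immediate) give way to Shuffle operators that carry a lane-selection array, copied into the zone at the format's lane count (4, 8, or 16 bytes). Assuming the usual WebAssembly-style shuffle semantics, where each selector byte indexes into the concatenation of the two inputs, the byte-granularity variant behaves like this sketch:

    #include <cstdint>
    #include <cstring>

    // out[i] = concat(a, b)[shuffle[i]], with concat the 32-byte
    // concatenation of the two input vectors.
    void S8x16ShuffleScalar(const uint8_t a[16], const uint8_t b[16],
                            const uint8_t shuffle[16], uint8_t out[16]) {
      uint8_t concat[32];
      std::memcpy(concat, a, 16);
      std::memcpy(concat + 16, b, 16);
      for (int i = 0; i < 16; ++i) out[i] = concat[shuffle[i] & 31];
    }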
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 9ffb355362..82d40a09e3 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -93,7 +93,29 @@ typedef MachineRepresentation CheckedStoreRepresentation;
CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
-int StackSlotSizeOf(Operator const* op);
+class StackSlotRepresentation final {
+ public:
+ StackSlotRepresentation(int size, int alignment)
+ : size_(size), alignment_(alignment) {}
+
+ int size() const { return size_; }
+ int alignment() const { return alignment_; }
+
+ private:
+ int size_;
+ int alignment_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(StackSlotRepresentation,
+ StackSlotRepresentation);
+bool operator!=(StackSlotRepresentation, StackSlotRepresentation);
+
+size_t hash_value(StackSlotRepresentation);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ StackSlotRepresentation);
+
+StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op);
MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
@@ -441,19 +463,15 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* F32x4UConvertI32x4();
const Operator* F32x4Abs();
const Operator* F32x4Neg();
- const Operator* F32x4Sqrt();
const Operator* F32x4RecipApprox();
const Operator* F32x4RecipSqrtApprox();
const Operator* F32x4Add();
+ const Operator* F32x4AddHoriz();
const Operator* F32x4Sub();
const Operator* F32x4Mul();
const Operator* F32x4Div();
const Operator* F32x4Min();
const Operator* F32x4Max();
- const Operator* F32x4MinNum();
- const Operator* F32x4MaxNum();
- const Operator* F32x4RecipRefine();
- const Operator* F32x4RecipSqrtRefine();
const Operator* F32x4Eq();
const Operator* F32x4Ne();
const Operator* F32x4Lt();
@@ -469,6 +487,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I32x4Shl(int32_t);
const Operator* I32x4ShrS(int32_t);
const Operator* I32x4Add();
+ const Operator* I32x4AddHoriz();
const Operator* I32x4Sub();
const Operator* I32x4Mul();
const Operator* I32x4MinS();
@@ -498,6 +517,7 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* I16x8SConvertI32x4();
const Operator* I16x8Add();
const Operator* I16x8AddSaturateS();
+ const Operator* I16x8AddHoriz();
const Operator* I16x8Sub();
const Operator* I16x8SubSaturateS();
const Operator* I16x8Mul();
@@ -556,15 +576,12 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* S128Xor();
const Operator* S128Not();
+ const Operator* S32x4Shuffle(uint8_t shuffle[16]);
const Operator* S32x4Select();
- const Operator* S32x4Swizzle(uint32_t);
- const Operator* S32x4Shuffle();
+ const Operator* S16x8Shuffle(uint8_t shuffle[16]);
const Operator* S16x8Select();
- const Operator* S16x8Swizzle(uint32_t);
- const Operator* S16x8Shuffle();
+ const Operator* S8x16Shuffle(uint8_t shuffle[16]);
const Operator* S8x16Select();
- const Operator* S8x16Swizzle(uint32_t);
- const Operator* S8x16Shuffle();
const Operator* S1x4Zero();
const Operator* S1x4And();
@@ -604,8 +621,8 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// unaligned store [base + index], value
const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
- const Operator* StackSlot(int size);
- const Operator* StackSlot(MachineRepresentation rep);
+ const Operator* StackSlot(int size, int alignment = 0);
+ const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
// Access to the machine stack.
const Operator* LoadStackPointer();
diff --git a/deps/v8/src/compiler/mips/OWNERS b/deps/v8/src/compiler/mips/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/compiler/mips/OWNERS
+++ b/deps/v8/src/compiler/mips/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 628c79025e..5055735ba6 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -622,8 +622,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Call(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
} else {
- __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
- __ Call(at);
+ __ Call(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -640,8 +639,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
} else {
- __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
- __ Jump(at);
+ __ Jump(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -777,8 +775,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
- __ Addu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
- Operand(offset.offset()));
+ Register base_reg = offset.from_stack_pointer() ? sp : fp;
+ __ Addu(i.OutputRegister(), base_reg, Operand(offset.offset()));
+ int alignment = i.InputInt32(1);
+ DCHECK(alignment == 0 || alignment == 4 || alignment == 8 ||
+ alignment == 16);
+ if (FLAG_debug_code && alignment > 0) {
+ // Verify that the output_register is properly aligned
+ __ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
+ __ Assert(eq, kAllocationIsNotDoubleAligned, kScratchReg,
+ Operand(zero_reg));
+ }
+
+ if (alignment == 2 * kPointerSize) {
+ Label done;
+ __ Addu(kScratchReg, base_reg, Operand(offset.offset()));
+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
+ __ Addu(i.OutputRegister(), i.OutputRegister(), kPointerSize);
+ __ bind(&done);
+ } else if (alignment > 2 * kPointerSize) {
+ Label done;
+ __ Addu(kScratchReg, base_reg, Operand(offset.offset()));
+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
+ __ li(kScratchReg2, alignment);
+ __ Subu(kScratchReg2, kScratchReg2, Operand(kScratchReg));
+ __ Addu(i.OutputRegister(), i.OutputRegister(), kScratchReg2);
+ __ bind(&done);
+ }
break;
}
case kIeee754Float64Acos:
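The kArchStackSlot sequence above first materializes the unaligned slot address, then, when an alignment was requested, bumps it to the next aligned address: the alignment == 2 * kPointerSize case can add at most one word, and the general case computes the full padding via kScratchReg2. The equivalent straight-line arithmetic:

    #include <cassert>
    #include <cstdint>

    // Round addr up to the next multiple of alignment (a power of two);
    // this is what the branchy kScratchReg sequence computes.
    uintptr_t AlignUp(uintptr_t addr, uintptr_t alignment) {
      assert(alignment != 0 && (alignment & (alignment - 1)) == 0);
      uintptr_t misalignment = addr & (alignment - 1);
      return misalignment == 0 ? addr : addr + (alignment - misalignment);
    }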
@@ -1760,13 +1785,319 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMipsS32x4Select: {
+ case kMipsS32x4Select:
+ case kMipsS16x8Select:
+ case kMipsS8x16Select: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
__ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
i.InputSimd128Register(1));
break;
}
+ case kMipsF32x4Abs: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
+ break;
+ }
+ case kMipsF32x4Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
+ break;
+ }
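// Aside, not part of the patch: the two cases above implement float abs
// and neg with integer bit ops on each 32-bit lane. Clearing bit 31 (the
// IEEE-754 sign bit) yields |x|; flipping it yields -x. Scalar equivalent:
//   uint32_t bits;
//   std::memcpy(&bits, &f, sizeof bits);
//   bits &= 0x7FFFFFFFu;  // abs
//   bits ^= 0x80000000u;  // neg
//   std::memcpy(&f, &bits, sizeof bits);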
+ case kMipsF32x4RecipApprox: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsF32x4RecipSqrtApprox: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsF32x4Add: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Sub: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Mul: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Max: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Min: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Eq: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Ne: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Lt: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsF32x4Le: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4SConvertF32x4: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI32x4UConvertF32x4: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI32x4Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI32x4LtS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4LeS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4LtU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI32x4LeU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8Splat: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kMipsI16x8ExtractLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kMipsI16x8ReplaceLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (!src.is(dst)) {
+ __ move_v(dst, src);
+ }
+ __ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
+ break;
+ }
+ case kMipsI16x8Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI16x8Shl: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kMipsI16x8ShrS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kMipsI16x8ShrU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kMipsI16x8Add: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8AddSaturateS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8Sub: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8SubSaturateS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8Mul: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8MaxS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8MinS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8Eq: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8Ne: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ nor_v(dst, dst, dst);
+ break;
+ }
+ case kMipsI16x8LtS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8LeS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8AddSaturateU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8SubSaturateU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8MaxU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8MinU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8LtU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI16x8LeU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMipsI8x16Splat: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kMipsI8x16ExtractLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kMipsI8x16ReplaceLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (!src.is(dst)) {
+ __ move_v(dst, src);
+ }
+ __ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
+ break;
+ }
+ case kMipsI8x16Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMipsI8x16Shl: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
+ case kMipsI8x16ShrS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
}
return kSuccess;
} // NOLINT(readability/fn_size)
diff --git a/deps/v8/src/compiler/mips/instruction-codes-mips.h b/deps/v8/src/compiler/mips/instruction-codes-mips.h
index 7d0e755617..f80fae9340 100644
--- a/deps/v8/src/compiler/mips/instruction-codes-mips.h
+++ b/deps/v8/src/compiler/mips/instruction-codes-mips.h
@@ -154,7 +154,59 @@ namespace compiler {
V(MipsI32x4ShrU) \
V(MipsI32x4MaxU) \
V(MipsI32x4MinU) \
- V(MipsS32x4Select)
+ V(MipsS32x4Select) \
+ V(MipsF32x4Abs) \
+ V(MipsF32x4Neg) \
+ V(MipsF32x4RecipApprox) \
+ V(MipsF32x4RecipSqrtApprox) \
+ V(MipsF32x4Add) \
+ V(MipsF32x4Sub) \
+ V(MipsF32x4Mul) \
+ V(MipsF32x4Max) \
+ V(MipsF32x4Min) \
+ V(MipsF32x4Eq) \
+ V(MipsF32x4Ne) \
+ V(MipsF32x4Lt) \
+ V(MipsF32x4Le) \
+ V(MipsI32x4SConvertF32x4) \
+ V(MipsI32x4UConvertF32x4) \
+ V(MipsI32x4Neg) \
+ V(MipsI32x4LtS) \
+ V(MipsI32x4LeS) \
+ V(MipsI32x4LtU) \
+ V(MipsI32x4LeU) \
+ V(MipsI16x8Splat) \
+ V(MipsI16x8ExtractLane) \
+ V(MipsI16x8ReplaceLane) \
+ V(MipsI16x8Neg) \
+ V(MipsI16x8Shl) \
+ V(MipsI16x8ShrS) \
+ V(MipsI16x8ShrU) \
+ V(MipsI16x8Add) \
+ V(MipsI16x8AddSaturateS) \
+ V(MipsI16x8Sub) \
+ V(MipsI16x8SubSaturateS) \
+ V(MipsI16x8Mul) \
+ V(MipsI16x8MaxS) \
+ V(MipsI16x8MinS) \
+ V(MipsI16x8Eq) \
+ V(MipsI16x8Ne) \
+ V(MipsI16x8LtS) \
+ V(MipsI16x8LeS) \
+ V(MipsI16x8AddSaturateU) \
+ V(MipsI16x8SubSaturateU) \
+ V(MipsI16x8MaxU) \
+ V(MipsI16x8MinU) \
+ V(MipsI16x8LtU) \
+ V(MipsI16x8LeU) \
+ V(MipsI8x16Splat) \
+ V(MipsI8x16ExtractLane) \
+ V(MipsI8x16ReplaceLane) \
+ V(MipsI8x16Neg) \
+ V(MipsI8x16Shl) \
+ V(MipsI8x16ShrS) \
+ V(MipsS16x8Select) \
+ V(MipsS8x16Select)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
diff --git a/deps/v8/src/compiler/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
index c99be67dd7..1058833a43 100644
--- a/deps/v8/src/compiler/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/mips/instruction-selector-mips.cc
@@ -256,6 +256,16 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
VisitBinop(selector, node, opcode, false, kArchNop);
}
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int alignment = rep.alignment();
+ int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)),
+ sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
+}

void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -2035,6 +2045,214 @@ void InstructionSelector::VisitS32x4Select(Node* node) {
VisitRRRR(this, kMipsS32x4Select, node);
}
+void InstructionSelector::VisitF32x4Abs(Node* node) {
+ VisitRR(this, kMipsF32x4Abs, node);
+}
+
+void InstructionSelector::VisitF32x4Neg(Node* node) {
+ VisitRR(this, kMipsF32x4Neg, node);
+}
+
+void InstructionSelector::VisitF32x4RecipApprox(Node* node) {
+ VisitRR(this, kMipsF32x4RecipApprox, node);
+}
+
+void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
+ VisitRR(this, kMipsF32x4RecipSqrtApprox, node);
+}
+
+void InstructionSelector::VisitF32x4Add(Node* node) {
+ VisitRRR(this, kMipsF32x4Add, node);
+}
+
+void InstructionSelector::VisitF32x4Sub(Node* node) {
+ VisitRRR(this, kMipsF32x4Sub, node);
+}
+
+void InstructionSelector::VisitF32x4Mul(Node* node) {
+ VisitRRR(this, kMipsF32x4Mul, node);
+}
+
+void InstructionSelector::VisitF32x4Max(Node* node) {
+ VisitRRR(this, kMipsF32x4Max, node);
+}
+
+void InstructionSelector::VisitF32x4Min(Node* node) {
+ VisitRRR(this, kMipsF32x4Min, node);
+}
+
+void InstructionSelector::VisitF32x4Eq(Node* node) {
+ VisitRRR(this, kMipsF32x4Eq, node);
+}
+
+void InstructionSelector::VisitF32x4Ne(Node* node) {
+ VisitRRR(this, kMipsF32x4Ne, node);
+}
+
+void InstructionSelector::VisitF32x4Lt(Node* node) {
+ VisitRRR(this, kMipsF32x4Lt, node);
+}
+
+void InstructionSelector::VisitF32x4Le(Node* node) {
+ VisitRRR(this, kMipsF32x4Le, node);
+}
+
+void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
+ VisitRR(this, kMipsI32x4SConvertF32x4, node);
+}
+
+void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
+ VisitRR(this, kMipsI32x4UConvertF32x4, node);
+}
+
+void InstructionSelector::VisitI32x4Neg(Node* node) {
+ VisitRR(this, kMipsI32x4Neg, node);
+}
+
+void InstructionSelector::VisitI32x4LtS(Node* node) {
+ VisitRRR(this, kMipsI32x4LtS, node);
+}
+
+void InstructionSelector::VisitI32x4LeS(Node* node) {
+ VisitRRR(this, kMipsI32x4LeS, node);
+}
+
+void InstructionSelector::VisitI32x4LtU(Node* node) {
+ VisitRRR(this, kMipsI32x4LtU, node);
+}
+
+void InstructionSelector::VisitI32x4LeU(Node* node) {
+ VisitRRR(this, kMipsI32x4LeU, node);
+}
+
+void InstructionSelector::VisitI16x8Splat(Node* node) {
+ VisitRR(this, kMipsI16x8Splat, node);
+}
+
+void InstructionSelector::VisitI16x8ExtractLane(Node* node) {
+ VisitRRI(this, kMipsI16x8ExtractLane, node);
+}
+
+void InstructionSelector::VisitI16x8ReplaceLane(Node* node) {
+ VisitRRIR(this, kMipsI16x8ReplaceLane, node);
+}
+
+void InstructionSelector::VisitI16x8Neg(Node* node) {
+ VisitRR(this, kMipsI16x8Neg, node);
+}
+
+void InstructionSelector::VisitI16x8Shl(Node* node) {
+ VisitRRI(this, kMipsI16x8Shl, node);
+}
+
+void InstructionSelector::VisitI16x8ShrS(Node* node) {
+ VisitRRI(this, kMipsI16x8ShrS, node);
+}
+
+void InstructionSelector::VisitI16x8ShrU(Node* node) {
+ VisitRRI(this, kMipsI16x8ShrU, node);
+}
+
+void InstructionSelector::VisitI16x8Add(Node* node) {
+ VisitRRR(this, kMipsI16x8Add, node);
+}
+
+void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
+ VisitRRR(this, kMipsI16x8AddSaturateS, node);
+}
+
+void InstructionSelector::VisitI16x8Sub(Node* node) {
+ VisitRRR(this, kMipsI16x8Sub, node);
+}
+
+void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
+ VisitRRR(this, kMipsI16x8SubSaturateS, node);
+}
+
+void InstructionSelector::VisitI16x8Mul(Node* node) {
+ VisitRRR(this, kMipsI16x8Mul, node);
+}
+
+void InstructionSelector::VisitI16x8MaxS(Node* node) {
+ VisitRRR(this, kMipsI16x8MaxS, node);
+}
+
+void InstructionSelector::VisitI16x8MinS(Node* node) {
+ VisitRRR(this, kMipsI16x8MinS, node);
+}
+
+void InstructionSelector::VisitI16x8Eq(Node* node) {
+ VisitRRR(this, kMipsI16x8Eq, node);
+}
+
+void InstructionSelector::VisitI16x8Ne(Node* node) {
+ VisitRRR(this, kMipsI16x8Ne, node);
+}
+
+void InstructionSelector::VisitI16x8LtS(Node* node) {
+ VisitRRR(this, kMipsI16x8LtS, node);
+}
+
+void InstructionSelector::VisitI16x8LeS(Node* node) {
+ VisitRRR(this, kMipsI16x8LeS, node);
+}
+
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
+ VisitRRR(this, kMipsI16x8AddSaturateU, node);
+}
+
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
+ VisitRRR(this, kMipsI16x8SubSaturateU, node);
+}
+
+void InstructionSelector::VisitI16x8MaxU(Node* node) {
+ VisitRRR(this, kMipsI16x8MaxU, node);
+}
+
+void InstructionSelector::VisitI16x8MinU(Node* node) {
+ VisitRRR(this, kMipsI16x8MinU, node);
+}
+
+void InstructionSelector::VisitI16x8LtU(Node* node) {
+ VisitRRR(this, kMipsI16x8LtU, node);
+}
+
+void InstructionSelector::VisitI16x8LeU(Node* node) {
+ VisitRRR(this, kMipsI16x8LeU, node);
+}
+
+void InstructionSelector::VisitI8x16Splat(Node* node) {
+ VisitRR(this, kMipsI8x16Splat, node);
+}
+
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) {
+ VisitRRI(this, kMipsI8x16ExtractLane, node);
+}
+
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) {
+ VisitRRIR(this, kMipsI8x16ReplaceLane, node);
+}
+
+void InstructionSelector::VisitI8x16Neg(Node* node) {
+ VisitRR(this, kMipsI8x16Neg, node);
+}
+
+void InstructionSelector::VisitI8x16Shl(Node* node) {
+ VisitRRI(this, kMipsI8x16Shl, node);
+}
+
+void InstructionSelector::VisitI8x16ShrS(Node* node) {
+ VisitRRI(this, kMipsI8x16ShrS, node);
+}
+
+void InstructionSelector::VisitS16x8Select(Node* node) {
+ VisitRRRR(this, kMipsS16x8Select, node);
+}
+
+void InstructionSelector::VisitS8x16Select(Node* node) {
+ VisitRRRR(this, kMipsS8x16Select, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
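
Note: the new SIMD visitors above are thin wrappers: each WASM SIMD operation is forwarded to a generic emit helper (VisitRR for unary ops, VisitRRR for binary ops, VisitRRI for ops taking an immediate lane index or shift amount, VisitRRIR for lane replacement). A sketch of what such a helper presumably looks like, following the pattern used throughout V8's instruction selectors rather than code copied from this patch:

    static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
                         Node* node) {
      MipsOperandGenerator g(selector);
      // One register output, two register inputs.
      selector->Emit(opcode, g.DefineAsRegister(node),
                     g.UseRegister(node->InputAt(0)),
                     g.UseRegister(node->InputAt(1)));
    }
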
diff --git a/deps/v8/src/compiler/mips64/OWNERS b/deps/v8/src/compiler/mips64/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/compiler/mips64/OWNERS
+++ b/deps/v8/src/compiler/mips64/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
index f1831adf63..f4fb71d989 100644
--- a/deps/v8/src/compiler/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
@@ -583,8 +583,8 @@ void CodeGenerator::AssembleDeconstructFrame() {
void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
- __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
}
frame_access_state()->SetFrameAccessToSP();
}
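
Note: the recurring ld/sd → Ld/Sd (and lw/sw → Lw/Sw, lbu → Lbu, etc.) renames throughout this file switch from raw assembler instructions to their capitalized MacroAssembler counterparts. A plausible reason, stated as an assumption rather than taken from this patch: MIPS load/store offsets are signed 16-bit immediates, so the macro versions can legalize offsets the raw encoding cannot hold, roughly like this:

    // Hypothetical offset legalization inside a Ld-style macro:
    static bool FitsInt16(int64_t offset) {
      return offset >= -32768 && offset <= 32767;
    }
    // if (FitsInt16(off))  emit:  ld    rd, off(base)
    // else                 emit:  li    scratch, off
    //                             daddu scratch, base, scratch
    //                             ld    rd, 0(scratch)
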
@@ -597,14 +597,14 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
Label done;
// Check if current frame is an arguments adaptor frame.
- __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Branch(&done, ne, scratch3,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Load arguments count from current arguments adaptor frame (note, it
// does not include receiver).
Register caller_args_count_reg = scratch1;
- __ ld(caller_args_count_reg,
+ __ Ld(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(caller_args_count_reg);
@@ -696,10 +696,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
- __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
- __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -709,13 +709,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
- __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
i.TempRegister(0), i.TempRegister(1),
i.TempRegister(2));
- __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ Ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Jump(at);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -784,7 +784,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArchParentFramePointer:
if (frame_access_state()->has_frame()) {
- __ ld(i.OutputRegister(), MemOperand(fp, 0));
+ __ Ld(i.OutputRegister(), MemOperand(fp, 0));
} else {
__ mov(i.OutputRegister(), fp);
}
@@ -803,7 +803,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
scratch0, scratch1, mode);
__ Daddu(at, object, index);
- __ sd(value, MemOperand(at));
+ __ Sd(value, MemOperand(at));
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask, ne,
ool->entry());
@@ -813,8 +813,35 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
- __ Daddu(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
- Operand(offset.offset()));
+ Register base_reg = offset.from_stack_pointer() ? sp : fp;
+ __ Daddu(i.OutputRegister(), base_reg, Operand(offset.offset()));
+ int alignment = i.InputInt32(1);
+ DCHECK(alignment == 0 || alignment == 4 || alignment == 8 ||
+ alignment == 16);
+ if (FLAG_debug_code && alignment > 0) {
+        // Verify that the slot address is at least pointer-size aligned.
+ __ And(kScratchReg, i.OutputRegister(), Operand(kPointerSize - 1));
+ __ Assert(eq, kAllocationIsNotDoubleAligned, kScratchReg,
+ Operand(zero_reg));
+ }
+ if (alignment == 2 * kPointerSize) {
+ Label done;
+ __ Daddu(kScratchReg, base_reg, Operand(offset.offset()));
+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
+ __ Daddu(i.OutputRegister(), i.OutputRegister(), kPointerSize);
+ __ bind(&done);
+ } else if (alignment > 2 * kPointerSize) {
+ Label done;
+ __ Daddu(kScratchReg, base_reg, Operand(offset.offset()));
+ __ And(kScratchReg, kScratchReg, Operand(alignment - 1));
+ __ BranchShort(&done, eq, kScratchReg, Operand(zero_reg));
+ __ li(kScratchReg2, alignment);
+ __ Dsubu(kScratchReg2, kScratchReg2, Operand(kScratchReg));
+ __ Daddu(i.OutputRegister(), i.OutputRegister(), kScratchReg2);
+ __ bind(&done);
+ }
+
break;
}
case kIeee754Float64Acos:
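
Note: the kArchStackSlot case now pads the computed slot address at runtime when an alignment was requested. The two emitted branches implement the same rounding: the alignment == 2 * kPointerSize branch can add a fixed kPointerSize because slot addresses are already pointer-aligned, so the only possible misalignment is exactly kPointerSize; the general branch computes alignment - misalignment with a scratch register. In scalar form (a sketch; alignment must be a power of two per the DCHECK):

    static uint64_t PadToAlignment(uint64_t addr, uint64_t alignment) {
      uint64_t mis = addr & (alignment - 1);  // And(kScratchReg, ...)
      if (mis == 0) return addr;              // BranchShort(&done, eq, ...)
      return addr + (alignment - mis);        // li/Dsubu/Daddu sequence
    }
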
@@ -1216,19 +1243,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kMips64Dext: {
- int16_t pos = i.InputInt8(1);
- int16_t size = i.InputInt8(2);
- if (size > 0 && size <= 32 && pos >= 0 && pos < 32) {
- __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
- i.InputInt8(2));
- } else if (size > 32 && size <= 64 && pos >= 0 && pos < 32) {
- __ Dextm(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
- i.InputInt8(2));
- } else {
- DCHECK(size > 0 && size <= 32 && pos >= 32 && pos < 64);
- __ Dextu(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
- i.InputInt8(2));
- }
+ __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
break;
}
case kMips64Dins:
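
Note: the removed dispatch chose among dext/dextm/dextu depending on where the extracted bit field sits; that choice presumably now lives inside MacroAssembler::Dext, letting the code generator call it unconditionally. The deleted conditions, kept here as a reference sketch:

    enum class DextVariant { kDext, kDextm, kDextu };

    // Mirrors the conditions removed above (pos and size in bits).
    static DextVariant SelectDextVariant(int pos, int size) {
      if (size > 0 && size <= 32 && pos >= 0 && pos < 32)
        return DextVariant::kDext;   // field within the low 32 bits
      if (size > 32 && size <= 64 && pos >= 0 && pos < 32)
        return DextVariant::kDextm;  // field wider than 32 bits
      return DextVariant::kDextu;    // field starting at bit 32 or above
    }
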
@@ -1712,64 +1728,64 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ seh(i.OutputRegister(), i.InputRegister(0));
break;
case kMips64Lbu:
- __ lbu(i.OutputRegister(), i.MemoryOperand());
+ __ Lbu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Lb:
- __ lb(i.OutputRegister(), i.MemoryOperand());
+ __ Lb(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sb:
- __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
+ __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lhu:
- __ lhu(i.OutputRegister(), i.MemoryOperand());
+ __ Lhu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ulhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Lh:
- __ lh(i.OutputRegister(), i.MemoryOperand());
+ __ Lh(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ulh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sh:
- __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
+ __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Ush:
__ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
break;
case kMips64Lw:
- __ lw(i.OutputRegister(), i.MemoryOperand());
+ __ Lw(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ulw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Lwu:
- __ lwu(i.OutputRegister(), i.MemoryOperand());
+ __ Lwu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ulwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Ld:
- __ ld(i.OutputRegister(), i.MemoryOperand());
+ __ Ld(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Uld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
break;
case kMips64Sw:
- __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
+ __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Usw:
__ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Sd:
- __ sd(i.InputOrZeroRegister(2), i.MemoryOperand());
+ __ Sd(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Usd:
__ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lwc1: {
- __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
+ __ Lwc1(i.OutputSingleRegister(), i.MemoryOperand());
break;
}
case kMips64Ulwc1: {
@@ -1783,7 +1799,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
- __ swc1(ft, operand);
+ __ Swc1(ft, operand);
break;
}
case kMips64Uswc1: {
@@ -1797,7 +1813,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kMips64Ldc1:
- __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kMips64Uldc1:
__ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
@@ -1807,7 +1823,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
__ Move(kDoubleRegZero, 0.0);
}
- __ sdc1(ft, i.MemoryOperand());
+ __ Sdc1(ft, i.MemoryOperand());
break;
}
case kMips64Usdc1: {
@@ -1820,7 +1836,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64Push:
if (instr->InputAt(0)->IsFPRegister()) {
- __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
__ Subu(sp, sp, Operand(kDoubleSize));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
@@ -1835,9 +1851,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kMips64StoreToStackSlot: {
if (instr->InputAt(0)->IsFPRegister()) {
- __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
} else {
- __ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ __ Sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
}
break;
}
@@ -2090,13 +2106,319 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1));
break;
}
- case kMips64S32x4Select: {
+ case kMips64S32x4Select:
+ case kMips64S16x8Select:
+ case kMips64S8x16Select: {
CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
__ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2),
i.InputSimd128Register(1));
break;
}
+ case kMips64F32x4Abs: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
+ break;
+ }
+ case kMips64F32x4Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31);
+ break;
+ }
+ case kMips64F32x4RecipApprox: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ frcp_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64F32x4RecipSqrtApprox: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ frsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64F32x4Add: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Sub: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Mul: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Max: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Min: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Eq: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Ne: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Lt: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64F32x4Le: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4SConvertF32x4: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I32x4UConvertF32x4: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I32x4Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ subv_w(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I32x4LtS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4LeS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4LtU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I32x4LeU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8Splat: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fill_h(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kMips64I16x8ExtractLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kMips64I16x8ReplaceLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (!src.is(dst)) {
+ __ move_v(dst, src);
+ }
+ __ insert_h(dst, i.InputInt8(1), i.InputRegister(2));
+ break;
+ }
+ case kMips64I16x8Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ subv_h(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I16x8Shl: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kMips64I16x8ShrS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kMips64I16x8ShrU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ break;
+ }
+ case kMips64I16x8Add: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8AddSaturateS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8Sub: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8SubSaturateS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8Mul: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8MaxS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8MinS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8Eq: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8Ne: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+ __ nor_v(dst, dst, dst);
+ break;
+ }
+ case kMips64I16x8LtS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8LeS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8AddSaturateU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8SubSaturateU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8MaxU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8MinU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8LtU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I16x8LeU: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kMips64I8x16Splat: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ fill_b(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kMips64I8x16ExtractLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1));
+ break;
+ }
+ case kMips64I8x16ReplaceLane: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ if (!src.is(dst)) {
+ __ move_v(dst, src);
+ }
+ __ insert_b(dst, i.InputInt8(1), i.InputRegister(2));
+ break;
+ }
+ case kMips64I8x16Neg: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero);
+ __ subv_b(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0));
+ break;
+ }
+ case kMips64I8x16Shl: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
+ case kMips64I8x16ShrS: {
+ CpuFeatureScope msa_scope(masm(), MIPS_SIMD);
+ __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ break;
+ }
}
return kSuccess;
} // NOLINT(readability/fn_size)
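
Note: a few MSA idioms recur in the new cases above. The three select opcodes share a single bsel_v, which requires the output register to alias the mask (input 0); Neg is synthesized as 0 - x using an xor-zeroed register; and Ne is ceq followed by nor of the result with itself, i.e. a bitwise NOT. Per-lane scalar sketches of the first and last:

    static uint32_t BitwiseSelect(uint32_t mask, uint32_t if_set,
                                  uint32_t if_clear) {
      return (if_set & mask) | (if_clear & ~mask);  // bsel_v
    }

    static uint16_t NotEqualMask(uint16_t a, uint16_t b) {
      uint16_t eq = (a == b) ? 0xFFFF : 0x0000;       // ceq_h
      return static_cast<uint16_t>(~eq);              // nor_v(dst, dst, dst)
    }
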
@@ -2331,7 +2653,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
base::bits::IsPowerOfTwo64(i.InputOperand(1).immediate())) {
uint16_t pos =
base::bits::CountTrailingZeros64(i.InputOperand(1).immediate());
- __ ExtractBits(result, i.InputRegister(0), pos, 1);
+ __ Dext(result, i.InputRegister(0), pos, 1);
} else {
__ And(kScratchReg, i.InputRegister(0), i.InputOperand(1));
__ Sltu(result, zero_reg, kScratchReg);
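
Note: this hunk replaces the ExtractBits helper with a direct Dext for the power-of-two And case: testing x & imm where imm has a single bit set reduces to extracting the one bit at the mask's trailing-zero position, instead of an and plus compare. The scalar equivalent:

    // Equivalent of Dext(result, x, pos, 1) for a power-of-two mask.
    static bool TestBit(uint64_t x, unsigned pos) {
      return (x >> pos) & 1u;
    }
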
@@ -2657,17 +2979,17 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsRegister()) {
__ mov(g.ToRegister(destination), src);
} else {
- __ sd(src, g.ToMemOperand(destination));
+ __ Sd(src, g.ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsRegister()) {
- __ ld(g.ToRegister(destination), src);
+ __ Ld(g.ToRegister(destination), src);
} else {
Register temp = kScratchReg;
- __ ld(temp, src);
- __ sd(temp, g.ToMemOperand(destination));
+ __ Ld(temp, src);
+ __ Sd(temp, g.ToMemOperand(destination));
}
} else if (source->IsConstant()) {
Constant src = g.ToConstant(source);
@@ -2713,15 +3035,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
break;
}
- if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
+ if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
if (destination->IsFPStackSlot()) {
MemOperand dst = g.ToMemOperand(destination);
if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
- __ sw(zero_reg, dst);
+ __ Sw(zero_reg, dst);
} else {
__ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
- __ sw(at, dst);
+ __ Sw(at, dst);
}
} else {
DCHECK(destination->IsFPRegister());
@@ -2735,7 +3057,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
: kScratchDoubleReg;
__ Move(dst, src.ToFloat64());
if (destination->IsFPStackSlot()) {
- __ sdc1(dst, g.ToMemOperand(destination));
+ __ Sdc1(dst, g.ToMemOperand(destination));
}
}
} else if (source->IsFPRegister()) {
@@ -2745,17 +3067,17 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ Move(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
- __ sdc1(src, g.ToMemOperand(destination));
+ __ Sdc1(src, g.ToMemOperand(destination));
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsFPRegister()) {
- __ ldc1(g.ToDoubleRegister(destination), src);
+ __ Ldc1(g.ToDoubleRegister(destination), src);
} else {
FPURegister temp = kScratchDoubleReg;
- __ ldc1(temp, src);
- __ sdc1(temp, g.ToMemOperand(destination));
+ __ Ldc1(temp, src);
+ __ Sdc1(temp, g.ToMemOperand(destination));
}
} else {
UNREACHABLE();
@@ -2781,8 +3103,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
DCHECK(destination->IsStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ mov(temp, src);
- __ ld(src, dst);
- __ sd(temp, dst);
+ __ Ld(src, dst);
+ __ Sd(temp, dst);
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
@@ -2790,10 +3112,10 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
Register temp_1 = kScratchReg2;
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
- __ ld(temp_0, src);
- __ ld(temp_1, dst);
- __ sd(temp_0, dst);
- __ sd(temp_1, src);
+ __ Ld(temp_0, src);
+ __ Ld(temp_1, dst);
+ __ Sd(temp_0, dst);
+ __ Sd(temp_1, src);
} else if (source->IsFPRegister()) {
FPURegister temp = kScratchDoubleReg;
FPURegister src = g.ToDoubleRegister(source);
@@ -2806,8 +3128,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp, src);
- __ ldc1(src, dst);
- __ sdc1(temp, dst);
+ __ Ldc1(src, dst);
+ __ Sdc1(temp, dst);
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
@@ -2817,12 +3139,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
MemOperand src1(src0.rm(), src0.offset() + kIntSize);
MemOperand dst0 = g.ToMemOperand(destination);
MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
- __ ldc1(temp_1, dst0); // Save destination in temp_1.
- __ lw(temp_0, src0); // Then use temp_0 to copy source to destination.
- __ sw(temp_0, dst0);
- __ lw(temp_0, src1);
- __ sw(temp_0, dst1);
- __ sdc1(temp_1, src0);
+ __ Ldc1(temp_1, dst0); // Save destination in temp_1.
+ __ Lw(temp_0, src0); // Then use temp_0 to copy source to destination.
+ __ Sw(temp_0, dst0);
+ __ Lw(temp_0, src1);
+ __ Sw(temp_0, dst1);
+ __ Sdc1(temp_1, src0);
} else {
// No other combinations are possible.
UNREACHABLE();
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
index 5d22bc1eba..02cd4d5852 100644
--- a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
@@ -188,7 +188,59 @@ namespace compiler {
V(Mips64I32x4ShrU) \
V(Mips64I32x4MaxU) \
V(Mips64I32x4MinU) \
- V(Mips64S32x4Select)
+ V(Mips64S32x4Select) \
+ V(Mips64F32x4Abs) \
+ V(Mips64F32x4Neg) \
+ V(Mips64F32x4RecipApprox) \
+ V(Mips64F32x4RecipSqrtApprox) \
+ V(Mips64F32x4Add) \
+ V(Mips64F32x4Sub) \
+ V(Mips64F32x4Mul) \
+ V(Mips64F32x4Max) \
+ V(Mips64F32x4Min) \
+ V(Mips64F32x4Eq) \
+ V(Mips64F32x4Ne) \
+ V(Mips64F32x4Lt) \
+ V(Mips64F32x4Le) \
+ V(Mips64I32x4SConvertF32x4) \
+ V(Mips64I32x4UConvertF32x4) \
+ V(Mips64I32x4Neg) \
+ V(Mips64I32x4LtS) \
+ V(Mips64I32x4LeS) \
+ V(Mips64I32x4LtU) \
+ V(Mips64I32x4LeU) \
+ V(Mips64I16x8Splat) \
+ V(Mips64I16x8ExtractLane) \
+ V(Mips64I16x8ReplaceLane) \
+ V(Mips64I16x8Neg) \
+ V(Mips64I16x8Shl) \
+ V(Mips64I16x8ShrS) \
+ V(Mips64I16x8ShrU) \
+ V(Mips64I16x8Add) \
+ V(Mips64I16x8AddSaturateS) \
+ V(Mips64I16x8Sub) \
+ V(Mips64I16x8SubSaturateS) \
+ V(Mips64I16x8Mul) \
+ V(Mips64I16x8MaxS) \
+ V(Mips64I16x8MinS) \
+ V(Mips64I16x8Eq) \
+ V(Mips64I16x8Ne) \
+ V(Mips64I16x8LtS) \
+ V(Mips64I16x8LeS) \
+ V(Mips64I16x8AddSaturateU) \
+ V(Mips64I16x8SubSaturateU) \
+ V(Mips64I16x8MaxU) \
+ V(Mips64I16x8MinU) \
+ V(Mips64I16x8LtU) \
+ V(Mips64I16x8LeU) \
+ V(Mips64I8x16Splat) \
+ V(Mips64I8x16ExtractLane) \
+ V(Mips64I8x16ReplaceLane) \
+ V(Mips64I8x16Neg) \
+ V(Mips64I8x16Shl) \
+ V(Mips64I8x16ShrS) \
+ V(Mips64S16x8Select) \
+ V(Mips64S8x16Select)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
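
Note: these opcode tables are X-macro lists: each V(...) entry is expanded by whatever macro the including site supplies, so a single list can generate the opcode enum, the mnemonic strings, and the dispatch cases in lockstep. A generic illustration of the pattern (not code from this patch):

    #define OPCODE_LIST(V) V(Add) V(Sub) V(Mul)

    #define DECLARE_OPCODE(name) k##name,
    enum Opcode { OPCODE_LIST(DECLARE_OPCODE) kLastOpcode };
    #undef DECLARE_OPCODE

    #define OPCODE_NAME(name) #name,
    static const char* kOpcodeNames[] = {OPCODE_LIST(OPCODE_NAME)};
    #undef OPCODE_NAME
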
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
index 4e5c4e847e..b4664d036a 100644
--- a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
@@ -352,6 +352,17 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
VisitBinop(selector, node, opcode, false, kArchNop);
}
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int alignment = rep.alignment();
+ int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)),
+ sequence()->AddImmediate(Constant(alignment)), 0, nullptr);
+}
+
void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
Node* output = nullptr) {
Mips64OperandGenerator g(selector);
@@ -2786,6 +2797,214 @@ void InstructionSelector::VisitS32x4Select(Node* node) {
VisitRRRR(this, kMips64S32x4Select, node);
}
+void InstructionSelector::VisitF32x4Abs(Node* node) {
+ VisitRR(this, kMips64F32x4Abs, node);
+}
+
+void InstructionSelector::VisitF32x4Neg(Node* node) {
+ VisitRR(this, kMips64F32x4Neg, node);
+}
+
+void InstructionSelector::VisitF32x4RecipApprox(Node* node) {
+ VisitRR(this, kMips64F32x4RecipApprox, node);
+}
+
+void InstructionSelector::VisitF32x4RecipSqrtApprox(Node* node) {
+ VisitRR(this, kMips64F32x4RecipSqrtApprox, node);
+}
+
+void InstructionSelector::VisitF32x4Add(Node* node) {
+ VisitRRR(this, kMips64F32x4Add, node);
+}
+
+void InstructionSelector::VisitF32x4Sub(Node* node) {
+ VisitRRR(this, kMips64F32x4Sub, node);
+}
+
+void InstructionSelector::VisitF32x4Mul(Node* node) {
+ VisitRRR(this, kMips64F32x4Mul, node);
+}
+
+void InstructionSelector::VisitF32x4Max(Node* node) {
+ VisitRRR(this, kMips64F32x4Max, node);
+}
+
+void InstructionSelector::VisitF32x4Min(Node* node) {
+ VisitRRR(this, kMips64F32x4Min, node);
+}
+
+void InstructionSelector::VisitF32x4Eq(Node* node) {
+ VisitRRR(this, kMips64F32x4Eq, node);
+}
+
+void InstructionSelector::VisitF32x4Ne(Node* node) {
+ VisitRRR(this, kMips64F32x4Ne, node);
+}
+
+void InstructionSelector::VisitF32x4Lt(Node* node) {
+ VisitRRR(this, kMips64F32x4Lt, node);
+}
+
+void InstructionSelector::VisitF32x4Le(Node* node) {
+ VisitRRR(this, kMips64F32x4Le, node);
+}
+
+void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
+ VisitRR(this, kMips64I32x4SConvertF32x4, node);
+}
+
+void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
+ VisitRR(this, kMips64I32x4UConvertF32x4, node);
+}
+
+void InstructionSelector::VisitI32x4Neg(Node* node) {
+ VisitRR(this, kMips64I32x4Neg, node);
+}
+
+void InstructionSelector::VisitI32x4LtS(Node* node) {
+ VisitRRR(this, kMips64I32x4LtS, node);
+}
+
+void InstructionSelector::VisitI32x4LeS(Node* node) {
+ VisitRRR(this, kMips64I32x4LeS, node);
+}
+
+void InstructionSelector::VisitI32x4LtU(Node* node) {
+ VisitRRR(this, kMips64I32x4LtU, node);
+}
+
+void InstructionSelector::VisitI32x4LeU(Node* node) {
+ VisitRRR(this, kMips64I32x4LeU, node);
+}
+
+void InstructionSelector::VisitI16x8Splat(Node* node) {
+ VisitRR(this, kMips64I16x8Splat, node);
+}
+
+void InstructionSelector::VisitI16x8ExtractLane(Node* node) {
+ VisitRRI(this, kMips64I16x8ExtractLane, node);
+}
+
+void InstructionSelector::VisitI16x8ReplaceLane(Node* node) {
+ VisitRRIR(this, kMips64I16x8ReplaceLane, node);
+}
+
+void InstructionSelector::VisitI16x8Neg(Node* node) {
+ VisitRR(this, kMips64I16x8Neg, node);
+}
+
+void InstructionSelector::VisitI16x8Shl(Node* node) {
+ VisitRRI(this, kMips64I16x8Shl, node);
+}
+
+void InstructionSelector::VisitI16x8ShrS(Node* node) {
+ VisitRRI(this, kMips64I16x8ShrS, node);
+}
+
+void InstructionSelector::VisitI16x8ShrU(Node* node) {
+ VisitRRI(this, kMips64I16x8ShrU, node);
+}
+
+void InstructionSelector::VisitI16x8Add(Node* node) {
+ VisitRRR(this, kMips64I16x8Add, node);
+}
+
+void InstructionSelector::VisitI16x8AddSaturateS(Node* node) {
+ VisitRRR(this, kMips64I16x8AddSaturateS, node);
+}
+
+void InstructionSelector::VisitI16x8Sub(Node* node) {
+ VisitRRR(this, kMips64I16x8Sub, node);
+}
+
+void InstructionSelector::VisitI16x8SubSaturateS(Node* node) {
+ VisitRRR(this, kMips64I16x8SubSaturateS, node);
+}
+
+void InstructionSelector::VisitI16x8Mul(Node* node) {
+ VisitRRR(this, kMips64I16x8Mul, node);
+}
+
+void InstructionSelector::VisitI16x8MaxS(Node* node) {
+ VisitRRR(this, kMips64I16x8MaxS, node);
+}
+
+void InstructionSelector::VisitI16x8MinS(Node* node) {
+ VisitRRR(this, kMips64I16x8MinS, node);
+}
+
+void InstructionSelector::VisitI16x8Eq(Node* node) {
+ VisitRRR(this, kMips64I16x8Eq, node);
+}
+
+void InstructionSelector::VisitI16x8Ne(Node* node) {
+ VisitRRR(this, kMips64I16x8Ne, node);
+}
+
+void InstructionSelector::VisitI16x8LtS(Node* node) {
+ VisitRRR(this, kMips64I16x8LtS, node);
+}
+
+void InstructionSelector::VisitI16x8LeS(Node* node) {
+ VisitRRR(this, kMips64I16x8LeS, node);
+}
+
+void InstructionSelector::VisitI16x8AddSaturateU(Node* node) {
+ VisitRRR(this, kMips64I16x8AddSaturateU, node);
+}
+
+void InstructionSelector::VisitI16x8SubSaturateU(Node* node) {
+ VisitRRR(this, kMips64I16x8SubSaturateU, node);
+}
+
+void InstructionSelector::VisitI16x8MaxU(Node* node) {
+ VisitRRR(this, kMips64I16x8MaxU, node);
+}
+
+void InstructionSelector::VisitI16x8MinU(Node* node) {
+ VisitRRR(this, kMips64I16x8MinU, node);
+}
+
+void InstructionSelector::VisitI16x8LtU(Node* node) {
+ VisitRRR(this, kMips64I16x8LtU, node);
+}
+
+void InstructionSelector::VisitI16x8LeU(Node* node) {
+ VisitRRR(this, kMips64I16x8LeU, node);
+}
+
+void InstructionSelector::VisitI8x16Splat(Node* node) {
+ VisitRR(this, kMips64I8x16Splat, node);
+}
+
+void InstructionSelector::VisitI8x16ExtractLane(Node* node) {
+ VisitRRI(this, kMips64I8x16ExtractLane, node);
+}
+
+void InstructionSelector::VisitI8x16ReplaceLane(Node* node) {
+ VisitRRIR(this, kMips64I8x16ReplaceLane, node);
+}
+
+void InstructionSelector::VisitI8x16Neg(Node* node) {
+ VisitRR(this, kMips64I8x16Neg, node);
+}
+
+void InstructionSelector::VisitI8x16Shl(Node* node) {
+ VisitRRI(this, kMips64I8x16Shl, node);
+}
+
+void InstructionSelector::VisitI8x16ShrS(Node* node) {
+ VisitRRI(this, kMips64I8x16ShrS, node);
+}
+
+void InstructionSelector::VisitS16x8Select(Node* node) {
+ VisitRRRR(this, kMips64S16x8Select, node);
+}
+
+void InstructionSelector::VisitS8x16Select(Node* node) {
+ VisitRRRR(this, kMips64S8x16Select, node);
+}
+
// static
MachineOperatorBuilder::Flags
InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/deps/v8/src/compiler/node-properties.cc b/deps/v8/src/compiler/node-properties.cc
index a45f7f7a79..452840c1c1 100644
--- a/deps/v8/src/compiler/node-properties.cc
+++ b/deps/v8/src/compiler/node-properties.cc
@@ -138,6 +138,18 @@ bool NodeProperties::IsExceptionalCall(Node* node, Node** out_exception) {
return false;
}
+// static
+Node* NodeProperties::FindSuccessfulControlProjection(Node* node) {
+ DCHECK_GT(node->op()->ControlOutputCount(), 0);
+ if (node->op()->HasProperty(Operator::kNoThrow)) return node;
+ for (Edge const edge : node->use_edges()) {
+ if (!NodeProperties::IsControlEdge(edge)) continue;
+ if (edge.from()->opcode() == IrOpcode::kIfSuccess) {
+ return edge.from();
+ }
+ }
+ return node;
+}
// static
void NodeProperties::ReplaceValueInput(Node* node, Node* value, int index) {
@@ -404,6 +416,13 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
// These never change the map of objects.
break;
}
+ case IrOpcode::kFinishRegion: {
+ // FinishRegion renames the output of allocations, so we need
+ // to update the {receiver} that we are looking for, if the
+ // {receiver} matches the current {effect}.
+ if (IsSame(receiver, effect)) receiver = GetValueInput(effect, 0);
+ break;
+ }
default: {
DCHECK_EQ(1, effect->op()->EffectOutputCount());
if (effect->op()->EffectInputCount() != 1) {
@@ -418,37 +437,18 @@ NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
break;
}
}
+
+ // Stop walking the effect chain once we hit the definition of
+ // the {receiver} along the {effect}s.
+ if (IsSame(receiver, effect)) return kNoReceiverMaps;
+
+ // Continue with the next {effect}.
DCHECK_EQ(1, effect->op()->EffectInputCount());
effect = NodeProperties::GetEffectInput(effect);
}
}
// static
-MaybeHandle<Context> NodeProperties::GetSpecializationContext(
- Node* node, MaybeHandle<Context> context) {
- switch (node->opcode()) {
- case IrOpcode::kHeapConstant:
- return Handle<Context>::cast(OpParameter<Handle<HeapObject>>(node));
- case IrOpcode::kParameter: {
- Node* const start = NodeProperties::GetValueInput(node, 0);
- DCHECK_EQ(IrOpcode::kStart, start->opcode());
- int const index = ParameterIndexOf(node->op());
- // The context is always the last parameter to a JavaScript function, and
- // {Parameter} indices start at -1, so value outputs of {Start} look like
- // this: closure, receiver, param0, ..., paramN, context.
- if (index == start->op()->ValueOutputCount() - 2) {
- return context;
- }
- break;
- }
- default:
- break;
- }
- return MaybeHandle<Context>();
-}
-
-
-// static
Node* NodeProperties::GetOuterContext(Node* node, size_t* depth) {
Node* context = NodeProperties::GetContextInput(node);
while (*depth > 0 &&
diff --git a/deps/v8/src/compiler/node-properties.h b/deps/v8/src/compiler/node-properties.h
index aa35ea84e0..02ab2ce044 100644
--- a/deps/v8/src/compiler/node-properties.h
+++ b/deps/v8/src/compiler/node-properties.h
@@ -79,6 +79,10 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// the present IfException projection is returned via {out_exception}.
static bool IsExceptionalCall(Node* node, Node** out_exception = nullptr);
+ // Returns the node producing the successful control output of {node}. This is
+ // the IfSuccess projection of {node} if present and {node} itself otherwise.
+ static Node* FindSuccessfulControlProjection(Node* node);
+
// ---------------------------------------------------------------------------
// Miscellaneous mutators.
@@ -142,12 +146,6 @@ class V8_EXPORT_PRIVATE NodeProperties final {
// ---------------------------------------------------------------------------
// Context.
- // Try to retrieve the specialization context from the given {node},
- // optionally utilizing the knowledge about the (outermost) function
- // {context}.
- static MaybeHandle<Context> GetSpecializationContext(
- Node* node, MaybeHandle<Context> context = MaybeHandle<Context>());
-
// Walk up the context chain from the given {node} until we reduce the {depth}
// to 0 or hit a node that does not extend the context chain ({depth} will be
// updated accordingly).
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 18736a1f56..ce152b1512 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -66,7 +66,6 @@
V(Call) \
V(Parameter) \
V(OsrValue) \
- V(OsrGuard) \
V(LoopExit) \
V(LoopExitValue) \
V(LoopExitEffect) \
@@ -147,6 +146,7 @@
V(JSStoreDataPropertyInLiteral) \
V(JSDeleteProperty) \
V(JSHasProperty) \
+ V(JSCreateGeneratorObject) \
V(JSGetSuperConstructor)
#define JS_CONTEXT_OP_LIST(V) \
@@ -159,6 +159,7 @@
V(JSCreateScriptContext)
#define JS_OTHER_OP_LIST(V) \
+ V(JSConstructForwardVarargs) \
V(JSConstruct) \
V(JSConstructWithSpread) \
V(JSCallForwardVarargs) \
@@ -575,19 +576,14 @@
V(F32x4UConvertI32x4) \
V(F32x4Abs) \
V(F32x4Neg) \
- V(F32x4Sqrt) \
V(F32x4RecipApprox) \
V(F32x4RecipSqrtApprox) \
V(F32x4Add) \
+ V(F32x4AddHoriz) \
V(F32x4Sub) \
V(F32x4Mul) \
- V(F32x4Div) \
V(F32x4Min) \
V(F32x4Max) \
- V(F32x4MinNum) \
- V(F32x4MaxNum) \
- V(F32x4RecipRefine) \
- V(F32x4RecipSqrtRefine) \
V(F32x4Eq) \
V(F32x4Ne) \
V(F32x4Lt) \
@@ -604,6 +600,7 @@
V(I32x4Shl) \
V(I32x4ShrS) \
V(I32x4Add) \
+ V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -635,6 +632,7 @@
V(I16x8SConvertI32x4) \
V(I16x8Add) \
V(I16x8AddSaturateS) \
+ V(I16x8AddHoriz) \
V(I16x8Sub) \
V(I16x8SubSaturateS) \
V(I16x8Mul) \
@@ -691,19 +689,16 @@
V(S128Load) \
V(S128Store) \
V(S128Zero) \
+ V(S128Not) \
V(S128And) \
V(S128Or) \
V(S128Xor) \
- V(S128Not) \
- V(S32x4Select) \
- V(S32x4Swizzle) \
V(S32x4Shuffle) \
- V(S16x8Select) \
- V(S16x8Swizzle) \
+ V(S32x4Select) \
V(S16x8Shuffle) \
- V(S8x16Select) \
- V(S8x16Swizzle) \
+ V(S16x8Select) \
V(S8x16Shuffle) \
+ V(S8x16Select) \
V(S1x4Zero) \
V(S1x4And) \
V(S1x4Or) \
diff --git a/deps/v8/src/compiler/operator-properties.cc b/deps/v8/src/compiler/operator-properties.cc
index 0c0a3d803a..35b24d8531 100644
--- a/deps/v8/src/compiler/operator-properties.cc
+++ b/deps/v8/src/compiler/operator-properties.cc
@@ -34,6 +34,10 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSStrictEqual:
return false;
+ // Generator creation cannot call back into arbitrary JavaScript.
+ case IrOpcode::kJSCreateGeneratorObject:
+ return false;
+
// Binary operations
case IrOpcode::kJSAdd:
case IrOpcode::kJSSubtract:
@@ -92,6 +96,7 @@ bool OperatorProperties::HasFrameStateInput(const Operator* op) {
case IrOpcode::kJSToString:
// Call operations
+ case IrOpcode::kJSConstructForwardVarargs:
case IrOpcode::kJSConstruct:
case IrOpcode::kJSConstructWithSpread:
case IrOpcode::kJSCallForwardVarargs:
diff --git a/deps/v8/src/compiler/operator.cc b/deps/v8/src/compiler/operator.cc
index 4f746e2944..e43cd5cdb0 100644
--- a/deps/v8/src/compiler/operator.cc
+++ b/deps/v8/src/compiler/operator.cc
@@ -14,7 +14,11 @@ namespace {
template <typename N>
V8_INLINE N CheckRange(size_t val) {
- CHECK_LE(val, std::numeric_limits<N>::max());
+ // The getters on Operator for input and output counts currently return int.
+ // Thus check that the given value fits in the integer range.
+ // TODO(titzer): Remove this check once the getters return size_t.
+ CHECK_LE(val, std::min(static_cast<size_t>(std::numeric_limits<N>::max()),
+ static_cast<size_t>(kMaxInt)));
return static_cast<N>(val);
}
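
Note: CheckRange now clamps against kMaxInt in addition to N's own maximum, because callers read the counts back through int-returning getters; without the extra bound, a size_t that fits in, say, uint32_t but not in int would slip through. A standalone sketch of the tightened check, with std::numeric_limits<int>::max() standing in for V8's kMaxInt:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <limits>

    template <typename N>
    N CheckRange(size_t val) {
      size_t limit =
          std::min(static_cast<size_t>(std::numeric_limits<N>::max()),
                   static_cast<size_t>(std::numeric_limits<int>::max()));
      assert(val <= limit);  // CHECK_LE in the original
      return static_cast<N>(val);
    }
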
diff --git a/deps/v8/src/compiler/osr.cc b/deps/v8/src/compiler/osr.cc
index ebf2c421b5..2de3df6354 100644
--- a/deps/v8/src/compiler/osr.cc
+++ b/deps/v8/src/compiler/osr.cc
@@ -94,8 +94,7 @@ void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
continue;
}
if (orig->InputCount() == 0 || orig->opcode() == IrOpcode::kParameter ||
- orig->opcode() == IrOpcode::kOsrValue ||
- orig->opcode() == IrOpcode::kOsrGuard) {
+ orig->opcode() == IrOpcode::kOsrValue) {
// No need to copy leaf nodes or parameters.
mapping->at(orig->id()) = orig;
continue;
@@ -254,20 +253,6 @@ void PeelOuterLoopsForOsr(Graph* graph, CommonOperatorBuilder* common,
}
}
-void SetTypeForOsrValue(Node* osr_value, Node* loop,
- CommonOperatorBuilder* common) {
- Node* osr_guard = nullptr;
- for (Node* use : osr_value->uses()) {
- if (use->opcode() == IrOpcode::kOsrGuard) {
- DCHECK_EQ(use->InputAt(0), osr_value);
- osr_guard = use;
- break;
- }
- }
-
- NodeProperties::ChangeOp(osr_guard, common->OsrGuard(OsrGuardType::kAny));
-}
-
} // namespace
void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
@@ -297,12 +282,6 @@ void OsrHelper::Deconstruct(JSGraph* jsgraph, CommonOperatorBuilder* common,
CHECK(osr_loop); // Should have found the OSR loop.
- for (Node* use : osr_loop_entry->uses()) {
- if (use->opcode() == IrOpcode::kOsrValue) {
- SetTypeForOsrValue(use, osr_loop, common);
- }
- }
-
// Analyze the graph to determine how deeply nested the OSR loop is.
LoopTree* loop_tree = LoopFinder::BuildLoopTree(graph, tmp_zone);
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 585923fa69..bc8fd0cbe9 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -174,6 +174,8 @@ class PipelineData {
}
~PipelineData() {
+ delete code_generator_; // Must happen before zones are destroyed.
+ code_generator_ = nullptr;
DeleteRegisterAllocationZone();
DeleteInstructionZone();
DeleteGraphZone();
@@ -196,6 +198,8 @@ class PipelineData {
code_ = code;
}
+ CodeGenerator* code_generator() const { return code_generator_; }
+
// RawMachineAssembler generally produces graphs which cannot be verified.
bool MayHaveUnverifiableGraph() const { return outer_zone_ == nullptr; }
@@ -314,6 +318,11 @@ class PipelineData {
sequence(), debug_name());
}
+ void InitializeCodeGenerator(Linkage* linkage) {
+ DCHECK_NULL(code_generator_);
+ code_generator_ = new CodeGenerator(frame(), linkage, sequence(), info());
+ }
+
void BeginPhaseKind(const char* phase_kind_name) {
if (pipeline_statistics() != nullptr) {
pipeline_statistics()->BeginPhaseKind(phase_kind_name);
@@ -339,6 +348,7 @@ class PipelineData {
bool verify_graph_ = false;
bool is_asm_ = false;
Handle<Code> code_ = Handle<Code>::null();
+ CodeGenerator* code_generator_ = nullptr;
// All objects in the following group of fields are allocated in graph_zone_.
// They are all set to nullptr when the graph_zone_ is destroyed.
@@ -356,8 +366,7 @@ class PipelineData {
// All objects in the following group of fields are allocated in
// instruction_zone_. They are all set to nullptr when the instruction_zone_
- // is
- // destroyed.
+ // is destroyed.
ZoneStats::Scope instruction_zone_scope_;
Zone* instruction_zone_;
InstructionSequence* sequence_ = nullptr;
@@ -400,8 +409,11 @@ class PipelineImpl final {
// Run the concurrent optimization passes.
bool OptimizeGraph(Linkage* linkage);
- // Perform the actual code generation and return handle to a code object.
- Handle<Code> GenerateCode(Linkage* linkage);
+ // Run the code assembly pass.
+ void AssembleCode(Linkage* linkage);
+
+ // Run the code finalization pass.
+ Handle<Code> FinalizeCode();
bool ScheduleAndSelectInstructions(Linkage* linkage, bool trim_graph);
void RunPrintAndVerify(const char* phase, bool untyped = false);
@@ -615,6 +627,11 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
return AbortOptimization(kGraphBuildingFailed);
}
+ // Make sure that we have generated the maximal number of deopt entries.
+ // This is in order to avoid triggering the generation of deopt entries later
+ // during code assembly.
+ Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(isolate());
+
return SUCCEEDED;
}
@@ -624,7 +641,8 @@ PipelineCompilationJob::Status PipelineCompilationJob::ExecuteJobImpl() {
}
PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl() {
- Handle<Code> code = pipeline_.GenerateCode(linkage_);
+ pipeline_.AssembleCode(linkage_);
+ Handle<Code> code = pipeline_.FinalizeCode();
if (code.is_null()) {
if (info()->bailout_reason() == kNoReason) {
return AbortOptimization(kCodeGenerationFailed);
@@ -663,6 +681,8 @@ class PipelineWasmCompilationJob final : public CompilationJob {
Status FinalizeJobImpl() final;
private:
+ size_t AllocatedMemory() const override;
+
ZoneStats zone_stats_;
std::unique_ptr<PipelineStatistics> pipeline_statistics_;
PipelineData data_;
@@ -709,9 +729,14 @@ PipelineWasmCompilationJob::ExecuteJobImpl() {
return SUCCEEDED;
}
+size_t PipelineWasmCompilationJob::AllocatedMemory() const {
+ return pipeline_.data_->zone_stats()->GetCurrentAllocatedBytes();
+}
+
PipelineWasmCompilationJob::Status
PipelineWasmCompilationJob::FinalizeJobImpl() {
- pipeline_.GenerateCode(&linkage_);
+ pipeline_.AssembleCode(&linkage_);
+ pipeline_.FinalizeCode();
return SUCCEEDED;
}
@@ -765,12 +790,12 @@ struct GraphBuilderPhase {
BytecodeGraphBuilder graph_builder(
temp_zone, data->info()->shared_info(),
handle(data->info()->closure()->feedback_vector()),
- data->info()->osr_ast_id(), data->jsgraph(), 1.0f,
+ data->info()->osr_ast_id(), data->jsgraph(), CallFrequency(1.0f),
data->source_positions(), SourcePosition::kNotInlined, flags);
succeeded = graph_builder.CreateGraph();
} else {
AstGraphBuilderWithPositions graph_builder(
- temp_zone, data->info(), data->jsgraph(), 1.0f,
+ temp_zone, data->info(), data->jsgraph(), CallFrequency(1.0f),
data->loop_assignment(), data->source_positions());
succeeded = graph_builder.CreateGraph();
}
@@ -781,6 +806,30 @@ struct GraphBuilderPhase {
}
};
+namespace {
+
+Maybe<OuterContext> GetModuleContext(Handle<JSFunction> closure) {
+ Context* current = closure->context();
+ size_t distance = 0;
+ while (!current->IsNativeContext()) {
+ if (current->IsModuleContext()) {
+ return Just(OuterContext(handle(current), distance));
+ }
+ current = current->previous();
+ distance++;
+ }
+ return Nothing<OuterContext>();
+}
+
+Maybe<OuterContext> ChooseSpecializationContext(CompilationInfo* info) {
+ if (info->is_function_context_specializing()) {
+ DCHECK(info->has_context());
+ return Just(OuterContext(handle(info->context()), 0));
+ }
+ return GetModuleContext(info->closure());
+}
+
+} // anonymous namespace
struct InliningPhase {
static const char* phase_name() { return "inlining"; }
@@ -797,9 +846,7 @@ struct InliningPhase {
data->info()->dependencies());
JSContextSpecialization context_specialization(
&graph_reducer, data->jsgraph(),
- data->info()->is_function_context_specializing()
- ? handle(data->info()->context())
- : MaybeHandle<Context>(),
+ ChooseSpecializationContext(data->info()),
data->info()->is_function_context_specializing()
? data->info()->closure()
: MaybeHandle<JSFunction>());
@@ -1426,14 +1473,19 @@ struct JumpThreadingPhase {
}
};
+struct AssembleCodePhase {
+ static const char* phase_name() { return "assemble code"; }
+
+ void Run(PipelineData* data, Zone* temp_zone) {
+ data->code_generator()->AssembleCode();
+ }
+};
-struct GenerateCodePhase {
- static const char* phase_name() { return "generate code"; }
+struct FinalizeCodePhase {
+ static const char* phase_name() { return "finalize code"; }
- void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
- CodeGenerator generator(data->frame(), linkage, data->sequence(),
- data->info());
- data->set_code(generator.GenerateCode());
+ void Run(PipelineData* data, Zone* temp_zone) {
+ data->set_code(data->code_generator()->FinalizeCode());
}
};
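
Note: the single GenerateCodePhase is split into AssembleCodePhase and FinalizeCodePhase, with the CodeGenerator now owned by PipelineData (and deleted before the zones, per the destructor change above). A plausible motivation, stated as an assumption: assembly is pure computation that could run off the main thread, while finalization allocates the Code object on the heap and must stay on the main thread. The minimal shape of the split, as a sketch rather than V8's actual classes:

    class TwoPhaseCodegen {
     public:
      void Assemble() { assembled_ = true; }  // heap-free, backgroundable
      int Finalize() {                        // publishes the result
        return assembled_ ? 1 : 0;            // stands in for Handle<Code>
      }
     private:
      bool assembled_ = false;
    };
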
@@ -1595,19 +1647,14 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
Run<SimplifiedLoweringPhase>();
RunPrintAndVerify("Simplified lowering", true);
+ // From now on it is invalid to look at types on the nodes, because the types
+ // on the nodes might not make sense after representation selection due to the
+ // way we handle truncations; if we'd want to look at types afterwards we'd
+ // essentially need to re-type (large portions of) the graph.
+
+ // In order to catch bugs related to type access after this point, we now
+ // remove the types from the nodes (currently only in Debug builds).
#ifdef DEBUG
- // From now on it is invalid to look at types on the nodes, because:
- //
- // (a) The remaining passes (might) run concurrent to the main thread and
- // therefore must not access the Heap or the Isolate in an uncontrolled
- // way (as done by the type system), and
- // (b) the types on the nodes might not make sense after representation
- // selection due to the way we handle truncations; if we'd want to look
- // at types afterwards we'd essentially need to re-type (large portions
- // of) the graph.
- //
- // In order to catch bugs related to type access after this point we remove
- // the types from the nodes at this point (currently only in Debug builds).
Run<UntyperPhase>();
RunPrintAndVerify("Untyped", true);
#endif
@@ -1707,7 +1754,8 @@ Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info) {
if (!pipeline.CreateGraph()) return Handle<Code>::null();
if (!pipeline.OptimizeGraph(&linkage)) return Handle<Code>::null();
- return pipeline.GenerateCode(&linkage);
+ pipeline.AssembleCode(&linkage);
+ return pipeline.FinalizeCode();
}
// static
@@ -1883,13 +1931,16 @@ bool PipelineImpl::ScheduleAndSelectInstructions(Linkage* linkage,
return true;
}
-Handle<Code> PipelineImpl::GenerateCode(Linkage* linkage) {
+void PipelineImpl::AssembleCode(Linkage* linkage) {
PipelineData* data = this->data_;
-
data->BeginPhaseKind("code generation");
+ data->InitializeCodeGenerator(linkage);
+ Run<AssembleCodePhase>();
+}
- // Generate final machine code.
- Run<GenerateCodePhase>(linkage);
+Handle<Code> PipelineImpl::FinalizeCode() {
+ PipelineData* data = this->data_;
+ Run<FinalizeCodePhase>();
Handle<Code> code = data->code();
if (data->profiler_data()) {
@@ -1937,7 +1988,8 @@ Handle<Code> PipelineImpl::ScheduleAndGenerateCode(
if (!ScheduleAndSelectInstructions(&linkage, false)) return Handle<Code>();
// Generate the final machine code.
- return GenerateCode(&linkage);
+ AssembleCode(&linkage);
+ return FinalizeCode();
}
void PipelineImpl::AllocateRegisters(const RegisterConfiguration* config,
diff --git a/deps/v8/src/compiler/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
index 2967ad73ed..be10a67f24 100644
--- a/deps/v8/src/compiler/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/ppc/code-generator-ppc.cc
@@ -592,11 +592,12 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
AddressingMode mode = kMode_None; \
MemOperand operand = i.MemoryOperand(&mode, &index); \
DoubleRegister value = i.InputDoubleRegister(index); \
- __ frsp(kScratchDoubleReg, value); \
+      /* frsp is not needed here: the instruction selector */ \
+      /* verified that the value is already kFloat32. */      \
if (mode == kMode_MRI) { \
- __ stfs(kScratchDoubleReg, operand); \
+ __ stfs(value, operand); \
} else { \
- __ stfsx(kScratchDoubleReg, operand); \
+ __ stfsx(value, operand); \
} \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
} while (0)
@@ -704,11 +705,13 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
__ bge(&done); \
DoubleRegister value = i.InputDoubleRegister(3); \
__ frsp(kScratchDoubleReg, value); \
+      /* frsp is not needed here: the instruction selector */ \
+      /* verified that the value is already kFloat32. */      \
if (mode == kMode_MRI) { \
- __ stfs(kScratchDoubleReg, operand); \
+ __ stfs(value, operand); \
} else { \
CleanUInt32(offset); \
- __ stfsx(kScratchDoubleReg, operand); \
+ __ stfsx(value, operand); \
} \
__ bind(&done); \
DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
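
Note: frsp rounds a double-precision value to single precision before a 32-bit float store. The instruction-selector-side check makes that rounding redundant here — the incoming value is already kFloat32 — so the store can use the value register directly and no longer clobbers kScratchDoubleReg. In scalar terms:

    // frsp in scalar form. Converting a float to double and back is
    // exact, so re-rounding an already-single value changes nothing.
    static float RoundToSingle(double v) { return static_cast<float>(v); }
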
diff --git a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
index 449e710389..ea88e81a05 100644
--- a/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/ppc/instruction-selector-ppc.cc
@@ -174,6 +174,14 @@ void VisitBinop(InstructionSelector* selector, Node* node,
} // namespace
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index a2cf562115..671aafe381 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -257,6 +257,26 @@ Node* RawMachineAssembler::CallCFunction3(MachineType return_type,
return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2);
}
+Node* RawMachineAssembler::CallCFunction6(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5) {
+ MachineSignature::Builder builder(zone(), 1, 6);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ builder.AddParam(arg2_type);
+ builder.AddParam(arg3_type);
+ builder.AddParam(arg4_type);
+ builder.AddParam(arg5_type);
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+ return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2, arg3,
+ arg4, arg5);
+}
+
Node* RawMachineAssembler::CallCFunction8(
MachineType return_type, MachineType arg0_type, MachineType arg1_type,
MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
@@ -278,6 +298,31 @@ Node* RawMachineAssembler::CallCFunction8(
Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
return AddNode(common()->Call(descriptor), arraysize(args), args);
}
+
+Node* RawMachineAssembler::CallCFunction9(
+ MachineType return_type, MachineType arg0_type, MachineType arg1_type,
+ MachineType arg2_type, MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type, MachineType arg7_type,
+ MachineType arg8_type, Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5, Node* arg6, Node* arg7, Node* arg8) {
+ MachineSignature::Builder builder(zone(), 1, 9);
+ builder.AddReturn(return_type);
+ builder.AddParam(arg0_type);
+ builder.AddParam(arg1_type);
+ builder.AddParam(arg2_type);
+ builder.AddParam(arg3_type);
+ builder.AddParam(arg4_type);
+ builder.AddParam(arg5_type);
+ builder.AddParam(arg6_type);
+ builder.AddParam(arg7_type);
+ builder.AddParam(arg8_type);
+ Node* args[] = {function, arg0, arg1, arg2, arg3,
+ arg4, arg5, arg6, arg7, arg8};
+ const CallDescriptor* descriptor =
+ Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+ return AddNode(common()->Call(descriptor), arraysize(args), args);
+}
+
BasicBlock* RawMachineAssembler::Use(RawMachineLabel* label) {
label->used_ = true;
return EnsureBlock(label);
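[Annotation] A minimal usage sketch of the new six-argument helper, mirroring the existing CallCFunction3 call sites; `m`, `fn`, and the argument nodes below are hypothetical stand-ins, not code from this commit:

    // Hypothetical RawMachineAssembler `m`; fn and a0..a5 are placeholder nodes.
    Node* result = m.CallCFunction6(
        MachineType::Int32(),                          // return type
        MachineType::Pointer(), MachineType::Int32(),  // arg0, arg1 types
        MachineType::Int32(), MachineType::Int32(),    // arg2, arg3 types
        MachineType::Int32(), MachineType::Int32(),    // arg4, arg5 types
        fn, a0, a1, a2, a3, a4, a5);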
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index 19a0f3bfd4..a82f9e079a 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -84,8 +84,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* Int32Constant(int32_t value) {
return AddNode(common()->Int32Constant(value));
}
- Node* StackSlot(MachineRepresentation rep) {
- return AddNode(machine()->StackSlot(rep));
+ Node* StackSlot(MachineRepresentation rep, int alignment = 0) {
+ return AddNode(machine()->StackSlot(rep, alignment));
}
Node* Int64Constant(int64_t value) {
return AddNode(common()->Int64Constant(value));
@@ -773,6 +773,13 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* CallCFunction3(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, MachineType arg2_type,
Node* function, Node* arg0, Node* arg1, Node* arg2);
+ // Call to a C function with six arguments.
+ Node* CallCFunction6(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, Node* function, Node* arg0,
+ Node* arg1, Node* arg2, Node* arg3, Node* arg4,
+ Node* arg5);
// Call to a C function with eight arguments.
Node* CallCFunction8(MachineType return_type, MachineType arg0_type,
MachineType arg1_type, MachineType arg2_type,
@@ -781,6 +788,15 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
MachineType arg7_type, Node* function, Node* arg0,
Node* arg1, Node* arg2, Node* arg3, Node* arg4,
Node* arg5, Node* arg6, Node* arg7);
+ // Call to a C function with nine arguments.
+ Node* CallCFunction9(MachineType return_type, MachineType arg0_type,
+ MachineType arg1_type, MachineType arg2_type,
+ MachineType arg3_type, MachineType arg4_type,
+ MachineType arg5_type, MachineType arg6_type,
+ MachineType arg7_type, MachineType arg8_type,
+ Node* function, Node* arg0, Node* arg1, Node* arg2,
+ Node* arg3, Node* arg4, Node* arg5, Node* arg6,
+ Node* arg7, Node* arg8);
// ===========================================================================
// The following utility methods deal with control flow, hence might switch
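[Annotation] With the extra parameter, StackSlot callers can request an over-aligned slot while the default (alignment = 0) keeps the old behaviour. Hypothetical usage; the alignment value is illustrative, as this commit only threads the parameter through:

    Node* slot        = m.StackSlot(MachineRepresentation::kWord32);      // as before
    Node* aligned_slot = m.StackSlot(MachineRepresentation::kFloat64, 8); // illustrative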
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc
index 0439c536de..f15df671cf 100644
--- a/deps/v8/src/compiler/representation-change.cc
+++ b/deps/v8/src/compiler/representation-change.cc
@@ -364,12 +364,22 @@ Node* RepresentationChanger::GetTaggedPointerRepresentationFor(
}
op = simplified()->ChangeFloat64ToTaggedPointer();
} else if (output_rep == MachineRepresentation::kFloat32) {
- // float32 -> float64 -> tagged
- node = InsertChangeFloat32ToFloat64(node);
- op = simplified()->ChangeFloat64ToTaggedPointer();
+ if (output_type->Is(Type::Number())) {
+ // float32 -> float64 -> tagged
+ node = InsertChangeFloat32ToFloat64(node);
+ op = simplified()->ChangeFloat64ToTaggedPointer();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedPointer);
+ }
} else if (output_rep == MachineRepresentation::kFloat64) {
- // float64 -> tagged
- op = simplified()->ChangeFloat64ToTaggedPointer();
+ if (output_type->Is(Type::Number())) {
+ // float64 -> tagged
+ op = simplified()->ChangeFloat64ToTaggedPointer();
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTaggedPointer);
+ }
} else if (CanBeTaggedSigned(output_rep) &&
use_info.type_check() == TypeCheckKind::kHeapObject) {
if (!output_type->Maybe(Type::SignedSmall())) {
@@ -452,11 +462,14 @@ Node* RepresentationChanger::GetTaggedRepresentationFor(
Type::Unsigned32())) { // float64 -> uint32 -> tagged
node = InsertChangeFloat64ToUint32(node);
op = simplified()->ChangeUint32ToTagged();
- } else {
+ } else if (output_type->Is(Type::Number())) {
op = simplified()->ChangeFloat64ToTagged(
output_type->Maybe(Type::MinusZero())
? CheckForMinusZeroMode::kCheckForMinusZero
: CheckForMinusZeroMode::kDontCheckForMinusZero);
+ } else {
+ return TypeError(node, output_rep, output_type,
+ MachineRepresentation::kTagged);
}
} else {
return TypeError(node, output_rep, output_type,
@@ -654,7 +667,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
use_info.type_check() == TypeCheckKind::kSigned32) {
op = simplified()->CheckedFloat64ToInt32(
output_type->Maybe(Type::MinusZero())
- ? CheckForMinusZeroMode::kCheckForMinusZero
+ ? use_info.minus_zero_check()
: CheckForMinusZeroMode::kDontCheckForMinusZero);
} else if (output_type->Is(Type::Unsigned32())) {
op = machine()->ChangeFloat64ToUint32();
@@ -686,7 +699,7 @@ Node* RepresentationChanger::GetWord32RepresentationFor(
} else if (use_info.type_check() == TypeCheckKind::kSigned32) {
op = simplified()->CheckedTaggedToInt32(
output_type->Maybe(Type::MinusZero())
- ? CheckForMinusZeroMode::kCheckForMinusZero
+ ? use_info.minus_zero_check()
: CheckForMinusZeroMode::kDontCheckForMinusZero);
} else if (output_type->Is(Type::Unsigned32())) {
op = simplified()->ChangeTaggedToUint32();
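[Annotation] The two use_info.minus_zero_check() changes let each use decide whether -0 must be distinguished when check-converting to int32. A standalone refresher on why the check exists at all (plain C++, not V8 code):

    #include <cstdio>
    int main() {
      double pz = 0.0, mz = -0.0;
      // Both truncate to int32 0, yet they are observably different doubles:
      printf("%d %d\n", (int)pz == (int)mz, 1.0 / mz < 0);  // prints: 1 1
    }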
diff --git a/deps/v8/src/compiler/representation-change.h b/deps/v8/src/compiler/representation-change.h
index af96f7333f..b4f3366d42 100644
--- a/deps/v8/src/compiler/representation-change.h
+++ b/deps/v8/src/compiler/representation-change.h
@@ -198,8 +198,8 @@ class UseInfo {
TypeCheckKind::kSignedSmall);
}
static UseInfo CheckedSigned32AsWord32(IdentifyZeros identify_zeros) {
- return UseInfo(MachineRepresentation::kWord32, Truncation::Any(),
- TypeCheckKind::kSigned32);
+ return UseInfo(MachineRepresentation::kWord32,
+ Truncation::Any(identify_zeros), TypeCheckKind::kSigned32);
}
static UseInfo CheckedNumberAsFloat64() {
return UseInfo(MachineRepresentation::kFloat64, Truncation::Float64(),
diff --git a/deps/v8/src/compiler/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
index 228ec3c0d5..f4e8ea13d2 100644
--- a/deps/v8/src/compiler/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/s390/instruction-selector-s390.cc
@@ -702,6 +702,15 @@ void VisitBinOp(InstructionSelector* selector, Node* node,
} // namespace
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
+
void InstructionSelector::VisitLoad(Node* node) {
S390OperandGenerator g(this);
ArchOpcode opcode = SelectLoadOpcode(node);
@@ -2050,11 +2059,18 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
return VisitWord32BinOp(selector, node, kS390_Sub32,
SubOperandMode, cont);
case IrOpcode::kInt32MulWithOverflow:
- cont->OverwriteAndNegateIfEqual(kNotEqual);
- return VisitWord32BinOp(
- selector, node, kS390_Mul32WithOverflow,
- OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
- cont);
+ if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitWord32BinOp(
+ selector, node, kS390_Mul32,
+ OperandMode::kAllowRRR | OperandMode::kAllowRM, cont);
+ } else {
+ cont->OverwriteAndNegateIfEqual(kNotEqual);
+ return VisitWord32BinOp(
+ selector, node, kS390_Mul32WithOverflow,
+ OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
+ cont);
+ }
case IrOpcode::kInt32AbsWithOverflow:
cont->OverwriteAndNegateIfEqual(kOverflow);
return VisitWord32UnaryOp(selector, node, kS390_Abs32,
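[Annotation] Per the CpuFeatures gate, the new fast path assumes the z-series MISC_INSTR_EXT2 facility offers a 32-bit multiply that sets a testable overflow condition, so the selector can branch on kOverflow directly; older CPUs keep the compare-based kNotEqual sequence. A portable sketch of that fallback's logic (not V8 code):

    #include <cstdint>
    // Widening multiply, then compare the truncated result back -- the moral
    // equivalent of the kS390_Mul32WithOverflow / kNotEqual path.
    bool Mul32Overflows(int32_t a, int32_t b, int32_t* out) {
      int64_t wide = static_cast<int64_t>(a) * b;
      *out = static_cast<int32_t>(wide);
      return wide != *out;
    }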
diff --git a/deps/v8/src/compiler/schedule.cc b/deps/v8/src/compiler/schedule.cc
index ea218671ad..3660553041 100644
--- a/deps/v8/src/compiler/schedule.cc
+++ b/deps/v8/src/compiler/schedule.cc
@@ -4,8 +4,8 @@
#include "src/compiler/schedule.h"
-#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
#include "src/ostreams.h"
namespace v8 {
@@ -96,6 +96,8 @@ BasicBlock* BasicBlock::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
return b1;
}
+void BasicBlock::Print() { OFStream(stdout) << this; }
+
std::ostream& operator<<(std::ostream& os, const BasicBlock& block) {
os << "B" << block.id();
#if DEBUG
@@ -415,6 +417,21 @@ void Schedule::EnsureDeferredCodeSingleEntryPoint(BasicBlock* block) {
merger->set_deferred(false);
block->predecessors().clear();
block->predecessors().push_back(merger);
+ MovePhis(block, merger);
+}
+
+void Schedule::MovePhis(BasicBlock* from, BasicBlock* to) {
+ for (size_t i = 0; i < from->NodeCount();) {
+ Node* node = from->NodeAt(i);
+ if (node->opcode() == IrOpcode::kPhi) {
+ to->AddNode(node);
+ from->RemoveNode(from->begin() + i);
+ DCHECK_EQ(nodeid_to_block_[node->id()], from);
+ nodeid_to_block_[node->id()] = to;
+ } else {
+ ++i;
+ }
+ }
}
void Schedule::PropagateDeferredMark() {
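[Annotation] MovePhis keeps the scheduling invariant that a phi lives in the block whose predecessors supply its inputs: once EnsureDeferredCodeSingleEntryPoint reroutes all predecessors through the new merger, the phis must migrate with them. A toy sketch of the same reparenting (not V8 code):

    #include <vector>
    struct Phi { std::vector<int> inputs; };  // one input per predecessor
    struct Block { std::vector<Block*> predecessors; std::vector<Phi> phis; };
    void MovePhis(Block& from, Block& to) {
      // After rerouting, `to` carries `from`'s old predecessors, so it owns the phis.
      to.phis.insert(to.phis.end(), from.phis.begin(), from.phis.end());
      from.phis.clear();
    }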
diff --git a/deps/v8/src/compiler/schedule.h b/deps/v8/src/compiler/schedule.h
index b5e696dc41..ed69958e8b 100644
--- a/deps/v8/src/compiler/schedule.h
+++ b/deps/v8/src/compiler/schedule.h
@@ -65,6 +65,8 @@ class V8_EXPORT_PRIVATE BasicBlock final
AssemblerDebugInfo debug_info() const { return debug_info_; }
#endif // DEBUG
+ void Print();
+
// Predecessors.
BasicBlockVector& predecessors() { return predecessors_; }
const BasicBlockVector& predecessors() const { return predecessors_; }
@@ -95,6 +97,8 @@ class V8_EXPORT_PRIVATE BasicBlock final
iterator begin() { return nodes_.begin(); }
iterator end() { return nodes_.end(); }
+ void RemoveNode(iterator it) { nodes_.erase(it); }
+
typedef NodeVector::const_iterator const_iterator;
const_iterator begin() const { return nodes_.begin(); }
const_iterator end() const { return nodes_.end(); }
@@ -274,6 +278,8 @@ class V8_EXPORT_PRIVATE Schedule final : public NON_EXPORTED_BASE(ZoneObject) {
void EnsureSplitEdgeForm(BasicBlock* block);
// Ensure entry into a deferred block happens from a single hot block.
void EnsureDeferredCodeSingleEntryPoint(BasicBlock* block);
+ // Move Phi operands to newly created merger blocks
+ void MovePhis(BasicBlock* from, BasicBlock* to);
// Copy deferred block markers down as far as possible
void PropagateDeferredMark();
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.cc b/deps/v8/src/compiler/simd-scalar-lowering.cc
index 8f967788db..6cf88d33cf 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.cc
+++ b/deps/v8/src/compiler/simd-scalar-lowering.cc
@@ -16,6 +16,16 @@ namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+static const int kNumLanes32 = 4;
+static const int kNumLanes16 = 8;
+static const int kNumLanes8 = 16;
+static const int32_t kMask16 = 0xffff;
+static const int32_t kMask8 = 0xff;
+static const int32_t kShift16 = 16;
+static const int32_t kShift8 = 24;
+} // anonymous
+
SimdScalarLowering::SimdScalarLowering(
JSGraph* jsgraph, Signature<MachineRepresentation>* signature)
: jsgraph_(jsgraph),
@@ -35,7 +45,7 @@ SimdScalarLowering::SimdScalarLowering(
void SimdScalarLowering::LowerGraph() {
stack_.push_back({graph()->end(), 0});
state_.Set(graph()->end(), State::kOnStack);
- replacements_[graph()->end()->id()].type = SimdType::kInt32;
+ replacements_[graph()->end()->id()].type = SimdType::kInt32x4;
while (!stack_.empty()) {
NodeState& top = stack_.back();
@@ -73,11 +83,14 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4SConvertF32x4) \
V(I32x4UConvertF32x4) \
V(I32x4Neg) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
V(I32x4Add) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
V(I32x4MaxS) \
+ V(I32x4ShrU) \
V(I32x4MinU) \
V(I32x4MaxU) \
V(S128And) \
@@ -96,7 +109,6 @@ void SimdScalarLowering::LowerGraph() {
V(F32x4Add) \
V(F32x4Sub) \
V(F32x4Mul) \
- V(F32x4Div) \
V(F32x4Min) \
V(F32x4Max)
@@ -120,6 +132,74 @@ void SimdScalarLowering::LowerGraph() {
V(I32x4GtU) \
V(I32x4GeU)
+#define FOREACH_INT16X8_OPCODE(V) \
+ V(I16x8Splat) \
+ V(I16x8ExtractLane) \
+ V(I16x8ReplaceLane) \
+ V(I16x8Neg) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8Add) \
+ V(I16x8AddSaturateS) \
+ V(I16x8Sub) \
+ V(I16x8SubSaturateS) \
+ V(I16x8Mul) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8ShrU) \
+ V(I16x8AddSaturateU) \
+ V(I16x8SubSaturateU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU)
+
+#define FOREACH_INT8X16_OPCODE(V) \
+ V(I8x16Splat) \
+ V(I8x16ExtractLane) \
+ V(I8x16ReplaceLane) \
+ V(I8x16Neg) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16Add) \
+ V(I8x16AddSaturateS) \
+ V(I8x16Sub) \
+ V(I8x16SubSaturateS) \
+ V(I8x16Mul) \
+ V(I8x16MinS) \
+ V(I8x16MaxS) \
+ V(I8x16ShrU) \
+ V(I8x16AddSaturateU) \
+ V(I8x16SubSaturateU) \
+ V(I8x16MinU) \
+ V(I8x16MaxU)
+
+#define FOREACH_INT16X8_TO_SIMD1X8OPCODE(V) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8LtS) \
+ V(I16x8LeS) \
+ V(I16x8LtU) \
+ V(I16x8LeU)
+
+#define FOREACH_INT8X16_TO_SIMD1X16OPCODE(V) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16LtS) \
+ V(I8x16LeS) \
+ V(I8x16LtU) \
+ V(I8x16LeU)
+
+#define FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(V) \
+ V(Float32x4, Float32) \
+ V(Int32x4, Int32) \
+ V(Int16x8, Int16) \
+ V(Int8x16, Int8)
+
+#define FOREACH_SIMD_TYPE_TO_MACHINE_REP(V) \
+ V(Float32x4, Float32) \
+ V(Int32x4, Word32) \
+ V(Int16x8, Word16) \
+ V(Int8x16, Word8)
+
void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
switch (node->opcode()) {
#define CASE_STMT(name) case IrOpcode::k##name:
@@ -127,11 +207,11 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
case IrOpcode::kReturn:
case IrOpcode::kParameter:
case IrOpcode::kCall: {
- replacements_[node->id()].type = SimdType::kInt32;
+ replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
FOREACH_FLOAT32X4_OPCODE(CASE_STMT) {
- replacements_[node->id()].type = SimdType::kFloat32;
+ replacements_[node->id()].type = SimdType::kFloat32x4;
break;
}
FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
@@ -139,24 +219,52 @@ void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
replacements_[node->id()].type = SimdType::kSimd1x4;
break;
}
+ FOREACH_INT16X8_OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt16x8;
+ break;
+ }
+ FOREACH_INT16X8_TO_SIMD1X8OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kSimd1x8;
+ break;
+ }
+ FOREACH_INT8X16_OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt8x16;
+ break;
+ }
+ FOREACH_INT8X16_TO_SIMD1X16OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kSimd1x16;
+ break;
+ }
default: {
switch (output->opcode()) {
FOREACH_FLOAT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
case IrOpcode::kF32x4SConvertI32x4:
case IrOpcode::kF32x4UConvertI32x4: {
- replacements_[node->id()].type = SimdType::kInt32;
+ replacements_[node->id()].type = SimdType::kInt32x4;
break;
}
FOREACH_INT32X4_TO_SIMD1X4OPCODE(CASE_STMT)
case IrOpcode::kI32x4SConvertF32x4:
case IrOpcode::kI32x4UConvertF32x4: {
- replacements_[node->id()].type = SimdType::kFloat32;
+ replacements_[node->id()].type = SimdType::kFloat32x4;
break;
}
case IrOpcode::kS32x4Select: {
replacements_[node->id()].type = SimdType::kSimd1x4;
break;
}
+ FOREACH_INT16X8_TO_SIMD1X8OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt16x8;
+ break;
+ }
+ FOREACH_INT8X16_TO_SIMD1X16OPCODE(CASE_STMT) {
+ replacements_[node->id()].type = SimdType::kInt8x16;
+ break;
+ }
+ case IrOpcode::kS16x8Select: {
+ replacements_[node->id()].type = SimdType::kSimd1x8;
+ break;
+ }
default: {
replacements_[node->id()].type = replacements_[output->id()].type;
}
@@ -200,42 +308,66 @@ static int GetReturnCountAfterLowering(
return result;
}
-void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices) {
- new_indices[0] = index;
- for (size_t i = 1; i < kMaxLanes; ++i) {
- new_indices[i] = graph()->NewNode(machine()->Int32Add(), index,
- graph()->NewNode(common()->Int32Constant(
- static_cast<int>(i) * kLaneWidth)));
+int SimdScalarLowering::NumLanes(SimdType type) {
+ int num_lanes = 0;
+ if (type == SimdType::kFloat32x4 || type == SimdType::kInt32x4 ||
+ type == SimdType::kSimd1x4) {
+ num_lanes = kNumLanes32;
+ } else if (type == SimdType::kInt16x8 || type == SimdType::kSimd1x8) {
+ num_lanes = kNumLanes16;
+ } else if (type == SimdType::kInt8x16 || type == SimdType::kSimd1x16) {
+ num_lanes = kNumLanes8;
+ } else {
+ UNREACHABLE();
+ }
+ return num_lanes;
+}
+
+constexpr int SimdScalarLowering::kLaneOffsets[];
+
+void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices,
+ SimdType type) {
+ int num_lanes = NumLanes(type);
+ int lane_width = kSimd128Size / num_lanes;
+ int laneIndex = kLaneOffsets[0] / lane_width;
+ new_indices[laneIndex] = index;
+ for (int i = 1; i < num_lanes; ++i) {
+ laneIndex = kLaneOffsets[i * lane_width] / lane_width;
+ new_indices[laneIndex] = graph()->NewNode(
+ machine()->Int32Add(), index,
+ graph()->NewNode(
+ common()->Int32Constant(static_cast<int>(i) * lane_width)));
}
}
void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
- const Operator* load_op) {
+ const Operator* load_op, SimdType type) {
if (rep == MachineRepresentation::kSimd128) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- Node* indices[kMaxLanes];
- GetIndexNodes(index, indices);
- Node* rep_nodes[kMaxLanes];
+ int num_lanes = NumLanes(type);
+ Node** indices = zone()->NewArray<Node*>(num_lanes);
+ GetIndexNodes(index, indices, type);
+ Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
rep_nodes[0] = node;
+ rep_nodes[0]->ReplaceInput(1, indices[0]);
NodeProperties::ChangeOp(rep_nodes[0], load_op);
if (node->InputCount() > 2) {
DCHECK(node->InputCount() > 3);
Node* effect_input = node->InputAt(2);
Node* control_input = node->InputAt(3);
- rep_nodes[3] = graph()->NewNode(load_op, base, indices[3], effect_input,
- control_input);
- rep_nodes[2] = graph()->NewNode(load_op, base, indices[2], rep_nodes[3],
- control_input);
- rep_nodes[1] = graph()->NewNode(load_op, base, indices[1], rep_nodes[2],
- control_input);
+ for (int i = num_lanes - 1; i > 0; --i) {
+ rep_nodes[i] = graph()->NewNode(load_op, base, indices[i], effect_input,
+ control_input);
+ effect_input = rep_nodes[i];
+ }
rep_nodes[0]->ReplaceInput(2, rep_nodes[1]);
} else {
- for (size_t i = 1; i < kMaxLanes; ++i) {
+ for (int i = 1; i < num_lanes; ++i) {
rep_nodes[i] = graph()->NewNode(load_op, base, indices[i]);
}
}
- ReplaceNode(node, rep_nodes);
+ ReplaceNode(node, rep_nodes, num_lanes);
} else {
DefaultLowering(node);
}
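[Annotation] The rewritten load lowering is lane-count generic: it computes one address per lane (index + i * lane_width, see GetIndexNodes) and threads the effect chain through the new loads from the last lane back to lane 1. A standalone sketch of the resulting memory-access pattern for i32x4 on little-endian (not V8 code):

    #include <cstdint>
    #include <cstring>
    void LoadI32x4Scalarized(const uint8_t* base, int index, int32_t out[4]) {
      const int lane_width = 16 / 4;  // kSimd128Size / num_lanes
      for (int i = 0; i < 4; ++i)     // one scalar load per lane
        std::memcpy(&out[i], base + index + i * lane_width, sizeof(int32_t));
    }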
@@ -247,36 +379,36 @@ void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
if (rep == MachineRepresentation::kSimd128) {
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- Node* indices[kMaxLanes];
- GetIndexNodes(index, indices);
+ int num_lanes = NumLanes(rep_type);
+ Node** indices = zone()->NewArray<Node*>(num_lanes);
+ GetIndexNodes(index, indices, rep_type);
DCHECK(node->InputCount() > 2);
Node* value = node->InputAt(2);
DCHECK(HasReplacement(1, value));
- Node* rep_nodes[kMaxLanes];
+ Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
rep_nodes[0] = node;
Node** rep_inputs = GetReplacementsWithType(value, rep_type);
rep_nodes[0]->ReplaceInput(2, rep_inputs[0]);
+ rep_nodes[0]->ReplaceInput(1, indices[0]);
NodeProperties::ChangeOp(node, store_op);
if (node->InputCount() > 3) {
DCHECK(node->InputCount() > 4);
Node* effect_input = node->InputAt(3);
Node* control_input = node->InputAt(4);
- rep_nodes[3] = graph()->NewNode(store_op, base, indices[3], rep_inputs[3],
- effect_input, control_input);
- rep_nodes[2] = graph()->NewNode(store_op, base, indices[2], rep_inputs[2],
- rep_nodes[3], control_input);
- rep_nodes[1] = graph()->NewNode(store_op, base, indices[1], rep_inputs[1],
- rep_nodes[2], control_input);
+ for (int i = num_lanes - 1; i > 0; --i) {
+ rep_nodes[i] =
+ graph()->NewNode(store_op, base, indices[i], rep_inputs[i],
+ effect_input, control_input);
+ effect_input = rep_nodes[i];
+ }
rep_nodes[0]->ReplaceInput(3, rep_nodes[1]);
-
} else {
- for (size_t i = 1; i < kMaxLanes; ++i) {
+ for (int i = 1; i < num_lanes; ++i) {
rep_nodes[i] =
graph()->NewNode(store_op, base, indices[i], rep_inputs[i]);
}
}
-
- ReplaceNode(node, rep_nodes);
+ ReplaceNode(node, rep_nodes, num_lanes);
} else {
DefaultLowering(node);
}
@@ -287,47 +419,146 @@ void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType input_rep_type,
DCHECK(node->InputCount() == 2);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ int num_lanes = NumLanes(input_rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
if (invert_inputs) {
rep_node[i] = graph()->NewNode(op, rep_right[i], rep_left[i]);
} else {
rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
}
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
+}
+
+Node* SimdScalarLowering::FixUpperBits(Node* input, int32_t shift) {
+ return graph()->NewNode(machine()->Word32Sar(),
+ graph()->NewNode(machine()->Word32Shl(), input,
+ jsgraph_->Int32Constant(shift)),
+ jsgraph_->Int32Constant(shift));
+}
+
+void SimdScalarLowering::LowerBinaryOpForSmallInt(Node* node,
+ SimdType input_rep_type,
+ const Operator* op) {
+ DCHECK(node->InputCount() == 2);
+ DCHECK(input_rep_type == SimdType::kInt16x8 ||
+ input_rep_type == SimdType::kInt8x16);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
+ int num_lanes = NumLanes(input_rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ int32_t shift_val =
+ (input_rep_type == SimdType::kInt16x8) ? kShift16 : kShift8;
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = FixUpperBits(graph()->NewNode(op, rep_left[i], rep_right[i]),
+ shift_val);
+ }
+ ReplaceNode(node, rep_node, num_lanes);
+}
+
+Node* SimdScalarLowering::Mask(Node* input, int32_t mask) {
+ return graph()->NewNode(machine()->Word32And(), input,
+ jsgraph_->Int32Constant(mask));
+}
+
+void SimdScalarLowering::LowerSaturateBinaryOp(Node* node,
+ SimdType input_rep_type,
+ const Operator* op,
+ bool is_signed) {
+ DCHECK(node->InputCount() == 2);
+ DCHECK(input_rep_type == SimdType::kInt16x8 ||
+ input_rep_type == SimdType::kInt8x16);
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
+ int32_t min = 0;
+ int32_t max = 0;
+ int32_t mask = 0;
+ int32_t shift_val = 0;
+ MachineRepresentation phi_rep;
+ if (input_rep_type == SimdType::kInt16x8) {
+ if (is_signed) {
+ min = std::numeric_limits<int16_t>::min();
+ max = std::numeric_limits<int16_t>::max();
+ } else {
+ min = std::numeric_limits<uint16_t>::min();
+ max = std::numeric_limits<uint16_t>::max();
+ }
+ mask = kMask16;
+ shift_val = kShift16;
+ phi_rep = MachineRepresentation::kWord16;
+ } else {
+ if (is_signed) {
+ min = std::numeric_limits<int8_t>::min();
+ max = std::numeric_limits<int8_t>::max();
+ } else {
+ min = std::numeric_limits<uint8_t>::min();
+ max = std::numeric_limits<uint8_t>::max();
+ }
+ mask = kMask8;
+ shift_val = kShift8;
+ phi_rep = MachineRepresentation::kWord8;
+ }
+ int num_lanes = NumLanes(input_rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+ Node* op_result = nullptr;
+ Node* left = is_signed ? rep_left[i] : Mask(rep_left[i], mask);
+ Node* right = is_signed ? rep_right[i] : Mask(rep_right[i], mask);
+ op_result = graph()->NewNode(op, left, right);
+ Diamond d_min(graph(), common(),
+ graph()->NewNode(machine()->Int32LessThan(), op_result,
+ jsgraph_->Int32Constant(min)));
+ rep_node[i] = d_min.Phi(phi_rep, jsgraph_->Int32Constant(min), op_result);
+ Diamond d_max(graph(), common(),
+ graph()->NewNode(machine()->Int32LessThan(),
+ jsgraph_->Int32Constant(max), rep_node[i]));
+ rep_node[i] = d_max.Phi(phi_rep, jsgraph_->Int32Constant(max), rep_node[i]);
+ rep_node[i] =
+ is_signed ? rep_node[i] : FixUpperBits(rep_node[i], shift_val);
+ }
+ ReplaceNode(node, rep_node, num_lanes);
}
void SimdScalarLowering::LowerUnaryOp(Node* node, SimdType input_rep_type,
const Operator* op) {
DCHECK(node->InputCount() == 1);
Node** rep = GetReplacementsWithType(node->InputAt(0), input_rep_type);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ int num_lanes = NumLanes(input_rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
rep_node[i] = graph()->NewNode(op, rep[i]);
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
}
void SimdScalarLowering::LowerIntMinMax(Node* node, const Operator* op,
- bool is_max) {
+ bool is_max, SimdType type) {
DCHECK(node->InputCount() == 2);
- Node** rep_left = GetReplacementsWithType(node->InputAt(0), SimdType::kInt32);
- Node** rep_right =
- GetReplacementsWithType(node->InputAt(1), SimdType::kInt32);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ Node** rep_left = GetReplacementsWithType(node->InputAt(0), type);
+ Node** rep_right = GetReplacementsWithType(node->InputAt(1), type);
+ int num_lanes = NumLanes(type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ MachineRepresentation rep = MachineRepresentation::kNone;
+ if (type == SimdType::kInt32x4) {
+ rep = MachineRepresentation::kWord32;
+ } else if (type == SimdType::kInt16x8) {
+ rep = MachineRepresentation::kWord16;
+ } else if (type == SimdType::kInt8x16) {
+ rep = MachineRepresentation::kWord8;
+ } else {
+ UNREACHABLE();
+ }
+ for (int i = 0; i < num_lanes; ++i) {
Diamond d(graph(), common(),
graph()->NewNode(op, rep_left[i], rep_right[i]));
if (is_max) {
- rep_node[i] =
- d.Phi(MachineRepresentation::kWord32, rep_right[i], rep_left[i]);
+ rep_node[i] = d.Phi(rep, rep_right[i], rep_left[i]);
} else {
- rep_node[i] =
- d.Phi(MachineRepresentation::kWord32, rep_left[i], rep_right[i]);
+ rep_node[i] = d.Phi(rep, rep_left[i], rep_right[i]);
}
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
}
Node* SimdScalarLowering::BuildF64Trunc(Node* input) {
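[Annotation] FixUpperBits re-normalizes a narrow lane held in a 32-bit word after arithmetic that may overflow the lane width: shift left by kShift16/kShift8, then arithmetic-shift right, which re-sign-extends the low 16/8 bits. Standalone illustration for an i8 lane (not V8 code; assumes the usual arithmetic right shift on negative values):

    #include <cstdint>
    #include <cstdio>
    int32_t FixUpperBits8(int32_t x) {  // kShift8 == 24
      return static_cast<int32_t>(static_cast<uint32_t>(x) << 24) >> 24;
    }
    int main() {
      printf("%d\n", FixUpperBits8(0x7f + 1));  // 127 + 1 wraps to -128
    }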
@@ -362,14 +593,14 @@ Node* SimdScalarLowering::BuildF64Trunc(Node* input) {
void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
DCHECK(node->InputCount() == 1);
- Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kFloat32);
- Node* rep_node[kMaxLanes];
+ Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kFloat32x4);
+ Node* rep_node[kNumLanes32];
Node* double_zero = graph()->NewNode(common()->Float64Constant(0.0));
Node* min = graph()->NewNode(
common()->Float64Constant(static_cast<double>(is_signed ? kMinInt : 0)));
Node* max = graph()->NewNode(common()->Float64Constant(
static_cast<double>(is_signed ? kMaxInt : 0xffffffffu)));
- for (int i = 0; i < kMaxLanes; ++i) {
+ for (int i = 0; i < kNumLanes32; ++i) {
Node* double_rep =
graph()->NewNode(machine()->ChangeFloat32ToFloat64(), rep[i]);
Diamond nan_d(graph(), common(), graph()->NewNode(machine()->Float64Equal(),
@@ -390,21 +621,55 @@ void SimdScalarLowering::LowerConvertFromFloat(Node* node, bool is_signed) {
graph()->NewNode(machine()->TruncateFloat64ToUint32(), trunc);
}
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, kNumLanes32);
}
-void SimdScalarLowering::LowerShiftOp(Node* node, const Operator* op) {
- static int32_t shift_mask = 0x1f;
+void SimdScalarLowering::LowerShiftOp(Node* node, SimdType type) {
DCHECK_EQ(1, node->InputCount());
int32_t shift_amount = OpParameter<int32_t>(node);
- Node* shift_node =
- graph()->NewNode(common()->Int32Constant(shift_amount & shift_mask));
- Node** rep = GetReplacementsWithType(node->InputAt(0), SimdType::kInt32);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
- rep_node[i] = graph()->NewNode(op, rep[i], shift_node);
+ Node* shift_node = graph()->NewNode(common()->Int32Constant(shift_amount));
+ Node** rep = GetReplacementsWithType(node->InputAt(0), type);
+ int num_lanes = NumLanes(type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+ rep_node[i] = rep[i];
+ switch (node->opcode()) {
+ case IrOpcode::kI8x16ShrU:
+ rep_node[i] = Mask(rep_node[i], kMask8);
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
+ break;
+ case IrOpcode::kI16x8ShrU:
+ rep_node[i] = Mask(rep_node[i], kMask16); // Fall through.
+ case IrOpcode::kI32x4ShrU:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Shr(), rep_node[i], shift_node);
+ break;
+ case IrOpcode::kI32x4Shl:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
+ break;
+ case IrOpcode::kI16x8Shl:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
+ rep_node[i] = FixUpperBits(rep_node[i], kShift16);
+ break;
+ case IrOpcode::kI8x16Shl:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Shl(), rep_node[i], shift_node);
+ rep_node[i] = FixUpperBits(rep_node[i], kShift8);
+ break;
+ case IrOpcode::kI32x4ShrS:
+ case IrOpcode::kI16x8ShrS:
+ case IrOpcode::kI8x16ShrS:
+ rep_node[i] =
+ graph()->NewNode(machine()->Word32Sar(), rep_node[i], shift_node);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
}
void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
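[Annotation] For the unsigned right shifts, the lane is masked (kMask8/kMask16) before Word32Shr because lanes sit sign-extended in 32-bit words; an unmasked logical shift would drag copies of the sign bit into the lane. Worked example for an i8 lane (not V8 code):

    #include <cstdint>
    #include <cstdio>
    int main() {
      int32_t lane = static_cast<int8_t>(0x80);                       // 0xffffff80
      uint32_t unmasked = static_cast<uint32_t>(lane) >> 1;           // 0x7fffffc0: wrong
      uint32_t masked   = (static_cast<uint32_t>(lane) & 0xff) >> 1;  // 0x40: correct u8 >> 1
      printf("%x %x\n", unmasked, masked);
    }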
@@ -412,18 +677,20 @@ void SimdScalarLowering::LowerNotEqual(Node* node, SimdType input_rep_type,
DCHECK(node->InputCount() == 2);
Node** rep_left = GetReplacementsWithType(node->InputAt(0), input_rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(1), input_rep_type);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ int num_lanes = NumLanes(input_rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
Diamond d(graph(), common(),
graph()->NewNode(op, rep_left[i], rep_right[i]));
rep_node[i] = d.Phi(MachineRepresentation::kWord32,
jsgraph_->Int32Constant(0), jsgraph_->Int32Constant(1));
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
}
void SimdScalarLowering::LowerNode(Node* node) {
SimdType rep_type = ReplacementType(node);
+ int num_lanes = NumLanes(rep_type);
switch (node->opcode()) {
case IrOpcode::kStart: {
int parameter_count = GetParameterCountAfterLowering();
@@ -449,19 +716,19 @@ void SimdScalarLowering::LowerNode(Node* node) {
if (old_index == new_index) {
NodeProperties::ChangeOp(node, common()->Parameter(new_index));
- Node* new_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ Node* new_node[kNumLanes32];
+ for (int i = 0; i < kNumLanes32; ++i) {
new_node[i] = nullptr;
}
new_node[0] = node;
if (signature()->GetParam(old_index) ==
MachineRepresentation::kSimd128) {
- for (int i = 1; i < kMaxLanes; ++i) {
+ for (int i = 1; i < kNumLanes32; ++i) {
new_node[i] = graph()->NewNode(common()->Parameter(new_index + i),
graph()->start());
}
}
- ReplaceNode(node, new_node);
+ ReplaceNode(node, new_node, kNumLanes32);
}
}
break;
@@ -470,24 +737,36 @@ void SimdScalarLowering::LowerNode(Node* node) {
MachineRepresentation rep =
LoadRepresentationOf(node->op()).representation();
const Operator* load_op;
- if (rep_type == SimdType::kInt32) {
- load_op = machine()->Load(MachineType::Int32());
- } else if (rep_type == SimdType::kFloat32) {
- load_op = machine()->Load(MachineType::Float32());
+#define LOAD_CASE(sType, mType) \
+ case SimdType::k##sType: \
+ load_op = machine()->Load(MachineType::mType()); \
+ break;
+
+ switch (rep_type) {
+ FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(LOAD_CASE)
+ default:
+ UNREACHABLE();
}
- LowerLoadOp(rep, node, load_op);
+#undef LOAD_CASE
+ LowerLoadOp(rep, node, load_op, rep_type);
break;
}
case IrOpcode::kUnalignedLoad: {
MachineRepresentation rep =
UnalignedLoadRepresentationOf(node->op()).representation();
const Operator* load_op;
- if (rep_type == SimdType::kInt32) {
- load_op = machine()->UnalignedLoad(MachineType::Int32());
- } else if (rep_type == SimdType::kFloat32) {
- load_op = machine()->UnalignedLoad(MachineType::Float32());
+#define UNALIGNED_LOAD_CASE(sType, mType) \
+ case SimdType::k##sType: \
+ load_op = machine()->UnalignedLoad(MachineType::mType()); \
+ break;
+
+ switch (rep_type) {
+ FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(UNALIGNED_LOAD_CASE)
+ default:
+ UNREACHABLE();
}
- LowerLoadOp(rep, node, load_op);
+#undef UNALIGNED_LOAD_CASE
+ LowerLoadOp(rep, node, load_op, rep_type);
break;
}
case IrOpcode::kStore: {
@@ -496,24 +775,35 @@ void SimdScalarLowering::LowerNode(Node* node) {
WriteBarrierKind write_barrier_kind =
StoreRepresentationOf(node->op()).write_barrier_kind();
const Operator* store_op;
- if (rep_type == SimdType::kInt32) {
- store_op = machine()->Store(StoreRepresentation(
- MachineRepresentation::kWord32, write_barrier_kind));
- } else {
- store_op = machine()->Store(StoreRepresentation(
- MachineRepresentation::kFloat32, write_barrier_kind));
+#define STORE_CASE(sType, mType) \
+ case SimdType::k##sType: \
+ store_op = machine()->Store(StoreRepresentation( \
+ MachineRepresentation::k##mType, write_barrier_kind)); \
+ break;
+
+ switch (rep_type) {
+ FOREACH_SIMD_TYPE_TO_MACHINE_REP(STORE_CASE)
+ default:
+ UNREACHABLE();
}
+#undef STORE_CASE
LowerStoreOp(rep, node, store_op, rep_type);
break;
}
case IrOpcode::kUnalignedStore: {
MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
const Operator* store_op;
- if (rep_type == SimdType::kInt32) {
- store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
- } else {
- store_op = machine()->UnalignedStore(MachineRepresentation::kFloat32);
+#define UNALIGNED_STORE_CASE(sType, mType) \
+ case SimdType::k##sType: \
+ store_op = machine()->UnalignedStore(MachineRepresentation::k##mType); \
+ break;
+
+ switch (rep_type) {
+ FOREACH_SIMD_TYPE_TO_MACHINE_REP(UNALIGNED_STORE_CASE)
+ default:
+ UNREACHABLE();
}
+#undef UNALIGNED_STORE_CASE
LowerStoreOp(rep, node, store_op, rep_type);
break;
}
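[Annotation] The LOAD_CASE/STORE_CASE switches above are purely mechanical; for reference, FOREACH_SIMD_TYPE_TO_MACHINE_TYPE(LOAD_CASE) expands to:

    case SimdType::kFloat32x4: load_op = machine()->Load(MachineType::Float32()); break;
    case SimdType::kInt32x4:   load_op = machine()->Load(MachineType::Int32());   break;
    case SimdType::kInt16x8:   load_op = machine()->Load(MachineType::Int16());   break;
    case SimdType::kInt8x16:   load_op = machine()->Load(MachineType::Int8());    break;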
@@ -541,12 +831,12 @@ void SimdScalarLowering::LowerNode(Node* node) {
if (descriptor->ReturnCount() == 1 &&
descriptor->GetReturnType(0) == MachineType::Simd128()) {
// We access the additional return values through projections.
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ Node* rep_node[kNumLanes32];
+ for (int i = 0; i < kNumLanes32; ++i) {
rep_node[i] =
graph()->NewNode(common()->Projection(i), node, graph()->start());
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, kNumLanes32);
}
break;
}
@@ -559,7 +849,7 @@ void SimdScalarLowering::LowerNode(Node* node) {
for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
Node** rep_input =
GetReplacementsWithType(node->InputAt(i), rep_type);
- for (int j = 0; j < kMaxLanes; j++) {
+ for (int j = 0; j < num_lanes; j++) {
rep_node[j]->ReplaceInput(i, rep_input[j]);
}
}
@@ -580,42 +870,93 @@ void SimdScalarLowering::LowerNode(Node* node) {
I32X4_BINOP_CASE(kS128Or, Word32Or)
I32X4_BINOP_CASE(kS128Xor, Word32Xor)
#undef I32X4_BINOP_CASE
- case IrOpcode::kI32x4MaxS: {
- LowerIntMinMax(node, machine()->Int32LessThan(), true);
+ case IrOpcode::kI16x8Add:
+ case IrOpcode::kI8x16Add: {
+ LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Add());
+ break;
+ }
+ case IrOpcode::kI16x8Sub:
+ case IrOpcode::kI8x16Sub: {
+ LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Sub());
+ break;
+ }
+ case IrOpcode::kI16x8Mul:
+ case IrOpcode::kI8x16Mul: {
+ LowerBinaryOpForSmallInt(node, rep_type, machine()->Int32Mul());
break;
}
- case IrOpcode::kI32x4MinS: {
- LowerIntMinMax(node, machine()->Int32LessThan(), false);
+ case IrOpcode::kI16x8AddSaturateS:
+ case IrOpcode::kI8x16AddSaturateS: {
+ LowerSaturateBinaryOp(node, rep_type, machine()->Int32Add(), true);
break;
}
- case IrOpcode::kI32x4MaxU: {
- LowerIntMinMax(node, machine()->Uint32LessThan(), true);
+ case IrOpcode::kI16x8SubSaturateS:
+ case IrOpcode::kI8x16SubSaturateS: {
+ LowerSaturateBinaryOp(node, rep_type, machine()->Int32Sub(), true);
break;
}
- case IrOpcode::kI32x4MinU: {
- LowerIntMinMax(node, machine()->Uint32LessThan(), false);
+ case IrOpcode::kI16x8AddSaturateU:
+ case IrOpcode::kI8x16AddSaturateU: {
+ LowerSaturateBinaryOp(node, rep_type, machine()->Int32Add(), false);
break;
}
- case IrOpcode::kI32x4Neg: {
+ case IrOpcode::kI16x8SubSaturateU:
+ case IrOpcode::kI8x16SubSaturateU: {
+ LowerSaturateBinaryOp(node, rep_type, machine()->Int32Sub(), false);
+ break;
+ }
+ case IrOpcode::kI32x4MaxS:
+ case IrOpcode::kI16x8MaxS:
+ case IrOpcode::kI8x16MaxS: {
+ LowerIntMinMax(node, machine()->Int32LessThan(), true, rep_type);
+ break;
+ }
+ case IrOpcode::kI32x4MinS:
+ case IrOpcode::kI16x8MinS:
+ case IrOpcode::kI8x16MinS: {
+ LowerIntMinMax(node, machine()->Int32LessThan(), false, rep_type);
+ break;
+ }
+ case IrOpcode::kI32x4MaxU:
+ case IrOpcode::kI16x8MaxU:
+ case IrOpcode::kI8x16MaxU: {
+ LowerIntMinMax(node, machine()->Uint32LessThan(), true, rep_type);
+ break;
+ }
+ case IrOpcode::kI32x4MinU:
+ case IrOpcode::kI16x8MinU:
+ case IrOpcode::kI8x16MinU: {
+ LowerIntMinMax(node, machine()->Uint32LessThan(), false, rep_type);
+ break;
+ }
+ case IrOpcode::kI32x4Neg:
+ case IrOpcode::kI16x8Neg:
+ case IrOpcode::kI8x16Neg: {
DCHECK(node->InputCount() == 1);
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node* rep_node[kMaxLanes];
+ int num_lanes = NumLanes(rep_type);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
Node* zero = graph()->NewNode(common()->Int32Constant(0));
- for (int i = 0; i < kMaxLanes; ++i) {
+ for (int i = 0; i < num_lanes; ++i) {
rep_node[i] = graph()->NewNode(machine()->Int32Sub(), zero, rep[i]);
+ if (node->opcode() == IrOpcode::kI16x8Neg) {
+ rep_node[i] = FixUpperBits(rep_node[i], kShift16);
+ } else if (node->opcode() == IrOpcode::kI8x16Neg) {
+ rep_node[i] = FixUpperBits(rep_node[i], kShift8);
+ }
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
break;
}
case IrOpcode::kS128Not: {
DCHECK(node->InputCount() == 1);
Node** rep = GetReplacementsWithType(node->InputAt(0), rep_type);
- Node* rep_node[kMaxLanes];
+ Node* rep_node[kNumLanes32];
Node* mask = graph()->NewNode(common()->Int32Constant(0xffffffff));
- for (int i = 0; i < kMaxLanes; ++i) {
+ for (int i = 0; i < kNumLanes32; ++i) {
rep_node[i] = graph()->NewNode(machine()->Word32Xor(), rep[i], mask);
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, kNumLanes32);
break;
}
case IrOpcode::kI32x4SConvertF32x4: {
@@ -626,16 +967,16 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerConvertFromFloat(node, false);
break;
}
- case IrOpcode::kI32x4Shl: {
- LowerShiftOp(node, machine()->Word32Shl());
- break;
- }
- case IrOpcode::kI32x4ShrS: {
- LowerShiftOp(node, machine()->Word32Sar());
- break;
- }
- case IrOpcode::kI32x4ShrU: {
- LowerShiftOp(node, machine()->Word32Shr());
+ case IrOpcode::kI32x4Shl:
+ case IrOpcode::kI16x8Shl:
+ case IrOpcode::kI8x16Shl:
+ case IrOpcode::kI32x4ShrS:
+ case IrOpcode::kI16x8ShrS:
+ case IrOpcode::kI8x16ShrS:
+ case IrOpcode::kI32x4ShrU:
+ case IrOpcode::kI16x8ShrU:
+ case IrOpcode::kI8x16ShrU: {
+ LowerShiftOp(node, rep_type);
break;
}
#define F32X4_BINOP_CASE(name) \
@@ -646,7 +987,6 @@ void SimdScalarLowering::LowerNode(Node* node) {
F32X4_BINOP_CASE(Add)
F32X4_BINOP_CASE(Sub)
F32X4_BINOP_CASE(Mul)
- F32X4_BINOP_CASE(Div)
F32X4_BINOP_CASE(Min)
F32X4_BINOP_CASE(Max)
#undef F32X4_BINOP_CASE
@@ -657,51 +997,57 @@ void SimdScalarLowering::LowerNode(Node* node) {
}
F32X4_UNOP_CASE(Abs)
F32X4_UNOP_CASE(Neg)
- F32X4_UNOP_CASE(Sqrt)
#undef F32x4_UNOP_CASE
case IrOpcode::kF32x4SConvertI32x4: {
- LowerUnaryOp(node, SimdType::kInt32, machine()->RoundInt32ToFloat32());
+ LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundInt32ToFloat32());
break;
}
case IrOpcode::kF32x4UConvertI32x4: {
- LowerUnaryOp(node, SimdType::kInt32, machine()->RoundUint32ToFloat32());
+ LowerUnaryOp(node, SimdType::kInt32x4, machine()->RoundUint32ToFloat32());
break;
}
case IrOpcode::kI32x4Splat:
- case IrOpcode::kF32x4Splat: {
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ case IrOpcode::kF32x4Splat:
+ case IrOpcode::kI16x8Splat:
+ case IrOpcode::kI8x16Splat: {
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
if (HasReplacement(0, node->InputAt(0))) {
rep_node[i] = GetReplacements(node->InputAt(0))[0];
} else {
rep_node[i] = node->InputAt(0);
}
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
break;
}
case IrOpcode::kI32x4ExtractLane:
- case IrOpcode::kF32x4ExtractLane: {
+ case IrOpcode::kF32x4ExtractLane:
+ case IrOpcode::kI16x8ExtractLane:
+ case IrOpcode::kI8x16ExtractLane: {
int32_t lane = OpParameter<int32_t>(node);
- Node* rep_node[kMaxLanes] = {
- GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
- nullptr, nullptr};
- ReplaceNode(node, rep_node);
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ rep_node[0] = GetReplacementsWithType(node->InputAt(0), rep_type)[lane];
+ for (int i = 1; i < num_lanes; ++i) {
+ rep_node[i] = nullptr;
+ }
+ ReplaceNode(node, rep_node, num_lanes);
break;
}
case IrOpcode::kI32x4ReplaceLane:
- case IrOpcode::kF32x4ReplaceLane: {
+ case IrOpcode::kF32x4ReplaceLane:
+ case IrOpcode::kI16x8ReplaceLane:
+ case IrOpcode::kI8x16ReplaceLane: {
DCHECK_EQ(2, node->InputCount());
Node* repNode = node->InputAt(1);
int32_t lane = OpParameter<int32_t>(node);
- DCHECK(lane >= 0 && lane <= 3);
Node** rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
if (HasReplacement(0, repNode)) {
rep_node[lane] = GetReplacements(repNode)[0];
} else {
rep_node[lane] = repNode;
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
break;
}
#define COMPARISON_CASE(type, simd_op, lowering_op, invert) \
@@ -709,51 +1055,84 @@ void SimdScalarLowering::LowerNode(Node* node) {
LowerBinaryOp(node, SimdType::k##type, machine()->lowering_op(), invert); \
break; \
}
- COMPARISON_CASE(Float32, kF32x4Eq, Float32Equal, false)
- COMPARISON_CASE(Float32, kF32x4Lt, Float32LessThan, false)
- COMPARISON_CASE(Float32, kF32x4Le, Float32LessThanOrEqual, false)
- COMPARISON_CASE(Float32, kF32x4Gt, Float32LessThan, true)
- COMPARISON_CASE(Float32, kF32x4Ge, Float32LessThanOrEqual, true)
- COMPARISON_CASE(Int32, kI32x4Eq, Word32Equal, false)
- COMPARISON_CASE(Int32, kI32x4LtS, Int32LessThan, false)
- COMPARISON_CASE(Int32, kI32x4LeS, Int32LessThanOrEqual, false)
- COMPARISON_CASE(Int32, kI32x4GtS, Int32LessThan, true)
- COMPARISON_CASE(Int32, kI32x4GeS, Int32LessThanOrEqual, true)
- COMPARISON_CASE(Int32, kI32x4LtU, Uint32LessThan, false)
- COMPARISON_CASE(Int32, kI32x4LeU, Uint32LessThanOrEqual, false)
- COMPARISON_CASE(Int32, kI32x4GtU, Uint32LessThan, true)
- COMPARISON_CASE(Int32, kI32x4GeU, Uint32LessThanOrEqual, true)
+ COMPARISON_CASE(Float32x4, kF32x4Eq, Float32Equal, false)
+ COMPARISON_CASE(Float32x4, kF32x4Lt, Float32LessThan, false)
+ COMPARISON_CASE(Float32x4, kF32x4Le, Float32LessThanOrEqual, false)
+ COMPARISON_CASE(Float32x4, kF32x4Gt, Float32LessThan, true)
+ COMPARISON_CASE(Float32x4, kF32x4Ge, Float32LessThanOrEqual, true)
+ COMPARISON_CASE(Int32x4, kI32x4Eq, Word32Equal, false)
+ COMPARISON_CASE(Int32x4, kI32x4LtS, Int32LessThan, false)
+ COMPARISON_CASE(Int32x4, kI32x4LeS, Int32LessThanOrEqual, false)
+ COMPARISON_CASE(Int32x4, kI32x4GtS, Int32LessThan, true)
+ COMPARISON_CASE(Int32x4, kI32x4GeS, Int32LessThanOrEqual, true)
+ COMPARISON_CASE(Int32x4, kI32x4LtU, Uint32LessThan, false)
+ COMPARISON_CASE(Int32x4, kI32x4LeU, Uint32LessThanOrEqual, false)
+ COMPARISON_CASE(Int32x4, kI32x4GtU, Uint32LessThan, true)
+ COMPARISON_CASE(Int32x4, kI32x4GeU, Uint32LessThanOrEqual, true)
+ COMPARISON_CASE(Int16x8, kI16x8Eq, Word32Equal, false)
+ COMPARISON_CASE(Int16x8, kI16x8LtS, Int32LessThan, false)
+ COMPARISON_CASE(Int16x8, kI16x8LeS, Int32LessThanOrEqual, false)
+ COMPARISON_CASE(Int16x8, kI16x8GtS, Int32LessThan, true)
+ COMPARISON_CASE(Int16x8, kI16x8GeS, Int32LessThanOrEqual, true)
+ COMPARISON_CASE(Int16x8, kI16x8LtU, Uint32LessThan, false)
+ COMPARISON_CASE(Int16x8, kI16x8LeU, Uint32LessThanOrEqual, false)
+ COMPARISON_CASE(Int16x8, kI16x8GtU, Uint32LessThan, true)
+ COMPARISON_CASE(Int16x8, kI16x8GeU, Uint32LessThanOrEqual, true)
+ COMPARISON_CASE(Int8x16, kI8x16Eq, Word32Equal, false)
+ COMPARISON_CASE(Int8x16, kI8x16LtS, Int32LessThan, false)
+ COMPARISON_CASE(Int8x16, kI8x16LeS, Int32LessThanOrEqual, false)
+ COMPARISON_CASE(Int8x16, kI8x16GtS, Int32LessThan, true)
+ COMPARISON_CASE(Int8x16, kI8x16GeS, Int32LessThanOrEqual, true)
+ COMPARISON_CASE(Int8x16, kI8x16LtU, Uint32LessThan, false)
+ COMPARISON_CASE(Int8x16, kI8x16LeU, Uint32LessThanOrEqual, false)
+ COMPARISON_CASE(Int8x16, kI8x16GtU, Uint32LessThan, true)
+ COMPARISON_CASE(Int8x16, kI8x16GeU, Uint32LessThanOrEqual, true)
#undef COMPARISON_CASE
case IrOpcode::kF32x4Ne: {
- LowerNotEqual(node, SimdType::kFloat32, machine()->Float32Equal());
+ LowerNotEqual(node, SimdType::kFloat32x4, machine()->Float32Equal());
break;
}
case IrOpcode::kI32x4Ne: {
- LowerNotEqual(node, SimdType::kInt32, machine()->Word32Equal());
+ LowerNotEqual(node, SimdType::kInt32x4, machine()->Word32Equal());
break;
}
- case IrOpcode::kS32x4Select: {
+ case IrOpcode::kI16x8Ne: {
+ LowerNotEqual(node, SimdType::kInt16x8, machine()->Word32Equal());
+ break;
+ }
+ case IrOpcode::kI8x16Ne: {
+ LowerNotEqual(node, SimdType::kInt8x16, machine()->Word32Equal());
+ break;
+ }
+ case IrOpcode::kS32x4Select:
+ case IrOpcode::kS16x8Select:
+ case IrOpcode::kS8x16Select: {
DCHECK(node->InputCount() == 3);
- DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kSimd1x4);
+ DCHECK(ReplacementType(node->InputAt(0)) == SimdType::kSimd1x4 ||
+ ReplacementType(node->InputAt(0)) == SimdType::kSimd1x8 ||
+ ReplacementType(node->InputAt(0)) == SimdType::kSimd1x16);
Node** boolean_input = GetReplacements(node->InputAt(0));
Node** rep_left = GetReplacementsWithType(node->InputAt(1), rep_type);
Node** rep_right = GetReplacementsWithType(node->InputAt(2), rep_type);
- Node* rep_node[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ Node** rep_node = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
Diamond d(graph(), common(),
graph()->NewNode(machine()->Word32Equal(), boolean_input[i],
jsgraph_->Int32Constant(0)));
- if (rep_type == SimdType::kFloat32) {
- rep_node[i] =
- d.Phi(MachineRepresentation::kFloat32, rep_right[1], rep_left[0]);
- } else if (rep_type == SimdType::kInt32) {
- rep_node[i] =
- d.Phi(MachineRepresentation::kWord32, rep_right[1], rep_left[0]);
- } else {
- UNREACHABLE();
+#define SELECT_CASE(sType, mType) \
+ case SimdType::k##sType: \
+ rep_node[i] = \
+ d.Phi(MachineRepresentation::k##mType, rep_right[1], rep_left[0]); \
+ break;
+
+ switch (rep_type) {
+ FOREACH_SIMD_TYPE_TO_MACHINE_REP(SELECT_CASE)
+ default:
+ UNREACHABLE();
}
+#undef SELECT_CASE
}
- ReplaceNode(node, rep_node);
+ ReplaceNode(node, rep_node, num_lanes);
break;
}
default: { DefaultLowering(node); }
@@ -770,7 +1149,7 @@ bool SimdScalarLowering::DefaultLowering(Node* node) {
}
if (HasReplacement(1, input)) {
something_changed = true;
- for (int j = 1; j < kMaxLanes; j++) {
+ for (int j = 1; j < ReplacementCount(input); ++j) {
node->InsertInput(zone(), i + j, GetReplacements(input)[j]);
}
}
@@ -778,18 +1157,17 @@ bool SimdScalarLowering::DefaultLowering(Node* node) {
return something_changed;
}
-void SimdScalarLowering::ReplaceNode(Node* old, Node** new_node) {
- // if new_low == nullptr, then also new_high == nullptr.
- DCHECK(new_node[0] != nullptr ||
- (new_node[1] == nullptr && new_node[2] == nullptr &&
- new_node[3] == nullptr));
- for (int i = 0; i < kMaxLanes; ++i) {
- replacements_[old->id()].node[i] = new_node[i];
+void SimdScalarLowering::ReplaceNode(Node* old, Node** new_nodes, int count) {
+ replacements_[old->id()].node = zone()->NewArray<Node*>(count);
+ for (int i = 0; i < count; ++i) {
+ replacements_[old->id()].node[i] = new_nodes[i];
}
+ replacements_[old->id()].num_replacements = count;
}
bool SimdScalarLowering::HasReplacement(size_t index, Node* node) {
- return replacements_[node->id()].node[index] != nullptr;
+ return replacements_[node->id()].node != nullptr &&
+ replacements_[node->id()].node[index] != nullptr;
}
SimdScalarLowering::SimdType SimdScalarLowering::ReplacementType(Node* node) {
@@ -802,30 +1180,61 @@ Node** SimdScalarLowering::GetReplacements(Node* node) {
return result;
}
+int SimdScalarLowering::ReplacementCount(Node* node) {
+ return replacements_[node->id()].num_replacements;
+}
+
+void SimdScalarLowering::Int32ToFloat32(Node** replacements, Node** result) {
+ for (int i = 0; i < kNumLanes32; ++i) {
+ if (replacements[i] != nullptr) {
+ result[i] =
+ graph()->NewNode(machine()->BitcastInt32ToFloat32(), replacements[i]);
+ } else {
+ result[i] = nullptr;
+ }
+ }
+}
+
+void SimdScalarLowering::Float32ToInt32(Node** replacements, Node** result) {
+ for (int i = 0; i < kNumLanes32; ++i) {
+ if (replacements[i] != nullptr) {
+ result[i] =
+ graph()->NewNode(machine()->BitcastFloat32ToInt32(), replacements[i]);
+ } else {
+ result[i] = nullptr;
+ }
+ }
+}
+
Node** SimdScalarLowering::GetReplacementsWithType(Node* node, SimdType type) {
Node** replacements = GetReplacements(node);
if (ReplacementType(node) == type) {
return GetReplacements(node);
}
- Node** result = zone()->NewArray<Node*>(kMaxLanes);
- if (ReplacementType(node) == SimdType::kInt32 && type == SimdType::kFloat32) {
- for (int i = 0; i < kMaxLanes; ++i) {
- if (replacements[i] != nullptr) {
- result[i] = graph()->NewNode(machine()->BitcastInt32ToFloat32(),
- replacements[i]);
- } else {
- result[i] = nullptr;
- }
+ int num_lanes = NumLanes(type);
+ Node** result = zone()->NewArray<Node*>(num_lanes);
+ if (type == SimdType::kInt32x4) {
+ if (ReplacementType(node) == SimdType::kFloat32x4) {
+ Float32ToInt32(replacements, result);
+ } else if (ReplacementType(node) == SimdType::kInt16x8) {
+ UNIMPLEMENTED();
+ } else {
+ UNREACHABLE();
}
- } else if (ReplacementType(node) == SimdType::kFloat32 &&
- type == SimdType::kInt32) {
- for (int i = 0; i < kMaxLanes; ++i) {
- if (replacements[i] != nullptr) {
- result[i] = graph()->NewNode(machine()->BitcastFloat32ToInt32(),
- replacements[i]);
- } else {
- result[i] = nullptr;
- }
+ } else if (type == SimdType::kFloat32x4) {
+ if (ReplacementType(node) == SimdType::kInt32x4) {
+ Int32ToFloat32(replacements, result);
+ } else if (ReplacementType(node) == SimdType::kInt16x8) {
+ UNIMPLEMENTED();
+ } else {
+ UNREACHABLE();
+ }
+ } else if (type == SimdType::kInt16x8) {
+ if (ReplacementType(node) == SimdType::kInt32x4 ||
+ ReplacementType(node) == SimdType::kFloat32x4) {
+ UNIMPLEMENTED();
+ } else {
+ UNREACHABLE();
}
} else {
UNREACHABLE();
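[Annotation] The Int32ToFloat32/Float32ToInt32 helpers are lane-wise bitcasts, not numeric conversions: the 32-bit pattern is reinterpreted unchanged. Portable equivalent (not V8 code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    float BitcastInt32ToFloat32(int32_t v) {
      float f;
      std::memcpy(&f, &v, sizeof f);  // reinterpret the bits, don't convert
      return f;
    }
    int main() { printf("%g\n", BitcastInt32ToFloat32(0x3f800000)); }  // prints 1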
@@ -842,31 +1251,34 @@ void SimdScalarLowering::PreparePhiReplacement(Node* phi) {
// graph verifier.
int value_count = phi->op()->ValueInputCount();
SimdType type = ReplacementType(phi);
- Node** inputs_rep[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
+ int num_lanes = NumLanes(type);
+ Node*** inputs_rep = zone()->NewArray<Node**>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
inputs_rep[i] = zone()->NewArray<Node*>(value_count + 1);
inputs_rep[i][value_count] = NodeProperties::GetControlInput(phi, 0);
}
for (int i = 0; i < value_count; ++i) {
- for (int j = 0; j < kMaxLanes; j++) {
+ for (int j = 0; j < num_lanes; ++j) {
inputs_rep[j][i] = placeholder_;
}
}
- Node* rep_nodes[kMaxLanes];
- for (int i = 0; i < kMaxLanes; ++i) {
- if (type == SimdType::kInt32) {
- rep_nodes[i] = graph()->NewNode(
- common()->Phi(MachineRepresentation::kWord32, value_count),
- value_count + 1, inputs_rep[i], false);
- } else if (type == SimdType::kFloat32) {
- rep_nodes[i] = graph()->NewNode(
- common()->Phi(MachineRepresentation::kFloat32, value_count),
- value_count + 1, inputs_rep[i], false);
- } else {
- UNREACHABLE();
+ Node** rep_nodes = zone()->NewArray<Node*>(num_lanes);
+ for (int i = 0; i < num_lanes; ++i) {
+#define PHI_CASE(sType, mType) \
+ case SimdType::k##sType: \
+ rep_nodes[i] = graph()->NewNode( \
+ common()->Phi(MachineRepresentation::k##mType, value_count), \
+ value_count + 1, inputs_rep[i], false); \
+ break;
+
+ switch (type) {
+ FOREACH_SIMD_TYPE_TO_MACHINE_REP(PHI_CASE)
+ default:
+ UNREACHABLE();
}
+#undef PHI_CASE
}
- ReplaceNode(phi, rep_nodes);
+ ReplaceNode(phi, rep_nodes, num_lanes);
}
}
} // namespace compiler
diff --git a/deps/v8/src/compiler/simd-scalar-lowering.h b/deps/v8/src/compiler/simd-scalar-lowering.h
index 70186fdf11..09c78dc983 100644
--- a/deps/v8/src/compiler/simd-scalar-lowering.h
+++ b/deps/v8/src/compiler/simd-scalar-lowering.h
@@ -28,14 +28,27 @@ class SimdScalarLowering {
private:
enum class State : uint8_t { kUnvisited, kOnStack, kVisited };
- enum class SimdType : uint8_t { kInt32, kFloat32, kSimd1x4 };
-
- static const int kMaxLanes = 4;
- static const int kLaneWidth = 16 / kMaxLanes;
+ enum class SimdType : uint8_t {
+ kFloat32x4,
+ kInt32x4,
+ kInt16x8,
+ kInt8x16,
+ kSimd1x4,
+ kSimd1x8,
+ kSimd1x16
+ };
+#if defined(V8_TARGET_BIG_ENDIAN)
+ static constexpr int kLaneOffsets[16] = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+#else
+ static constexpr int kLaneOffsets[16] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15};
+#endif
struct Replacement {
- Node* node[kMaxLanes];
- SimdType type; // represents what input type is expected
+ Node** node = nullptr;
+ SimdType type; // represents output type
+ int num_replacements = 0;
};
struct NodeState {
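[Annotation] The kLaneOffsets table makes lane addressing endian-correct: in GetIndexNodes, lane i's address index + i * lane_width is stored at new_indices[kLaneOffsets[i * lane_width] / lane_width]. Worked i32x4 example (lane_width = 4):

    // little-endian: kLaneOffsets[{0,4,8,12}] / 4 -> {0,1,2,3}  (identity)
    // big-endian:    kLaneOffsets[{0,4,8,12}] / 4 -> {3,2,1,0}  (lowest address
    //                feeds the highest-numbered lane)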
@@ -52,24 +65,35 @@ class SimdScalarLowering {
void LowerNode(Node* node);
bool DefaultLowering(Node* node);
- void ReplaceNode(Node* old, Node** new_nodes);
+ int NumLanes(SimdType type);
+ void ReplaceNode(Node* old, Node** new_nodes, int count);
bool HasReplacement(size_t index, Node* node);
Node** GetReplacements(Node* node);
+ int ReplacementCount(Node* node);
+ void Float32ToInt32(Node** replacements, Node** result);
+ void Int32ToFloat32(Node** replacements, Node** result);
Node** GetReplacementsWithType(Node* node, SimdType type);
SimdType ReplacementType(Node* node);
void PreparePhiReplacement(Node* phi);
void SetLoweredType(Node* node, Node* output);
- void GetIndexNodes(Node* index, Node** new_indices);
+ void GetIndexNodes(Node* index, Node** new_indices, SimdType type);
void LowerLoadOp(MachineRepresentation rep, Node* node,
- const Operator* load_op);
+ const Operator* load_op, SimdType type);
void LowerStoreOp(MachineRepresentation rep, Node* node,
const Operator* store_op, SimdType rep_type);
void LowerBinaryOp(Node* node, SimdType input_rep_type, const Operator* op,
bool invert_inputs = false);
+ Node* FixUpperBits(Node* input, int32_t shift);
+ void LowerBinaryOpForSmallInt(Node* node, SimdType input_rep_type,
+ const Operator* op);
+ Node* Mask(Node* input, int32_t mask);
+ void LowerSaturateBinaryOp(Node* node, SimdType input_rep_type,
+ const Operator* op, bool is_signed);
void LowerUnaryOp(Node* node, SimdType input_rep_type, const Operator* op);
- void LowerIntMinMax(Node* node, const Operator* op, bool is_max);
+ void LowerIntMinMax(Node* node, const Operator* op, bool is_max,
+ SimdType type);
void LowerConvertFromFloat(Node* node, bool is_signed);
- void LowerShiftOp(Node* node, const Operator* op);
+ void LowerShiftOp(Node* node, SimdType type);
Node* BuildF64Trunc(Node* input);
void LowerNotEqual(Node* node, SimdType input_rep_type, const Operator* op);
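The header now derives the lane count from the SIMD type instead of a fixed kMaxLanes, and indexes lanes through an endianness-aware offset table. A standalone sketch of that arithmetic (illustrative only, not V8 code; the exact indexing done in GetIndexNodes may differ in detail):

    #include <cassert>

    enum class SimdType { kFloat32x4, kInt32x4, kInt16x8, kInt8x16 };

    int NumLanes(SimdType type) {
      switch (type) {
        case SimdType::kFloat32x4:
        case SimdType::kInt32x4:
          return 4;
        case SimdType::kInt16x8:
          return 8;
        case SimdType::kInt8x16:
          return 16;
      }
      return 0;
    }

    // Byte offset of lane |i| in a 16-byte vector; big-endian targets number
    // lanes from the opposite end, which is what a reversed offset table
    // like kLaneOffsets provides.
    int LaneByteOffset(SimdType type, int i, bool big_endian) {
      int lane_width = 16 / NumLanes(type);
      int offset = i * lane_width;
      return big_endian ? 16 - lane_width - offset : offset;
    }

    int main() {
      assert(LaneByteOffset(SimdType::kInt32x4, 1, false) == 4);
      assert(LaneByteOffset(SimdType::kInt32x4, 1, true) == 8);
      assert(LaneByteOffset(SimdType::kInt8x16, 15, true) == 0);
      return 0;
    }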
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index d0f952a9ec..1691f1618f 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -1073,6 +1073,51 @@ class RepresentationSelector {
SetOutput(node, MachineRepresentation::kTagged);
}
+ void VisitFrameState(Node* node) {
+ DCHECK_EQ(5, node->op()->ValueInputCount());
+ DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
+
+ ProcessInput(node, 0, UseInfo::AnyTagged()); // Parameters.
+ ProcessInput(node, 1, UseInfo::AnyTagged()); // Registers.
+
+ // Expression stack/accumulator.
+ if (node->InputAt(2)->opcode() == IrOpcode::kStateValues ||
+ node->InputAt(2)->opcode() == IrOpcode::kTypedStateValues) {
+ // TODO(turbofan): This should only be produced by AST graph builder.
+ // Remove once we switch to bytecode graph builder exclusively.
+ ProcessInput(node, 2, UseInfo::AnyTagged());
+ } else {
+ // Accumulator is a special flower - we need to remember its type in
+ // a singleton typed-state-values node (as if it was a singleton
+ // state-values node).
+ if (propagate()) {
+ EnqueueInput(node, 2, UseInfo::Any());
+ } else if (lower()) {
+ Zone* zone = jsgraph_->zone();
+ Node* accumulator = node->InputAt(2);
+ if (accumulator == jsgraph_->OptimizedOutConstant()) {
+ node->ReplaceInput(2, jsgraph_->SingleDeadTypedStateValues());
+ } else {
+ ZoneVector<MachineType>* types =
+ new (zone->New(sizeof(ZoneVector<MachineType>)))
+ ZoneVector<MachineType>(1, zone);
+ (*types)[0] = DeoptMachineTypeOf(
+ GetInfo(accumulator)->representation(), TypeOf(accumulator));
+
+ node->ReplaceInput(2, jsgraph_->graph()->NewNode(
+ jsgraph_->common()->TypedStateValues(
+ types, SparseInputMask::Dense()),
+ accumulator));
+ }
+ }
+ }
+
+ ProcessInput(node, 3, UseInfo::AnyTagged()); // Context.
+ ProcessInput(node, 4, UseInfo::AnyTagged()); // Closure.
+ ProcessInput(node, 5, UseInfo::AnyTagged()); // Outer frame state.
+ return SetOutput(node, MachineRepresentation::kTagged);
+ }
+
void VisitObjectState(Node* node) {
if (propagate()) {
for (int i = 0; i < node->InputCount(); i++) {
@@ -1402,30 +1447,6 @@ class RepresentationSelector {
return;
}
- void VisitOsrGuard(Node* node) {
- VisitInputs(node);
-
- // Insert a dynamic check for the OSR value type if necessary.
- switch (OsrGuardTypeOf(node->op())) {
- case OsrGuardType::kUninitialized:
- // At this point, we should always have a type for the OsrValue.
- UNREACHABLE();
- break;
- case OsrGuardType::kSignedSmall:
- if (lower()) {
- NodeProperties::ChangeOp(node,
- simplified()->CheckedTaggedToTaggedSigned());
- }
- return SetOutput(node, MachineRepresentation::kTaggedSigned);
- case OsrGuardType::kAny: // Nothing to check.
- if (lower()) {
- DeferReplacement(node, node->InputAt(0));
- }
- return SetOutput(node, MachineRepresentation::kTagged);
- }
- UNREACHABLE();
- }
-
// Dispatching routine for visiting the node {node} with the usage {use}.
// Depending on the operator, propagate new usage info to the inputs.
void VisitNode(Node* node, Truncation truncation,
@@ -1531,11 +1552,14 @@ class RepresentationSelector {
// BooleanNot(x: kRepBit) => Word32Equal(x, #0)
node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
NodeProperties::ChangeOp(node, lowering->machine()->Word32Equal());
- } else {
- DCHECK(CanBeTaggedPointer(input_info->representation()));
+ } else if (CanBeTaggedPointer(input_info->representation())) {
// BooleanNot(x: kRepTagged) => WordEqual(x, #false)
node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
NodeProperties::ChangeOp(node, lowering->machine()->WordEqual());
+ } else {
+ DCHECK_EQ(MachineRepresentation::kNone,
+ input_info->representation());
+ DeferReplacement(node, lowering->jsgraph()->Int32Constant(0));
}
} else {
// No input representation requirement; adapt during lowering.
@@ -2700,11 +2724,7 @@ class RepresentationSelector {
switch (mode) {
case CheckFloat64HoleMode::kAllowReturnHole:
if (truncation.IsUnused()) return VisitUnused(node);
- if (truncation.IsUsedAsWord32()) {
- VisitUnop(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- if (lower()) DeferReplacement(node, node->InputAt(0));
- } else if (truncation.IsUsedAsFloat64()) {
+ if (truncation.IsUsedAsFloat64()) {
VisitUnop(node, UseInfo::TruncatingFloat64(),
MachineRepresentation::kFloat64);
if (lower()) DeferReplacement(node, node->InputAt(0));
@@ -2775,6 +2795,8 @@ class RepresentationSelector {
MachineRepresentation::kFloat64);
if (lower()) NodeProperties::ChangeOp(node, Float64Op(node));
return;
+ case IrOpcode::kFrameState:
+ return VisitFrameState(node);
case IrOpcode::kStateValues:
return VisitStateValues(node);
case IrOpcode::kObjectState:
@@ -2783,16 +2805,19 @@ class RepresentationSelector {
// We just get rid of the sigma here. In principle, it should be
// possible to refine the truncation and representation based on
// the sigma's type.
- MachineRepresentation output =
+ MachineRepresentation representation =
GetOutputInfoForPhi(node, TypeOf(node->InputAt(0)), truncation);
- VisitUnop(node, UseInfo(output, truncation), output);
+
+ // For now, we just handle specially the impossible case.
+ MachineRepresentation output = TypeOf(node)->IsInhabited()
+ ? representation
+ : MachineRepresentation::kNone;
+
+ VisitUnop(node, UseInfo(representation, truncation), output);
if (lower()) DeferReplacement(node, node->InputAt(0));
return;
}
- case IrOpcode::kOsrGuard:
- return VisitOsrGuard(node);
-
case IrOpcode::kFinishRegion:
VisitInputs(node);
// Assume the output is tagged pointer.
@@ -2810,10 +2835,11 @@ class RepresentationSelector {
case IrOpcode::kIfException:
case IrOpcode::kIfTrue:
case IrOpcode::kIfFalse:
+ case IrOpcode::kIfValue:
+ case IrOpcode::kIfDefault:
case IrOpcode::kDeoptimize:
case IrOpcode::kEffectPhi:
case IrOpcode::kTerminate:
- case IrOpcode::kFrameState:
case IrOpcode::kCheckpoint:
case IrOpcode::kLoop:
case IrOpcode::kMerge:
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 9fb0fc55bf..476f423749 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -8,6 +8,7 @@
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/compiler/types.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index 3750861bf0..ac53bfc72e 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -141,7 +141,8 @@ enum class CheckForMinusZeroMode : uint8_t {
size_t hash_value(CheckForMinusZeroMode);
-std::ostream& operator<<(std::ostream&, CheckForMinusZeroMode);
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ CheckForMinusZeroMode);
CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) WARN_UNUSED_RESULT;
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 94c54ac600..f92d507dfb 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -607,19 +607,6 @@ Type* Typer::Visitor::TypeParameter(Node* node) {
Type* Typer::Visitor::TypeOsrValue(Node* node) { return Type::Any(); }
-Type* Typer::Visitor::TypeOsrGuard(Node* node) {
- switch (OsrGuardTypeOf(node->op())) {
- case OsrGuardType::kUninitialized:
- return Type::None();
- case OsrGuardType::kSignedSmall:
- return Type::SignedSmall();
- case OsrGuardType::kAny:
- return Type::Any();
- }
- UNREACHABLE();
- return nullptr;
-}
-
Type* Typer::Visitor::TypeRetain(Node* node) {
UNREACHABLE();
return nullptr;
@@ -1116,15 +1103,23 @@ Type* Typer::Visitor::TypeJSCreate(Node* node) { return Type::Object(); }
Type* Typer::Visitor::TypeJSCreateArguments(Node* node) {
- return Type::OtherObject();
+ switch (CreateArgumentsTypeOf(node->op())) {
+ case CreateArgumentsType::kRestParameter:
+ return Type::Array();
+ case CreateArgumentsType::kMappedArguments:
+ case CreateArgumentsType::kUnmappedArguments:
+ return Type::OtherObject();
+ }
+ UNREACHABLE();
+ return nullptr;
}
+Type* Typer::Visitor::TypeJSCreateArray(Node* node) { return Type::Array(); }
-Type* Typer::Visitor::TypeJSCreateArray(Node* node) {
+Type* Typer::Visitor::TypeJSCreateGeneratorObject(Node* node) {
return Type::OtherObject();
}
-
Type* Typer::Visitor::TypeJSCreateClosure(Node* node) {
return Type::Function();
}
@@ -1139,7 +1134,7 @@ Type* Typer::Visitor::TypeJSCreateKeyValueArray(Node* node) {
}
Type* Typer::Visitor::TypeJSCreateLiteralArray(Node* node) {
- return Type::OtherObject();
+ return Type::Array();
}
@@ -1340,6 +1335,10 @@ Type* Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
// JS other operators.
+Type* Typer::Visitor::TypeJSConstructForwardVarargs(Node* node) {
+ return Type::Receiver();
+}
+
Type* Typer::Visitor::TypeJSConstruct(Node* node) { return Type::Receiver(); }
Type* Typer::Visitor::TypeJSConstructWithSpread(Node* node) {
@@ -1516,6 +1515,7 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
// Object functions.
case kObjectAssign:
+ return Type::Receiver();
case kObjectCreate:
return Type::OtherObject();
case kObjectHasOwnProperty:
@@ -1527,7 +1527,7 @@ Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
case kRegExpCompile:
return Type::OtherObject();
case kRegExpExec:
- return Type::Union(Type::OtherObject(), Type::Null(), t->zone());
+ return Type::Union(Type::Array(), Type::Null(), t->zone());
case kRegExpTest:
return Type::Boolean();
case kRegExpToString:
diff --git a/deps/v8/src/compiler/types.cc b/deps/v8/src/compiler/types.cc
index e0de4ef97a..ef2d3a0ef6 100644
--- a/deps/v8/src/compiler/types.cc
+++ b/deps/v8/src/compiler/types.cc
@@ -208,6 +208,8 @@ Type::bitset BitsetType::Lub(i::Map* map) {
return kOtherCallable;
}
return kOtherObject;
+ case JS_ARRAY_TYPE:
+ return kArray;
case JS_VALUE_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
@@ -216,7 +218,6 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case JS_ASYNC_GENERATOR_OBJECT_TYPE:
case JS_MODULE_NAMESPACE_TYPE:
case JS_ARRAY_BUFFER_TYPE:
- case JS_ARRAY_TYPE:
case JS_REGEXP_TYPE: // TODO(rossberg): there should be a RegExp type.
case JS_TYPED_ARRAY_TYPE:
case JS_DATA_VIEW_TYPE:
@@ -297,6 +298,7 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case PROPERTY_CELL_TYPE:
case MODULE_TYPE:
case MODULE_INFO_ENTRY_TYPE:
+ case CELL_TYPE:
return kOtherInternal;
// Remaining instance types are unsupported for now. If any of them do
@@ -311,24 +313,23 @@ Type::bitset BitsetType::Lub(i::Map* map) {
case FILLER_TYPE:
case ACCESS_CHECK_INFO_TYPE:
case INTERCEPTOR_INFO_TYPE:
- case CALL_HANDLER_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
case ALLOCATION_MEMENTO_TYPE:
- case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
case PROMISE_REACTION_JOB_INFO_TYPE:
case DEBUG_INFO_TYPE:
- case BREAK_POINT_INFO_TYPE:
case STACK_FRAME_INFO_TYPE:
- case CELL_TYPE:
case WEAK_CELL_TYPE:
case PROTOTYPE_INFO_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
case CONTEXT_EXTENSION_TYPE:
- case CONSTANT_ELEMENTS_PAIR_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
+ case PADDING_TYPE_1:
+ case PADDING_TYPE_2:
+ case PADDING_TYPE_3:
+ case PADDING_TYPE_4:
UNREACHABLE();
return kNone;
}
diff --git a/deps/v8/src/compiler/types.h b/deps/v8/src/compiler/types.h
index fe0df3300f..452ac7658e 100644
--- a/deps/v8/src/compiler/types.h
+++ b/deps/v8/src/compiler/types.h
@@ -126,6 +126,7 @@ namespace compiler {
V(Hole, 1u << 22) \
V(OtherInternal, 1u << 23) \
V(ExternalPointer, 1u << 24) \
+ V(Array, 1u << 25) \
\
V(Signed31, kUnsigned30 | kNegative31) \
V(Signed32, kSigned31 | kOtherUnsigned31 | \
@@ -166,12 +167,14 @@ namespace compiler {
V(Primitive, kSymbol | kPlainPrimitive) \
V(OtherUndetectableOrUndefined, kOtherUndetectable | kUndefined) \
V(Proxy, kCallableProxy | kOtherProxy) \
+ V(ArrayOrOtherObject, kArray | kOtherObject) \
+ V(ArrayOrProxy, kArray | kProxy) \
V(DetectableCallable, kFunction | kBoundFunction | \
kOtherCallable | kCallableProxy) \
V(Callable, kDetectableCallable | kOtherUndetectable) \
- V(NonCallable, kOtherObject | kOtherProxy) \
+ V(NonCallable, kArray | kOtherObject | kOtherProxy) \
V(NonCallableOrNull, kNonCallable | kNull) \
- V(DetectableObject, kFunction | kBoundFunction | \
+ V(DetectableObject, kArray | kFunction | kBoundFunction | \
kOtherCallable | kOtherObject) \
V(DetectableReceiver, kDetectableObject | kProxy) \
V(DetectableReceiverOrNull, kDetectableReceiver | kNull) \
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index 3c79c67fff..a1310ed22f 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -401,23 +401,6 @@ void Verifier::Visitor::Check(Node* node) {
// Type is merged from other values in the graph and could be any.
CheckTypeIs(node, Type::Any());
break;
- case IrOpcode::kOsrGuard:
- // OSR values have a value and a control input.
- CHECK_EQ(1, value_count);
- CHECK_EQ(1, effect_count);
- CHECK_EQ(1, control_count);
- switch (OsrGuardTypeOf(node->op())) {
- case OsrGuardType::kUninitialized:
- CheckTypeIs(node, Type::None());
- break;
- case OsrGuardType::kSignedSmall:
- CheckTypeIs(node, Type::SignedSmall());
- break;
- case OsrGuardType::kAny:
- CheckTypeIs(node, Type::Any());
- break;
- }
- break;
case IrOpcode::kProjection: {
// Projection has an input that produces enough values.
int index = static_cast<int>(ProjectionIndexOf(node->op()));
@@ -509,12 +492,19 @@ void Verifier::Visitor::Check(Node* node) {
CHECK_EQ(0, control_count);
CHECK_EQ(0, effect_count);
CHECK_EQ(6, input_count);
- for (int i = 0; i < 3; ++i) {
+ // Check that the parameters and registers are kStateValues or
+ // kTypedStateValues.
+ for (int i = 0; i < 2; ++i) {
CHECK(NodeProperties::GetValueInput(node, i)->opcode() ==
IrOpcode::kStateValues ||
NodeProperties::GetValueInput(node, i)->opcode() ==
IrOpcode::kTypedStateValues);
}
+ // The accumulator (InputAt(2)) cannot be kStateValues, but it can be
+ // kTypedStateValues (to signal the type). Once AST graph builder
+ // is removed, we should check this here. Until then, AST graph
+ // builder can generate expression stack as InputAt(2), which can
+ // still be kStateValues.
break;
}
case IrOpcode::kStateValues:
@@ -598,12 +588,12 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::Object());
break;
case IrOpcode::kJSCreateArguments:
- // Type is OtherObject.
- CheckTypeIs(node, Type::OtherObject());
+ // Type is Array \/ OtherObject.
+ CheckTypeIs(node, Type::ArrayOrOtherObject());
break;
case IrOpcode::kJSCreateArray:
- // Type is OtherObject.
- CheckTypeIs(node, Type::OtherObject());
+ // Type is Array.
+ CheckTypeIs(node, Type::Array());
break;
case IrOpcode::kJSCreateClosure:
// Type is Function.
@@ -618,6 +608,9 @@ void Verifier::Visitor::Check(Node* node) {
CheckTypeIs(node, Type::OtherObject());
break;
case IrOpcode::kJSCreateLiteralArray:
+ // Type is Array.
+ CheckTypeIs(node, Type::Array());
+ break;
case IrOpcode::kJSCreateLiteralObject:
case IrOpcode::kJSCreateLiteralRegExp:
// Type is OtherObject.
@@ -707,6 +700,7 @@ void Verifier::Visitor::Check(Node* node) {
break;
}
+ case IrOpcode::kJSConstructForwardVarargs:
case IrOpcode::kJSConstruct:
case IrOpcode::kJSConstructWithSpread:
case IrOpcode::kJSConvertReceiver:
@@ -746,6 +740,10 @@ void Verifier::Visitor::Check(Node* node) {
CheckNotTyped(node);
break;
+ case IrOpcode::kJSCreateGeneratorObject:
+ CheckTypeIs(node, Type::OtherObject());
+ break;
+
case IrOpcode::kJSGeneratorRestoreContinuation:
CheckTypeIs(node, Type::SignedSmall());
break;
@@ -1459,6 +1457,7 @@ void Verifier::Run(Graph* graph, Typing typing, CheckInputs check_inputs) {
for (Node* other : node->uses()) {
if (all.IsLive(other) && other != proj &&
other->opcode() == IrOpcode::kProjection &&
+ other->InputAt(0) == node &&
ProjectionIndexOf(other->op()) == ProjectionIndexOf(proj->op())) {
V8_Fatal(__FILE__, __LINE__,
"Node #%d:%s has duplicate projections #%d and #%d",
@@ -1712,10 +1711,11 @@ void Verifier::VerifyNode(Node* node) {
CHECK_EQ(OperatorProperties::GetTotalInputCount(node->op()),
node->InputCount());
// If this node has no effect or no control outputs,
- // we check that no its uses are effect or control inputs.
+ // we check that none of its uses are effect or control inputs.
bool check_no_control = node->op()->ControlOutputCount() == 0;
bool check_no_effect = node->op()->EffectOutputCount() == 0;
bool check_no_frame_state = node->opcode() != IrOpcode::kFrameState;
+ int effect_edges = 0;
if (check_no_effect || check_no_control) {
for (Edge edge : node->use_edges()) {
Node* const user = edge.from();
@@ -1724,6 +1724,7 @@ void Verifier::VerifyNode(Node* node) {
CHECK(!check_no_control);
} else if (NodeProperties::IsEffectEdge(edge)) {
CHECK(!check_no_effect);
+ effect_edges++;
} else if (NodeProperties::IsFrameStateEdge(edge)) {
CHECK(!check_no_frame_state);
}
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index b6b9e3ff05..56c8f6cbef 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -66,7 +66,8 @@ void MergeControlToEnd(JSGraph* jsgraph, Node* node) {
}
Node* BuildModifyThreadInWasmFlag(bool new_value, JSGraph* jsgraph,
- Node** effect_ptr, Node* control) {
+ Node* centry_stub_node, Node** effect_ptr,
+ Node* control) {
// TODO(eholk): generate code to modify the thread-local storage directly,
// rather than calling the runtime.
if (!trap_handler::UseTrapHandler()) {
@@ -83,7 +84,7 @@ Node* BuildModifyThreadInWasmFlag(bool new_value, JSGraph* jsgraph,
// CEntryStubConstant nodes have to be created and cached in the main
// thread. At the moment this is only done for CEntryStubConstant(1).
DCHECK_EQ(1, fun->result_size);
- Node* inputs[] = {jsgraph->CEntryStubConstant(fun->result_size),
+ Node* inputs[] = {centry_stub_node,
jsgraph->ExternalConstant(
ExternalReference(f, jsgraph->isolate())), // ref
jsgraph->Int32Constant(fun->nargs), // arity
@@ -100,15 +101,16 @@ Node* BuildModifyThreadInWasmFlag(bool new_value, JSGraph* jsgraph,
// Only call this function for code which is not reused across instantiations,
// as we do not patch the embedded context.
Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
- Node* context, Node** parameters,
- int parameter_count, Node** effect_ptr,
- Node** control) {
+ Node* centry_stub_node, Node* context,
+ Node** parameters, int parameter_count,
+ Node** effect_ptr, Node** control) {
// Setting and clearing the thread-in-wasm flag should not be done as a normal
// runtime call.
DCHECK_NE(f, Runtime::kSetThreadInWasm);
DCHECK_NE(f, Runtime::kClearThreadInWasm);
// We're leaving Wasm code, so clear the flag.
- *control = BuildModifyThreadInWasmFlag(false, jsgraph, effect_ptr, *control);
+ *control = BuildModifyThreadInWasmFlag(false, jsgraph, centry_stub_node,
+ effect_ptr, *control);
const Runtime::Function* fun = Runtime::FunctionForId(f);
CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
@@ -123,7 +125,7 @@ Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
DCHECK_GE(kMaxParams, parameter_count);
Node* inputs[kMaxParams + 6];
int count = 0;
- inputs[count++] = jsgraph->CEntryStubConstant(fun->result_size);
+ inputs[count++] = centry_stub_node;
for (int i = 0; i < parameter_count; i++) {
inputs[count++] = parameters[i];
}
@@ -139,27 +141,30 @@ Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
*effect_ptr = node;
// Restore the thread-in-wasm flag, since we have returned to Wasm.
- *control = BuildModifyThreadInWasmFlag(true, jsgraph, effect_ptr, *control);
+ *control = BuildModifyThreadInWasmFlag(true, jsgraph, centry_stub_node,
+ effect_ptr, *control);
return node;
}
Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
- Node** parameters, int parameter_count,
- Node** effect_ptr, Node** control) {
- return BuildCallToRuntimeWithContext(f, jsgraph, jsgraph->NoContextConstant(),
- parameters, parameter_count, effect_ptr,
- control);
+ Node* centry_stub_node, Node** parameters,
+ int parameter_count, Node** effect_ptr,
+ Node** control) {
+ return BuildCallToRuntimeWithContext(f, jsgraph, centry_stub_node,
+ jsgraph->NoContextConstant(), parameters,
+ parameter_count, effect_ptr, control);
}
} // namespace
WasmGraphBuilder::WasmGraphBuilder(
wasm::ModuleEnv* module_env, Zone* zone, JSGraph* jsgraph,
- wasm::FunctionSig* sig,
+ Handle<Code> centry_stub, wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table)
: zone_(zone),
jsgraph_(jsgraph),
+ centry_stub_node_(jsgraph_->HeapConstant(centry_stub)),
module_(module_env),
signature_tables_(zone),
function_tables_(zone),
@@ -1066,6 +1071,7 @@ static bool ReverseBytesSupported(MachineOperatorBuilder* m,
size_t size_in_bytes) {
switch (size_in_bytes) {
case 4:
+ case 16:
return m->Word32ReverseBytes().IsSupported();
case 8:
return m->Word64ReverseBytes().IsSupported();
@@ -1102,6 +1108,9 @@ Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
// No need to change endianness for byte size, return original node
return node;
break;
+ case MachineRepresentation::kSimd128:
+ DCHECK(ReverseBytesSupported(m, valueSizeInBytes));
+ break;
default:
UNREACHABLE();
break;
@@ -1124,6 +1133,27 @@ Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
case 8:
result = graph()->NewNode(m->Word64ReverseBytes().op(), value);
break;
+ case 16: {
+ Node* byte_reversed_lanes[4];
+ for (int lane = 0; lane < 4; lane++) {
+ byte_reversed_lanes[lane] = graph()->NewNode(
+ m->Word32ReverseBytes().op(),
+ graph()->NewNode(jsgraph()->machine()->I32x4ExtractLane(lane),
+ value));
+ }
+
+ // This is making a copy of the value.
+ result =
+ graph()->NewNode(jsgraph()->machine()->S128And(), value, value);
+
+ for (int lane = 0; lane < 4; lane++) {
+ result =
+ graph()->NewNode(jsgraph()->machine()->I32x4ReplaceLane(3 - lane),
+ result, byte_reversed_lanes[lane]);
+ }
+
+ break;
+ }
default:
UNREACHABLE();
}
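The new 16-byte case reverses a full 128-bit value by byte-reversing each 32-bit lane and writing the result back into the mirrored lane. A runnable standalone model of the same trick (plain C++, not the graph-building code above):

    #include <cassert>
    #include <cstdint>

    static uint32_t ReverseBytes32(uint32_t v) {
      return (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) |
             (v << 24);
    }

    // Byte-reverse each 32-bit lane, then store it into lane (3 - lane),
    // yielding a full 16-byte reversal.
    static void ReverseBytes128(const uint32_t in[4], uint32_t out[4]) {
      for (int lane = 0; lane < 4; lane++) {
        out[3 - lane] = ReverseBytes32(in[lane]);
      }
    }

    int main() {
      const uint32_t in[4] = {0x00010203u, 0x04050607u, 0x08090A0Bu,
                              0x0C0D0E0Fu};
      uint32_t out[4];
      ReverseBytes128(in, out);
      assert(out[0] == 0x0F0E0D0Cu);
      assert(out[3] == 0x03020100u);
      return 0;
    }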
@@ -1696,9 +1726,9 @@ Node* WasmGraphBuilder::GrowMemory(Node* input) {
Node* parameters[] = {BuildChangeUint32ToSmi(input)};
Node* old_effect = *effect_;
- Node* call = BuildCallToRuntime(Runtime::kWasmGrowMemory, jsgraph(),
- parameters, arraysize(parameters), effect_,
- &check_input_range.if_true);
+ Node* call = BuildCallToRuntime(
+ Runtime::kWasmGrowMemory, jsgraph(), centry_stub_node_, parameters,
+ arraysize(parameters), effect_, &check_input_range.if_true);
Node* result = BuildChangeSmiToInt32(call);
@@ -1728,17 +1758,18 @@ Node* WasmGraphBuilder::Throw(Node* input) {
graph()->NewNode(machine->Word32And(), input, Int32Constant(0xFFFFu)));
Node* parameters[] = {lower, upper}; // thrown value
- return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(), parameters,
- arraysize(parameters), effect_, control_);
+ return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(), centry_stub_node_,
+ parameters, arraysize(parameters), effect_,
+ control_);
}
Node* WasmGraphBuilder::Catch(Node* input, wasm::WasmCodePosition position) {
CommonOperatorBuilder* common = jsgraph()->common();
Node* parameters[] = {input}; // caught value
- Node* value =
- BuildCallToRuntime(Runtime::kWasmGetCaughtExceptionValue, jsgraph(),
- parameters, arraysize(parameters), effect_, control_);
+ Node* value = BuildCallToRuntime(Runtime::kWasmGetCaughtExceptionValue,
+ jsgraph(), centry_stub_node_, parameters,
+ arraysize(parameters), effect_, control_);
Node* is_smi;
Node* is_heap;
@@ -2219,7 +2250,7 @@ Node* WasmGraphBuilder::CallIndirect(uint32_t sig_index, Node** args,
Int32Constant(kPointerSizeLog2)),
Int32Constant(fixed_offset)),
*effect_, *control_);
- auto map = const_cast<wasm::SignatureMap&>(
+ auto& map = const_cast<wasm::SignatureMap&>(
module_->module->function_tables[0].map);
Node* sig_match = graph()->NewNode(
machine->WordEqual(), load_sig,
@@ -2588,14 +2619,16 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
graph()->start());
// Set the ThreadInWasm flag before we do the actual call.
- BuildModifyThreadInWasmFlag(true, jsgraph(), effect_, *control_);
+ BuildModifyThreadInWasmFlag(true, jsgraph(), centry_stub_node_, effect_,
+ *control_);
if (!wasm::IsJSCompatibleSignature(sig_)) {
// Throw a TypeError. Use the context of the calling javascript function
// (passed as a parameter), such that the generated code is context
// independent.
BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
- context, nullptr, 0, effect_, control_);
+ centry_stub_node_, context, nullptr, 0,
+ effect_, control_);
// Add a dummy call to the wasm function so that the generated wrapper
// contains a reference to the wrapped wasm function. Without this reference
@@ -2635,7 +2668,8 @@ void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
*effect_ = call;
// Clear the ThreadInWasmFlag
- BuildModifyThreadInWasmFlag(false, jsgraph(), effect_, *control_);
+ BuildModifyThreadInWasmFlag(false, jsgraph(), centry_stub_node_, effect_,
+ *control_);
Node* retval = call;
Node* jsval = ToJS(
@@ -2673,7 +2707,8 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
Node* context =
jsgraph()->HeapConstant(jsgraph()->isolate()->native_context());
BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
- context, nullptr, 0, effect_, control_);
+ centry_stub_node_, context, nullptr, 0,
+ effect_, control_);
// We don't need to return a value here, as the runtime call will not return
// anyway (the c entry stub will trigger stack unwinding).
ReturnVoid();
@@ -2684,7 +2719,8 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
Node* call = nullptr;
- BuildModifyThreadInWasmFlag(false, jsgraph(), effect_, *control_);
+ BuildModifyThreadInWasmFlag(false, jsgraph(), centry_stub_node_, effect_,
+ *control_);
if (target->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(target);
@@ -2749,7 +2785,8 @@ void WasmGraphBuilder::BuildWasmToJSWrapper(Handle<JSReceiver> target,
*effect_ = call;
SetSourcePosition(call, 0);
- BuildModifyThreadInWasmFlag(true, jsgraph(), effect_, *control_);
+ BuildModifyThreadInWasmFlag(true, jsgraph(), centry_stub_node_, effect_,
+ *control_);
// Convert the return value back.
Node* val = sig->return_count() == 0
@@ -2834,8 +2871,8 @@ void WasmGraphBuilder::BuildWasmInterpreterEntry(
jsgraph()->SmiConstant(function_index), // function index
arg_buffer, // argument buffer
};
- BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(), parameters,
- arraysize(parameters), effect_, control_);
+ BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(), centry_stub_node_,
+ parameters, arraysize(parameters), effect_, control_);
// Read back the return value.
if (sig->return_count() == 0) {
@@ -2882,10 +2919,9 @@ Node* WasmGraphBuilder::CurrentMemoryPages() {
// CurrentMemoryPages will not be called from asm.js, hence we cannot be in
// lazy-compilation mode, hence the instance will be set.
DCHECK_EQ(wasm::kWasmOrigin, module_->module->get_origin());
- DCHECK_NOT_NULL(module_);
- DCHECK_NOT_NULL(module_->instance);
- Node* call = BuildCallToRuntime(Runtime::kWasmMemorySize, jsgraph(), nullptr,
- 0, effect_, control_);
+ Node* call =
+ BuildCallToRuntime(Runtime::kWasmMemorySize, jsgraph(), centry_stub_node_,
+ nullptr, 0, effect_, control_);
Node* result = BuildChangeSmiToInt32(call);
return result;
}
@@ -3188,8 +3224,6 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
return graph()->NewNode(jsgraph()->machine()->F32x4Abs(), inputs[0]);
case wasm::kExprF32x4Neg:
return graph()->NewNode(jsgraph()->machine()->F32x4Neg(), inputs[0]);
- case wasm::kExprF32x4Sqrt:
- return graph()->NewNode(jsgraph()->machine()->F32x4Sqrt(), inputs[0]);
case wasm::kExprF32x4RecipApprox:
return graph()->NewNode(jsgraph()->machine()->F32x4RecipApprox(),
inputs[0]);
@@ -3199,27 +3233,21 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprF32x4Add:
return graph()->NewNode(jsgraph()->machine()->F32x4Add(), inputs[0],
inputs[1]);
+ case wasm::kExprF32x4AddHoriz:
+ return graph()->NewNode(jsgraph()->machine()->F32x4AddHoriz(), inputs[0],
+ inputs[1]);
case wasm::kExprF32x4Sub:
return graph()->NewNode(jsgraph()->machine()->F32x4Sub(), inputs[0],
inputs[1]);
case wasm::kExprF32x4Mul:
return graph()->NewNode(jsgraph()->machine()->F32x4Mul(), inputs[0],
inputs[1]);
- case wasm::kExprF32x4Div:
- return graph()->NewNode(jsgraph()->machine()->F32x4Div(), inputs[0],
- inputs[1]);
case wasm::kExprF32x4Min:
return graph()->NewNode(jsgraph()->machine()->F32x4Min(), inputs[0],
inputs[1]);
case wasm::kExprF32x4Max:
return graph()->NewNode(jsgraph()->machine()->F32x4Max(), inputs[0],
inputs[1]);
- case wasm::kExprF32x4RecipRefine:
- return graph()->NewNode(jsgraph()->machine()->F32x4RecipRefine(),
- inputs[0], inputs[1]);
- case wasm::kExprF32x4RecipSqrtRefine:
- return graph()->NewNode(jsgraph()->machine()->F32x4RecipSqrtRefine(),
- inputs[0], inputs[1]);
case wasm::kExprF32x4Eq:
return graph()->NewNode(jsgraph()->machine()->F32x4Eq(), inputs[0],
inputs[1]);
@@ -3257,6 +3285,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprI32x4Add:
return graph()->NewNode(jsgraph()->machine()->I32x4Add(), inputs[0],
inputs[1]);
+ case wasm::kExprI32x4AddHoriz:
+ return graph()->NewNode(jsgraph()->machine()->I32x4AddHoriz(), inputs[0],
+ inputs[1]);
case wasm::kExprI32x4Sub:
return graph()->NewNode(jsgraph()->machine()->I32x4Sub(), inputs[0],
inputs[1]);
@@ -3330,6 +3361,9 @@ Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
case wasm::kExprI16x8AddSaturateS:
return graph()->NewNode(jsgraph()->machine()->I16x8AddSaturateS(),
inputs[0], inputs[1]);
+ case wasm::kExprI16x8AddHoriz:
+ return graph()->NewNode(jsgraph()->machine()->I16x8AddHoriz(), inputs[0],
+ inputs[1]);
case wasm::kExprI16x8Sub:
return graph()->NewNode(jsgraph()->machine()->I16x8Sub(), inputs[0],
inputs[1]);
@@ -3605,21 +3639,22 @@ Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
}
}
-Node* WasmGraphBuilder::SimdSwizzleOp(wasm::WasmOpcode opcode, uint32_t swizzle,
+Node* WasmGraphBuilder::SimdShuffleOp(uint8_t shuffle[16], unsigned lanes,
const NodeVector& inputs) {
has_simd_ = true;
- switch (opcode) {
- case wasm::kExprS32x4Swizzle:
- return graph()->NewNode(jsgraph()->machine()->S32x4Swizzle(swizzle),
- inputs[0]);
- case wasm::kExprS16x8Swizzle:
- return graph()->NewNode(jsgraph()->machine()->S16x8Swizzle(swizzle),
- inputs[0]);
- case wasm::kExprS8x16Swizzle:
- return graph()->NewNode(jsgraph()->machine()->S8x16Swizzle(swizzle),
- inputs[0]);
+ switch (lanes) {
+ case 4:
+ return graph()->NewNode(jsgraph()->machine()->S32x4Shuffle(shuffle),
+ inputs[0], inputs[1]);
+ case 8:
+ return graph()->NewNode(jsgraph()->machine()->S16x8Shuffle(shuffle),
+ inputs[0], inputs[1]);
+ case 16:
+ return graph()->NewNode(jsgraph()->machine()->S8x16Shuffle(shuffle),
+ inputs[0], inputs[1]);
default:
- return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+ UNREACHABLE();
+ return nullptr;
}
}
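SimdShuffleOp replaces the old per-opcode swizzles with a single two-operand shuffle, dispatched on lane count. Its byte-level semantics can be modeled as selecting each output byte from the 32-byte concatenation of the two inputs; a hedged sketch follows (illustrative only, assuming WASM-style shuffle indices in the range 0..31):

    #include <cassert>
    #include <cstdint>

    static void Shuffle8x16(const uint8_t shuffle[16], const uint8_t a[16],
                            const uint8_t b[16], uint8_t out[16]) {
      for (int i = 0; i < 16; i++) {
        uint8_t lane = shuffle[i];  // 0..15 selects from a, 16..31 from b
        out[i] = lane < 16 ? a[lane] : b[lane - 16];
      }
    }

    int main() {
      uint8_t a[16], b[16], out[16];
      for (int i = 0; i < 16; i++) {
        a[i] = static_cast<uint8_t>(i);
        b[i] = static_cast<uint8_t>(100 + i);
      }
      // Take the low half of b followed by the low half of a.
      const uint8_t swap_halves[16] = {16, 17, 18, 19, 20, 21, 22, 23,
                                       0,  1,  2,  3,  4,  5,  6,  7};
      Shuffle8x16(swap_halves, a, b, out);
      assert(out[0] == 100 && out[8] == 0);
      return 0;
    }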
@@ -3662,7 +3697,8 @@ Handle<Code> CompileJSToWasmWrapper(Isolate* isolate,
Node* effect = nullptr;
wasm::ModuleEnv module_env(module, nullptr);
- WasmGraphBuilder builder(&module_env, &zone, &jsgraph, func->sig);
+ WasmGraphBuilder builder(&module_env, &zone, &jsgraph,
+ CEntryStub(isolate, 1).GetCode(), func->sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildJSToWasmWrapper(wasm_code, func->sig);
@@ -3741,7 +3777,8 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
origin == wasm::kAsmJsOrigin ? new (&zone) SourcePositionTable(&graph)
: nullptr;
- WasmGraphBuilder builder(nullptr, &zone, &jsgraph, sig,
+ WasmGraphBuilder builder(nullptr, &zone, &jsgraph,
+ CEntryStub(isolate, 1).GetCode(), sig,
source_position_table);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
@@ -3792,16 +3829,17 @@ Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
}
if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
const char* function_name = nullptr;
- int function_name_size = 0;
+ size_t function_name_size = 0;
if (!import_name.is_null()) {
Handle<String> handle = import_name.ToHandleChecked();
function_name = handle->ToCString().get();
- function_name_size = handle->length();
+ function_name_size = static_cast<size_t>(handle->length());
}
- RecordFunctionCompilation(
- CodeEventListener::FUNCTION_TAG, isolate, code, "wasm-to-js", index,
- {module_name->ToCString().get(), module_name->length()},
- {function_name, function_name_size});
+ RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+ "wasm-to-js", index,
+ {module_name->ToCString().get(),
+ static_cast<size_t>(module_name->length())},
+ {function_name, function_name_size});
}
return code;
@@ -3822,7 +3860,8 @@ Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
Node* control = nullptr;
Node* effect = nullptr;
- WasmGraphBuilder builder(nullptr, &zone, &jsgraph, sig);
+ WasmGraphBuilder builder(nullptr, &zone, &jsgraph,
+ CEntryStub(isolate, 1).GetCode(), sig);
builder.set_control_ptr(&control);
builder.set_effect_ptr(&effect);
builder.BuildWasmInterpreterEntry(func_index, sig, instance);
@@ -3887,14 +3926,14 @@ SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
SourcePositionTable* source_position_table =
new (jsgraph_->zone()) SourcePositionTable(graph);
WasmGraphBuilder builder(module_env_, jsgraph_->zone(), jsgraph_,
- func_body_.sig, source_position_table);
+ centry_stub_, func_body_.sig, source_position_table);
graph_construction_result_ =
wasm::BuildTFGraph(isolate_->allocator(), &builder, func_body_);
if (graph_construction_result_.failed()) {
if (FLAG_trace_wasm_compiler) {
OFStream os(stdout);
- os << "Compilation failed: " << graph_construction_result_.error_msg
+ os << "Compilation failed: " << graph_construction_result_.error_msg()
<< std::endl;
}
return nullptr;
@@ -3924,6 +3963,9 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
if (!name.is_empty()) {
return name;
}
+#ifndef DEBUG
+ return {};
+#endif
constexpr int kBufferLength = 15;
EmbeddedVector<char, kBufferLength> name_vector;
@@ -3938,54 +3980,48 @@ Vector<const char> GetDebugName(Zone* zone, wasm::WasmName name, int index) {
WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate,
wasm::ModuleBytesEnv* module_env,
- const wasm::WasmFunction* function)
+ const wasm::WasmFunction* function,
+ bool is_sync)
: WasmCompilationUnit(
isolate, &module_env->module_env,
wasm::FunctionBody{
function->sig, module_env->wire_bytes.start(),
module_env->wire_bytes.start() + function->code_start_offset,
module_env->wire_bytes.start() + function->code_end_offset},
- module_env->wire_bytes.GetNameOrNull(function),
- function->func_index) {}
+ module_env->wire_bytes.GetNameOrNull(function), function->func_index,
+ is_sync) {}
WasmCompilationUnit::WasmCompilationUnit(Isolate* isolate,
wasm::ModuleEnv* module_env,
wasm::FunctionBody body,
- wasm::WasmName name, int index)
+ wasm::WasmName name, int index,
+ bool is_sync)
: isolate_(isolate),
module_env_(module_env),
func_body_(body),
func_name_(name),
- graph_zone_(new Zone(isolate->allocator(), ZONE_NAME)),
- jsgraph_(new (graph_zone()) JSGraph(
- isolate, new (graph_zone()) Graph(graph_zone()),
- new (graph_zone()) CommonOperatorBuilder(graph_zone()), nullptr,
- nullptr,
- new (graph_zone()) MachineOperatorBuilder(
- graph_zone(), MachineType::PointerRepresentation(),
- InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements()))),
- compilation_zone_(isolate->allocator(), ZONE_NAME),
- info_(GetDebugName(&compilation_zone_, name, index), isolate,
- &compilation_zone_, Code::ComputeFlags(Code::WASM_FUNCTION)),
- func_index_(index),
- protected_instructions_(&compilation_zone_) {}
-
-void WasmCompilationUnit::InitializeHandles() {
- // Create and cache this node in the main thread, which contains a handle to
- // the code object of the c-entry stub.
- jsgraph_->CEntryStubConstant(1);
- DCHECK(!handles_initialized_);
-#if DEBUG
- handles_initialized_ = true;
-#endif // DEBUG
-}
+ is_sync_(is_sync),
+ centry_stub_(CEntryStub(isolate, 1).GetCode()),
+ func_index_(index) {}
void WasmCompilationUnit::ExecuteCompilation() {
- DCHECK(handles_initialized_);
- // TODO(ahaas): The counters are not thread-safe at the moment.
- // HistogramTimerScope wasm_compile_function_time_scope(
- // isolate_->counters()->wasm_compile_function_time());
+ if (is_sync_) {
+ // TODO(karlschimpf): Make this work when asynchronous.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=6361
+ HistogramTimerScope wasm_compile_function_time_scope(
+ isolate_->counters()->wasm_compile_function_time());
+    ExecuteCompilationInternal();
+    return;
+  }
+ ExecuteCompilationInternal();
+ // Record the memory cost this unit places on the system until
+ // it is finalized. That may be "0" in error cases.
+ if (job_) {
+ size_t cost = job_->AllocatedMemory();
+ set_memory_cost(cost);
+ }
+}
+
+void WasmCompilationUnit::ExecuteCompilationInternal() {
if (FLAG_trace_wasm_compiler) {
if (func_name_.start() != nullptr) {
PrintF("Compiling WASM function %d:'%.*s'\n\n", func_index(),
@@ -3998,7 +4034,14 @@ void WasmCompilationUnit::ExecuteCompilation() {
double decode_ms = 0;
size_t node_count = 0;
- std::unique_ptr<Zone> graph_zone(graph_zone_.release());
+ Zone graph_zone(isolate_->allocator(), ZONE_NAME);
+ jsgraph_ = new (&graph_zone) JSGraph(
+ isolate_, new (&graph_zone) Graph(&graph_zone),
+ new (&graph_zone) CommonOperatorBuilder(&graph_zone), nullptr, nullptr,
+ new (&graph_zone) MachineOperatorBuilder(
+ &graph_zone, MachineType::PointerRepresentation(),
+ InstructionSelector::SupportedMachineOperatorFlags(),
+ InstructionSelector::AlignmentRequirements()));
SourcePositionTable* source_positions = BuildGraphForWasmFunction(&decode_ms);
if (graph_construction_result_.failed()) {
@@ -4012,22 +4055,31 @@ void WasmCompilationUnit::ExecuteCompilation() {
pipeline_timer.Start();
}
+ compilation_zone_.reset(new Zone(isolate_->allocator(), ZONE_NAME));
+
// Run the compiler pipeline to generate machine code.
CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
- &compilation_zone_, func_body_.sig);
+ compilation_zone_.get(), func_body_.sig);
if (jsgraph_->machine()->Is32()) {
- descriptor =
- module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
+ descriptor = module_env_->GetI32WasmCallDescriptor(compilation_zone_.get(),
+ descriptor);
}
+ info_.reset(new CompilationInfo(
+ GetDebugName(compilation_zone_.get(), func_name_, func_index_), isolate_,
+ compilation_zone_.get(), Code::ComputeFlags(Code::WASM_FUNCTION)));
+ ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions(
+ compilation_zone_.get());
+
job_.reset(Pipeline::NewWasmCompilationJob(
- &info_, jsgraph_, descriptor, source_positions, &protected_instructions_,
- !module_env_->module->is_wasm()));
+ info_.get(), jsgraph_, descriptor, source_positions,
+ &protected_instructions, !module_env_->module->is_wasm()));
ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
// TODO(bradnelson): Improve histogram handling of size_t.
- // TODO(ahaas): The counters are not thread-safe at the moment.
- // isolate_->counters()->wasm_compile_function_peak_memory_bytes()
- // ->AddSample(
- // static_cast<int>(jsgraph->graph()->zone()->allocation_size()));
+ if (is_sync_)
+ // TODO(karlschimpf): Make this work when asynchronous.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=6361
+ isolate_->counters()->wasm_compile_function_peak_memory_bytes()->AddSample(
+ static_cast<int>(jsgraph_->graph()->zone()->allocation_size()));
if (FLAG_trace_wasm_decode_time) {
double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
@@ -4037,6 +4089,8 @@ void WasmCompilationUnit::ExecuteCompilation() {
static_cast<unsigned>(func_body_.end - func_body_.start), decode_ms,
node_count, pipeline_ms);
}
+ // The graph zone is about to get out of scope. Avoid invalid references.
+ jsgraph_ = nullptr;
}
Handle<Code> WasmCompilationUnit::FinishCompilation(
@@ -4064,7 +4118,7 @@ Handle<Code> WasmCompilationUnit::FinishCompilation(
if (job_->FinalizeJob() != CompilationJob::SUCCEEDED) {
return Handle<Code>::null();
}
- Handle<Code> code = info_.code();
+ Handle<Code> code = info_->code();
DCHECK(!code.is_null());
if (isolate_->logger()->is_logging_code_events() ||
@@ -4089,7 +4143,6 @@ Handle<Code> WasmCompilationUnit::CompileWasmFunction(
wasm::ErrorThrower* thrower, Isolate* isolate,
wasm::ModuleBytesEnv* module_env, const wasm::WasmFunction* function) {
WasmCompilationUnit unit(isolate, module_env, function);
- unit.InitializeHandles();
unit.ExecuteCompilation();
return unit.FinishCompilation(thrower);
}
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 128bfbde00..f356f624d7 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -48,13 +48,14 @@ namespace compiler {
class WasmCompilationUnit final {
public:
WasmCompilationUnit(Isolate* isolate, wasm::ModuleBytesEnv* module_env,
- const wasm::WasmFunction* function);
+ const wasm::WasmFunction* function, bool is_sync = true);
WasmCompilationUnit(Isolate* isolate, wasm::ModuleEnv* module_env,
- wasm::FunctionBody body, wasm::WasmName name, int index);
+ wasm::FunctionBody body, wasm::WasmName name, int index,
+ bool is_sync = true);
- Zone* graph_zone() { return graph_zone_.get(); }
int func_index() const { return func_index_; }
+ void ReopenCentryStub() { centry_stub_ = handle(*centry_stub_, isolate_); }
-  void InitializeHandles();
void ExecuteCompilation();
Handle<Code> FinishCompilation(wasm::ErrorThrower* thrower);
@@ -64,6 +65,9 @@ class WasmCompilationUnit final {
wasm::ModuleBytesEnv* module_env,
const wasm::WasmFunction* function);
+ void set_memory_cost(size_t memory_cost) { memory_cost_ = memory_cost; }
+ size_t memory_cost() const { return memory_cost_; }
+
private:
SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
@@ -71,21 +75,22 @@ class WasmCompilationUnit final {
wasm::ModuleEnv* module_env_;
wasm::FunctionBody func_body_;
wasm::WasmName func_name_;
- // The graph zone is deallocated at the end of ExecuteCompilation.
- std::unique_ptr<Zone> graph_zone_;
- JSGraph* jsgraph_;
- Zone compilation_zone_;
- CompilationInfo info_;
+ bool is_sync_;
+ // The graph zone is deallocated at the end of ExecuteCompilation by virtue of
+ // it being zone allocated.
+ JSGraph* jsgraph_ = nullptr;
+  // The compilation_zone_, info_, and job_ fields need to survive past
+ // ExecuteCompilation, onto FinishCompilation (which happens on the main
+ // thread).
+ std::unique_ptr<Zone> compilation_zone_;
+ std::unique_ptr<CompilationInfo> info_;
std::unique_ptr<CompilationJob> job_;
+ Handle<Code> centry_stub_;
int func_index_;
wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
bool ok_ = true;
-#if DEBUG
- bool handles_initialized_ = false;
-#endif // DEBUG
- ZoneVector<trap_handler::ProtectedInstructionData>
- protected_instructions_; // Instructions that are protected by the signal
- // handler.
+ size_t memory_cost_ = 0;
+ void ExecuteCompilationInternal();
DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
};
@@ -115,7 +120,8 @@ typedef ZoneVector<Node*> NodeVector;
class WasmGraphBuilder {
public:
WasmGraphBuilder(
- wasm::ModuleEnv* module_env, Zone* z, JSGraph* g, wasm::FunctionSig* sig,
+ wasm::ModuleEnv* module_env, Zone* z, JSGraph* g,
+ Handle<Code> centry_stub_, wasm::FunctionSig* sig,
compiler::SourcePositionTable* source_position_table = nullptr);
Node** Buffer(size_t count) {
@@ -252,7 +258,7 @@ class WasmGraphBuilder {
Node* SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
const NodeVector& inputs);
- Node* SimdSwizzleOp(wasm::WasmOpcode opcode, uint32_t swizzle,
+ Node* SimdShuffleOp(uint8_t shuffle[16], unsigned lanes,
const NodeVector& inputs);
bool has_simd() const { return has_simd_; }
@@ -265,6 +271,7 @@ class WasmGraphBuilder {
Zone* zone_;
JSGraph* jsgraph_;
+ Node* centry_stub_node_;
wasm::ModuleEnv* module_ = nullptr;
Node* mem_buffer_ = nullptr;
Node* mem_size_ = nullptr;
diff --git a/deps/v8/src/compiler/x64/code-generator-x64.cc b/deps/v8/src/compiler/x64/code-generator-x64.cc
index 3215ec24f7..86c547f460 100644
--- a/deps/v8/src/compiler/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/x64/code-generator-x64.cc
@@ -762,7 +762,7 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
namespace {
-void AdjustStackPointerForTailCall(MacroAssembler* masm,
+void AdjustStackPointerForTailCall(Assembler* assembler,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
@@ -770,10 +770,10 @@ void AdjustStackPointerForTailCall(MacroAssembler* masm,
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
- masm->subq(rsp, Immediate(stack_slot_delta * kPointerSize));
+ assembler->subq(rsp, Immediate(stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
- masm->addq(rsp, Immediate(-stack_slot_delta * kPointerSize));
+ assembler->addq(rsp, Immediate(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
@@ -2006,6 +2006,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Movsd(operand, i.InputDoubleRegister(index));
}
break;
+ case kX64Movdqu: {
+ CpuFeatureScope sse_scope(masm(), SSSE3);
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+ __ pc_offset());
+ if (instr->HasOutput()) {
+ __ movdqu(i.OutputSimd128Register(), i.MemoryOperand());
+ } else {
+ size_t index = 0;
+ Operand operand = i.MemoryOperand(&index);
+ __ movdqu(operand, i.InputSimd128Register(index));
+ }
+ break;
+ }
case kX64BitcastFI:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ movl(i.OutputRegister(), i.InputOperand(0));
@@ -2187,6 +2200,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
}
+ case kX64I32x4AddHoriz: {
+ CpuFeatureScope sse_scope(masm(), SSSE3);
+ __ phaddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
case kX64I32x4Sub: {
__ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
break;
@@ -2235,7 +2253,205 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xorps(dst, dst);
break;
}
- case kX64S32x4Select: {
+ case kX64I16x8Splat: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ movd(dst, i.InputRegister(0));
+ __ pshuflw(dst, dst, 0x0);
+ __ pshufhw(dst, dst, 0x0);
+ __ pshufd(dst, dst, 0x0);
+ break;
+ }
+ case kX64I16x8ExtractLane: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ Register dst = i.OutputRegister();
+ __ pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movsxwl(dst, dst);
+ break;
+ }
+ case kX64I16x8ReplaceLane: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ if (instr->InputAt(2)->IsRegister()) {
+ __ pinsrw(i.OutputSimd128Register(), i.InputRegister(2),
+ i.InputInt8(1));
+ } else {
+ __ pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ }
+ break;
+ }
+ case kX64I16x8Shl: {
+ __ psllw(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I16x8ShrS: {
+ __ psraw(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I16x8Add: {
+ __ paddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8AddSaturateS: {
+ __ paddsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8AddHoriz: {
+ CpuFeatureScope sse_scope(masm(), SSSE3);
+ __ phaddw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8Sub: {
+ __ psubw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8SubSaturateS: {
+ __ psubsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8Mul: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmullw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8MinS: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pminsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8MaxS: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8Eq: {
+ __ pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8Ne: {
+ __ pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kX64I16x8ShrU: {
+ __ psrlw(i.OutputSimd128Register(), i.InputInt8(1));
+ break;
+ }
+ case kX64I16x8AddSaturateU: {
+ __ paddusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8SubSaturateU: {
+ __ psubusw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8MinU: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pminuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I16x8MaxU: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16Splat: {
+ CpuFeatureScope sse_scope(masm(), SSSE3);
+ XMMRegister dst = i.OutputSimd128Register();
+ __ movd(dst, i.InputRegister(0));
+ __ xorps(kScratchDoubleReg, kScratchDoubleReg);
+ __ pshufb(dst, kScratchDoubleReg);
+ break;
+ }
+ case kX64I8x16ExtractLane: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ Register dst = i.OutputRegister();
+ __ pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
+ __ movsxbl(dst, dst);
+ break;
+ }
+ case kX64I8x16ReplaceLane: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ if (instr->InputAt(2)->IsRegister()) {
+ __ pinsrb(i.OutputSimd128Register(), i.InputRegister(2),
+ i.InputInt8(1));
+ } else {
+ __ pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+ }
+ break;
+ }
+ case kX64I8x16Add: {
+ __ paddb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16AddSaturateS: {
+ __ paddsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16Sub: {
+ __ psubb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16SubSaturateS: {
+ __ psubsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16MinS: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pminsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16MaxS: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16Eq: {
+ __ pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16Ne: {
+ __ pcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
+ __ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
+ break;
+ }
+ case kX64I8x16AddSaturateU: {
+ __ paddusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16SubSaturateU: {
+ __ psubusb(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16MinU: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pminub(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64I8x16MaxU: {
+ CpuFeatureScope sse_scope(masm(), SSE4_1);
+ __ pmaxub(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64S128And: {
+ __ pand(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64S128Or: {
+ __ por(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64S128Xor: {
+ __ pxor(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ break;
+ }
+ case kX64S128Not: {
+ XMMRegister dst = i.OutputSimd128Register();
+ __ pcmpeqd(dst, dst);
+ __ pxor(dst, i.InputSimd128Register(1));
+ break;
+ }
+ case kX64S128Select: {
// Mask used here is stored in dst.
XMMRegister dst = i.OutputSimd128Register();
__ movaps(kScratchDoubleReg, i.InputSimd128Register(1));
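Many of the new kX64I16x8/kX64I8x16 cases above lean on the SSE saturating adds and subtracts (paddsw, paddusb, and friends). Their scalar clamping behavior, modeled in plain C++ for reference (an illustration only, not the SSE specification):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // paddsw-style: signed 16-bit add that clamps to [-32768, 32767].
    static int16_t AddSaturateS16(int16_t a, int16_t b) {
      int32_t sum = int32_t{a} + int32_t{b};
      sum = std::max(sum, int32_t{INT16_MIN});
      sum = std::min(sum, int32_t{INT16_MAX});
      return static_cast<int16_t>(sum);
    }

    // paddusb-style: unsigned 8-bit add that clamps to [0, 255].
    static uint8_t AddSaturateU8(uint8_t a, uint8_t b) {
      uint32_t sum = uint32_t{a} + uint32_t{b};
      return static_cast<uint8_t>(std::min(sum, uint32_t{UINT8_MAX}));
    }

    int main() {
      assert(AddSaturateS16(30000, 10000) == INT16_MAX);    // clamps high
      assert(AddSaturateS16(-30000, -10000) == INT16_MIN);  // clamps low
      assert(AddSaturateU8(200, 100) == 255);               // clamps at 255
      return 0;
    }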
diff --git a/deps/v8/src/compiler/x64/instruction-codes-x64.h b/deps/v8/src/compiler/x64/instruction-codes-x64.h
index 0133f80d4b..959a7d2d03 100644
--- a/deps/v8/src/compiler/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/x64/instruction-codes-x64.h
@@ -132,6 +132,7 @@ namespace compiler {
V(X64Movq) \
V(X64Movsd) \
V(X64Movss) \
+ V(X64Movdqu) \
V(X64BitcastFI) \
V(X64BitcastDL) \
V(X64BitcastIF) \
@@ -149,6 +150,7 @@ namespace compiler {
V(X64I32x4Shl) \
V(X64I32x4ShrS) \
V(X64I32x4Add) \
+ V(X64I32x4AddHoriz) \
V(X64I32x4Sub) \
V(X64I32x4Mul) \
V(X64I32x4MinS) \
@@ -158,7 +160,46 @@ namespace compiler {
V(X64I32x4ShrU) \
V(X64I32x4MinU) \
V(X64I32x4MaxU) \
- V(X64S32x4Select) \
+ V(X64I16x8Splat) \
+ V(X64I16x8ExtractLane) \
+ V(X64I16x8ReplaceLane) \
+ V(X64I16x8Shl) \
+ V(X64I16x8ShrS) \
+ V(X64I16x8Add) \
+ V(X64I16x8AddSaturateS) \
+ V(X64I16x8AddHoriz) \
+ V(X64I16x8Sub) \
+ V(X64I16x8SubSaturateS) \
+ V(X64I16x8Mul) \
+ V(X64I16x8MinS) \
+ V(X64I16x8MaxS) \
+ V(X64I16x8Eq) \
+ V(X64I16x8Ne) \
+ V(X64I16x8ShrU) \
+ V(X64I16x8AddSaturateU) \
+ V(X64I16x8SubSaturateU) \
+ V(X64I16x8MinU) \
+ V(X64I16x8MaxU) \
+ V(X64I8x16Splat) \
+ V(X64I8x16ExtractLane) \
+ V(X64I8x16ReplaceLane) \
+ V(X64I8x16Add) \
+ V(X64I8x16AddSaturateS) \
+ V(X64I8x16Sub) \
+ V(X64I8x16SubSaturateS) \
+ V(X64I8x16MinS) \
+ V(X64I8x16MaxS) \
+ V(X64I8x16Eq) \
+ V(X64I8x16Ne) \
+ V(X64I8x16AddSaturateU) \
+ V(X64I8x16SubSaturateU) \
+ V(X64I8x16MinU) \
+ V(X64I8x16MaxU) \
+ V(X64S128And) \
+ V(X64S128Or) \
+ V(X64S128Xor) \
+ V(X64S128Not) \
+ V(X64S128Select) \
V(X64S128Zero)
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
index b66d853aba..0f4c37f033 100644
--- a/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-scheduler-x64.cc
@@ -129,6 +129,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4Shl:
case kX64I32x4ShrS:
case kX64I32x4Add:
+ case kX64I32x4AddHoriz:
case kX64I32x4Sub:
case kX64I32x4Mul:
case kX64I32x4MinS:
@@ -138,8 +139,47 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64I32x4ShrU:
case kX64I32x4MinU:
case kX64I32x4MaxU:
+ case kX64I16x8Splat:
+ case kX64I16x8ExtractLane:
+ case kX64I16x8ReplaceLane:
+ case kX64I16x8Shl:
+ case kX64I16x8ShrS:
+ case kX64I16x8Add:
+ case kX64I16x8AddSaturateS:
+ case kX64I16x8AddHoriz:
+ case kX64I16x8Sub:
+ case kX64I16x8SubSaturateS:
+ case kX64I16x8Mul:
+ case kX64I16x8MinS:
+ case kX64I16x8MaxS:
+ case kX64I16x8Eq:
+ case kX64I16x8Ne:
+ case kX64I16x8ShrU:
+ case kX64I16x8AddSaturateU:
+ case kX64I16x8SubSaturateU:
+ case kX64I16x8MinU:
+ case kX64I16x8MaxU:
+ case kX64I8x16Splat:
+ case kX64I8x16ExtractLane:
+ case kX64I8x16ReplaceLane:
+ case kX64I8x16Add:
+ case kX64I8x16AddSaturateS:
+ case kX64I8x16Sub:
+ case kX64I8x16SubSaturateS:
+ case kX64I8x16MinS:
+ case kX64I8x16MaxS:
+ case kX64I8x16Eq:
+ case kX64I8x16Ne:
+ case kX64I8x16AddSaturateU:
+ case kX64I8x16SubSaturateU:
+ case kX64I8x16MinU:
+ case kX64I8x16MaxU:
+ case kX64S128And:
+ case kX64S128Or:
+ case kX64S128Xor:
+ case kX64S128Not:
+ case kX64S128Select:
case kX64S128Zero:
- case kX64S32x4Select:
return (instr->addressing_mode() == kMode_None)
? kNoOpcodeFlags
: kIsLoadOperation | kHasSideEffect;
@@ -181,6 +221,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64Movq:
case kX64Movsd:
case kX64Movss:
+ case kX64Movdqu:
return instr->HasOutput() ? kIsLoadOperation : kHasSideEffect;
case kX64StackCheck:
diff --git a/deps/v8/src/compiler/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
index 89dc956318..3f4e2b3b1c 100644
--- a/deps/v8/src/compiler/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/x64/instruction-selector-x64.cc
@@ -26,7 +26,8 @@ class X64OperandGenerator final : public OperandGenerator {
return true;
case IrOpcode::kInt64Constant: {
const int64_t value = OpParameter<int64_t>(node);
- return value == static_cast<int64_t>(static_cast<int32_t>(value));
+ return std::numeric_limits<int32_t>::min() < value &&
+ value <= std::numeric_limits<int32_t>::max();
}
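The rewritten predicate still asks whether the 64-bit constant is usable as a sign-extended 32-bit immediate, but the strict lower bound now also rejects INT32_MIN; a plausible motivation is that the selector may negate an immediate elsewhere (say, rewriting x - imm as x + (-imm)), and -INT32_MIN is not representable in 32 bits. Restated as a standalone function under that assumption:

    #include <cstdint>
    #include <limits>

    bool FitsInInt32Immediate(int64_t value) {
      // Open lower bound: INT32_MIN itself is rejected so the immediate
      // can always be safely negated.
      return std::numeric_limits<int32_t>::min() < value &&
             value <= std::numeric_limits<int32_t>::max();
    }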
case IrOpcode::kNumberConstant: {
const double value = OpParameter<double>(node);
@@ -230,6 +231,8 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
opcode = kX64Movq;
break;
case MachineRepresentation::kSimd128: // Fall through.
+ opcode = kX64Movdqu;
+ break;
case MachineRepresentation::kSimd1x4: // Fall through.
case MachineRepresentation::kSimd1x8: // Fall through.
case MachineRepresentation::kSimd1x16: // Fall through.
@@ -265,6 +268,8 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
return kX64Movq;
break;
case MachineRepresentation::kSimd128: // Fall through.
+ return kX64Movdqu;
+ break;
case MachineRepresentation::kSimd1x4: // Fall through.
case MachineRepresentation::kSimd1x8: // Fall through.
case MachineRepresentation::kSimd1x16: // Fall through.
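With these two hunks, kSimd128 loads and stores map to the new kX64Movdqu, the unaligned 128-bit move, instead of falling through with the unimplemented representations. At the C++ level the analogous alignment-agnostic load is a memcpy into a 16-byte value, which optimizing compilers lower to movdqu/movups; a sketch with illustrative types:

    #include <cstdint>
    #include <cstring>

    struct Simd128 { uint8_t bytes[16]; };

    Simd128 LoadUnaligned(const void* p) {
      Simd128 v;
      std::memcpy(&v, p, sizeof v);  // tolerates any alignment of p
      return v;
    }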
@@ -278,6 +283,15 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
} // namespace
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
+
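VisitStackSlot reserves a spill slot in the current frame and hands the node the slot's offset as an immediate operand. A toy sketch of the bump allocation a frame typically performs for this (illustrative only, not V8's Frame class, which also handles alignment):

    class Frame {
     public:
      // Returns the byte offset of a fresh spill slot of the given size.
      int AllocateSpillSlot(int size) {
        int offset = next_offset_;
        next_offset_ += size;
        return offset;
      }
     private:
      int next_offset_ = 0;
    };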
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
X64OperandGenerator g(this);
@@ -2438,7 +2452,15 @@ VISIT_ATOMIC_BINOP(Or)
VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
-#define SIMD_TYPES(V) V(I32x4)
+#define SIMD_TYPES(V) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_FORMAT_LIST(V) \
+ V(32x4) \
+ V(16x8) \
+ V(8x16)
#define SIMD_ZERO_OP_LIST(V) \
V(S128Zero) \
@@ -2446,13 +2468,9 @@ VISIT_ATOMIC_BINOP(Xor)
V(S1x8Zero) \
V(S1x16Zero)
-#define SIMD_SHIFT_OPCODES(V) \
- V(I32x4Shl) \
- V(I32x4ShrS) \
- V(I32x4ShrU)
-
#define SIMD_BINOP_LIST(V) \
V(I32x4Add) \
+ V(I32x4AddHoriz) \
V(I32x4Sub) \
V(I32x4Mul) \
V(I32x4MinS) \
@@ -2460,7 +2478,46 @@ VISIT_ATOMIC_BINOP(Xor)
V(I32x4Eq) \
V(I32x4Ne) \
V(I32x4MinU) \
- V(I32x4MaxU)
+ V(I32x4MaxU) \
+ V(I16x8Add) \
+ V(I16x8AddSaturateS) \
+ V(I16x8AddHoriz) \
+ V(I16x8Sub) \
+ V(I16x8SubSaturateS) \
+ V(I16x8Mul) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8Eq) \
+ V(I16x8Ne) \
+ V(I16x8AddSaturateU) \
+ V(I16x8SubSaturateU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU) \
+ V(I8x16Add) \
+ V(I8x16AddSaturateS) \
+ V(I8x16Sub) \
+ V(I8x16SubSaturateS) \
+ V(I8x16MinS) \
+ V(I8x16MaxS) \
+ V(I8x16Eq) \
+ V(I8x16Ne) \
+ V(I8x16AddSaturateU) \
+ V(I8x16SubSaturateU) \
+ V(I8x16MinU) \
+ V(I8x16MaxU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
+
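Several of the new binops are saturating: results outside the lane's range clamp to its limits instead of wrapping, which is what SSE instructions like paddsw do per lane. A scalar sketch of the signed 16-bit case:

    #include <algorithm>
    #include <cstdint>

    int16_t AddSaturateS16(int16_t a, int16_t b) {
      int32_t wide = static_cast<int32_t>(a) + b;  // cannot overflow in 32 bits
      wide = std::min(std::max(wide, static_cast<int32_t>(INT16_MIN)),
                      static_cast<int32_t>(INT16_MAX));
      return static_cast<int16_t>(wide);
    }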
+#define SIMD_UNOP_LIST(V) V(S128Not)
+
+#define SIMD_SHIFT_OPCODES(V) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU)
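The AddHoriz entries in the binop list above are horizontal adds (cf. SSSE3 phaddd/phaddw): adjacent lanes within each operand are summed pairwise and the two half-results are concatenated. For 4 x 32-bit lanes, schematically:

    // out = { a0+a1, a2+a3, b0+b1, b2+b3 }
    void AddHoriz32x4(const int32_t a[4], const int32_t b[4], int32_t out[4]) {
      out[0] = a[0] + a[1];
      out[1] = a[2] + a[3];
      out[2] = b[0] + b[1];
      out[3] = b[2] + b[3];
    }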
#define VISIT_SIMD_SPLAT(Type) \
void InstructionSelector::Visit##Type##Splat(Node* node) { \
@@ -2510,6 +2567,15 @@ SIMD_ZERO_OP_LIST(SIMD_VISIT_ZERO_OP)
SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
+#define VISIT_SIMD_UNOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64##Opcode, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0))); \
+ }
+SIMD_UNOP_LIST(VISIT_SIMD_UNOP)
+#undef VISIT_SIMD_UNOP
+
#define VISIT_SIMD_BINOP(Opcode) \
void InstructionSelector::Visit##Opcode(Node* node) { \
X64OperandGenerator g(this); \
@@ -2519,12 +2585,15 @@ SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
SIMD_BINOP_LIST(VISIT_SIMD_BINOP)
#undef VISIT_SIMD_BINOP
-void InstructionSelector::VisitS32x4Select(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64S32x4Select, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)),
- g.UseRegister(node->InputAt(2)));
-}
+#define SIMD_VISIT_SELECT_OP(format) \
+ void InstructionSelector::VisitS##format##Select(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64S128Select, g.DefineSameAsFirst(node), \
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
+ g.UseRegister(node->InputAt(2))); \
+ }
+SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
+#undef SIMD_VISIT_SELECT_OP
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
diff --git a/deps/v8/src/compiler/x87/instruction-selector-x87.cc b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
index c11ac287d0..b5594b8894 100644
--- a/deps/v8/src/compiler/x87/instruction-selector-x87.cc
+++ b/deps/v8/src/compiler/x87/instruction-selector-x87.cc
@@ -168,6 +168,14 @@ class X87OperandGenerator final : public OperandGenerator {
}
};
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int slot = frame_->AllocateSpillSlot(rep.size());
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
diff --git a/deps/v8/src/compiler/zone-stats.cc b/deps/v8/src/compiler/zone-stats.cc
index 8942df5555..626ad4072c 100644
--- a/deps/v8/src/compiler/zone-stats.cc
+++ b/deps/v8/src/compiler/zone-stats.cc
@@ -68,11 +68,11 @@ ZoneStats::~ZoneStats() {
DCHECK(stats_.empty());
}
-size_t ZoneStats::GetMaxAllocatedBytes() {
+size_t ZoneStats::GetMaxAllocatedBytes() const {
return std::max(max_allocated_bytes_, GetCurrentAllocatedBytes());
}
-size_t ZoneStats::GetCurrentAllocatedBytes() {
+size_t ZoneStats::GetCurrentAllocatedBytes() const {
size_t total = 0;
for (Zone* zone : zones_) {
total += static_cast<size_t>(zone->allocation_size());
@@ -80,7 +80,7 @@ size_t ZoneStats::GetCurrentAllocatedBytes() {
return total;
}
-size_t ZoneStats::GetTotalAllocatedBytes() {
+size_t ZoneStats::GetTotalAllocatedBytes() const {
return total_deleted_bytes_ + GetCurrentAllocatedBytes();
}
diff --git a/deps/v8/src/compiler/zone-stats.h b/deps/v8/src/compiler/zone-stats.h
index 39adca3693..6e0cd5fe4e 100644
--- a/deps/v8/src/compiler/zone-stats.h
+++ b/deps/v8/src/compiler/zone-stats.h
@@ -66,9 +66,9 @@ class V8_EXPORT_PRIVATE ZoneStats final {
explicit ZoneStats(AccountingAllocator* allocator);
~ZoneStats();
- size_t GetMaxAllocatedBytes();
- size_t GetTotalAllocatedBytes();
- size_t GetCurrentAllocatedBytes();
+ size_t GetMaxAllocatedBytes() const;
+ size_t GetTotalAllocatedBytes() const;
+ size_t GetCurrentAllocatedBytes() const;
private:
Zone* NewEmptyZone(const char* zone_name);
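The change above is a pure const-correctness fix: the getters do not mutate the ZoneStats, and const-qualifying them lets profiling code query statistics through const references. A minimal illustration of the difference, on an illustrative class:

    #include <cstddef>

    class Stats {
     public:
      size_t Current() const { return current_; }  // usable on const objects
      void Add(size_t n) { current_ += n; }
     private:
      size_t current_ = 0;
    };

    // Compiles only because Current() is const-qualified.
    size_t Report(const Stats& s) { return s.Current(); }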
diff --git a/deps/v8/src/contexts-inl.h b/deps/v8/src/contexts-inl.h
index 3c7ebe3a5b..8eb2750e1a 100644
--- a/deps/v8/src/contexts-inl.h
+++ b/deps/v8/src/contexts-inl.h
@@ -6,8 +6,10 @@
#define V8_CONTEXTS_INL_H_
#include "src/contexts.h"
+#include "src/heap/heap.h"
#include "src/objects-inl.h"
#include "src/objects/dictionary.h"
+#include "src/objects/map-inl.h"
#include "src/objects/regexp-match-info.h"
namespace v8 {
@@ -130,7 +132,7 @@ bool Context::IsScriptContext() {
return map == map->GetHeap()->script_context_map();
}
-bool Context::OptimizedCodeMapIsCleared() {
+bool Context::OSROptimizedCodeCacheIsCleared() {
return osr_code_table() == GetHeap()->empty_fixed_array();
}
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index e622807b81..a4795af0f2 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -416,18 +416,19 @@ static const int kOsrAstIdOffset = 2;
static const int kEntryLength = 3;
static const int kInitialLength = kEntryLength;
-int Context::SearchOptimizedCodeMapEntry(SharedFunctionInfo* shared,
- BailoutId osr_ast_id) {
+int Context::SearchOSROptimizedCodeCacheEntry(SharedFunctionInfo* shared,
+ BailoutId osr_ast_id) {
DisallowHeapAllocation no_gc;
DCHECK(this->IsNativeContext());
- if (!OptimizedCodeMapIsCleared()) {
- FixedArray* optimized_code_map = this->osr_code_table();
- int length = optimized_code_map->length();
+ DCHECK(!osr_ast_id.IsNone());
+ if (!OSROptimizedCodeCacheIsCleared()) {
+ FixedArray* osr_code_table = this->osr_code_table();
+ int length = osr_code_table->length();
Smi* osr_ast_id_smi = Smi::FromInt(osr_ast_id.ToInt());
for (int i = 0; i < length; i += kEntryLength) {
- if (WeakCell::cast(optimized_code_map->get(i + kSharedOffset))->value() ==
+ if (WeakCell::cast(osr_code_table->get(i + kSharedOffset))->value() ==
shared &&
- optimized_code_map->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
+ osr_code_table->get(i + kOsrAstIdOffset) == osr_ast_id_smi) {
return i;
}
}
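The search walks a flat FixedArray in strides of kEntryLength = 3, comparing the weak-cell'd SharedFunctionInfo at kSharedOffset and the Smi-encoded OSR ast id at kOsrAstIdOffset (the cached code presumably occupying the middle slot). A conceptual sketch with plain integers standing in for the tagged values:

    #include <cstdint>

    int SearchEntry(const intptr_t* table, int length,
                    intptr_t shared, intptr_t osr_ast_id) {
      const int kEntryLength = 3, kSharedOffset = 0, kOsrAstIdOffset = 2;
      for (int i = 0; i < length; i += kEntryLength) {
        if (table[i + kSharedOffset] == shared &&
            table[i + kOsrAstIdOffset] == osr_ast_id) {
          return i;  // index of the matching entry
        }
      }
      return -1;  // not found
    }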
@@ -435,10 +436,10 @@ int Context::SearchOptimizedCodeMapEntry(SharedFunctionInfo* shared,
return -1;
}
-Code* Context::SearchOptimizedCodeMap(SharedFunctionInfo* shared,
- BailoutId osr_ast_id) {
+Code* Context::SearchOSROptimizedCodeCache(SharedFunctionInfo* shared,
+ BailoutId osr_ast_id) {
DCHECK(this->IsNativeContext());
- int entry = SearchOptimizedCodeMapEntry(shared, osr_ast_id);
+ int entry = SearchOSROptimizedCodeCacheEntry(shared, osr_ast_id);
if (entry != -1) {
FixedArray* code_map = osr_code_table();
DCHECK_LE(entry + kEntryLength, code_map->length());
@@ -448,11 +449,13 @@ Code* Context::SearchOptimizedCodeMap(SharedFunctionInfo* shared,
return nullptr;
}
-void Context::AddToOptimizedCodeMap(Handle<Context> native_context,
- Handle<SharedFunctionInfo> shared,
- Handle<Code> code,
- BailoutId osr_ast_id) {
+void Context::AddToOSROptimizedCodeCache(Handle<Context> native_context,
+ Handle<SharedFunctionInfo> shared,
+ Handle<Code> code,
+ BailoutId osr_ast_id) {
DCHECK(native_context->IsNativeContext());
+ DCHECK(!osr_ast_id.IsNone());
+ DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
Isolate* isolate = native_context->GetIsolate();
if (isolate->serializer_enabled()) return;
@@ -460,12 +463,13 @@ void Context::AddToOptimizedCodeMap(Handle<Context> native_context,
Handle<FixedArray> new_code_map;
int entry;
- if (native_context->OptimizedCodeMapIsCleared()) {
+ if (native_context->OSROptimizedCodeCacheIsCleared()) {
new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
entry = 0;
} else {
Handle<FixedArray> old_code_map(native_context->osr_code_table(), isolate);
- entry = native_context->SearchOptimizedCodeMapEntry(*shared, osr_ast_id);
+ entry =
+ native_context->SearchOSROptimizedCodeCacheEntry(*shared, osr_ast_id);
if (entry >= 0) {
// Just set the code of the entry.
Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
@@ -517,11 +521,11 @@ void Context::AddToOptimizedCodeMap(Handle<Context> native_context,
}
}
-void Context::EvictFromOptimizedCodeMap(Code* optimized_code,
- const char* reason) {
+void Context::EvictFromOSROptimizedCodeCache(Code* optimized_code,
+ const char* reason) {
DCHECK(IsNativeContext());
DisallowHeapAllocation no_gc;
- if (OptimizedCodeMapIsCleared()) return;
+ if (OSROptimizedCodeCacheIsCleared()) return;
Heap* heap = GetHeap();
FixedArray* code_map = osr_code_table();
@@ -556,12 +560,12 @@ void Context::EvictFromOptimizedCodeMap(Code* optimized_code,
    // Always trim, even when the array is cleared, because the heap
    // verifier expects it.
heap->RightTrimFixedArray(code_map, length - dst);
if (code_map->length() == 0) {
- ClearOptimizedCodeMap();
+ ClearOSROptimizedCodeCache();
}
}
}
-void Context::ClearOptimizedCodeMap() {
+void Context::ClearOSROptimizedCodeCache() {
DCHECK(IsNativeContext());
FixedArray* empty_fixed_array = GetHeap()->empty_fixed_array();
set_osr_code_table(empty_fixed_array);
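Eviction above compacts surviving entries toward the front and trims the array once at the end; ClearOSROptimizedCodeCache then resets the table to the shared empty FixedArray. The compaction pattern, sketched on a plain vector:

    #include <algorithm>
    #include <vector>

    void EvictAndTrim(std::vector<int>& table, int evicted_key) {
      const int kEntryLength = 3;
      size_t dst = 0;
      for (size_t src = 0; src < table.size(); src += kEntryLength) {
        if (table[src] == evicted_key) continue;  // drop this entry
        std::copy_n(table.begin() + src, kEntryLength, table.begin() + dst);
        dst += kEntryLength;
      }
      table.resize(dst);  // RightTrimFixedArray plays this role on the heap
    }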
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index ee13995aaa..8377139edd 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -5,7 +5,6 @@
#ifndef V8_CONTEXTS_H_
#define V8_CONTEXTS_H_
-#include "src/heap/heap.h"
#include "src/objects.h"
namespace v8 {
@@ -35,64 +34,58 @@ enum ContextLookupFlags {
// must always be allocated via Heap::AllocateContext() or
// Factory::NewContext.
-#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
- V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction, \
- async_function_await_caught) \
- V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction, \
- async_function_await_uncaught) \
- V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
- async_function_promise_create) \
- V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction, \
- async_function_promise_release) \
- V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
- V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
- V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site) \
- V(MAKE_ERROR_INDEX, JSFunction, make_error) \
- V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
- V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
- V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
- V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
- V(OBJECT_CREATE, JSFunction, object_create) \
- V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
- V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
- V(OBJECT_FREEZE, JSFunction, object_freeze) \
- V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
- V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
- V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
- V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
- V(OBJECT_KEYS, JSFunction, object_keys) \
- V(REGEXP_INTERNAL_MATCH, JSFunction, regexp_internal_match) \
- V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
- V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
- V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
- V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
- V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
- V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
- V(TYPED_ARRAY_CONSTRUCT_BY_ARRAY_BUFFER_INDEX, JSFunction, \
- typed_array_construct_by_array_buffer) \
- V(TYPED_ARRAY_CONSTRUCT_BY_ARRAY_LIKE_INDEX, JSFunction, \
- typed_array_construct_by_array_like) \
- V(TYPED_ARRAY_CONSTRUCT_BY_LENGTH_INDEX, JSFunction, \
- typed_array_construct_by_length) \
- V(TYPED_ARRAY_INITIALIZE_INDEX, JSFunction, typed_array_initialize) \
- V(TYPED_ARRAY_SET_FROM_ARRAY_LIKE, JSFunction, \
- typed_array_set_from_array_like) \
- V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
- V(MATH_POW_INDEX, JSFunction, math_pow) \
- V(NEW_PROMISE_CAPABILITY_INDEX, JSFunction, new_promise_capability) \
- V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
- promise_internal_constructor) \
- V(PROMISE_INTERNAL_REJECT_INDEX, JSFunction, promise_internal_reject) \
- V(IS_PROMISE_INDEX, JSFunction, is_promise) \
- V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
- V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
- V(PROMISE_HANDLE_INDEX, JSFunction, promise_handle) \
- V(PROMISE_HANDLE_REJECT_INDEX, JSFunction, promise_handle_reject) \
- V(ASYNC_GENERATOR_AWAIT_CAUGHT, JSFunction, async_generator_await_caught) \
- V(ASYNC_GENERATOR_AWAIT_UNCAUGHT, JSFunction, \
- async_generator_await_uncaught) \
- V(ASYNC_GENERATOR_YIELD, JSFunction, async_generator_yield) \
- V(ASYNC_GENERATOR_RAW_YIELD, JSFunction, async_generator_raw_yield)
+#define NATIVE_CONTEXT_INTRINSIC_FUNCTIONS(V) \
+ V(ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX, JSFunction, \
+ async_function_await_caught) \
+ V(ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX, JSFunction, \
+ async_function_await_uncaught) \
+ V(ASYNC_FUNCTION_PROMISE_CREATE_INDEX, JSFunction, \
+ async_function_promise_create) \
+ V(ASYNC_FUNCTION_PROMISE_RELEASE_INDEX, JSFunction, \
+ async_function_promise_release) \
+ V(IS_ARRAYLIKE, JSFunction, is_arraylike) \
+ V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
+ V(GET_TEMPLATE_CALL_SITE_INDEX, JSFunction, get_template_call_site) \
+ V(MAKE_ERROR_INDEX, JSFunction, make_error) \
+ V(MAKE_RANGE_ERROR_INDEX, JSFunction, make_range_error) \
+ V(MAKE_SYNTAX_ERROR_INDEX, JSFunction, make_syntax_error) \
+ V(MAKE_TYPE_ERROR_INDEX, JSFunction, make_type_error) \
+ V(MAKE_URI_ERROR_INDEX, JSFunction, make_uri_error) \
+ V(OBJECT_CREATE, JSFunction, object_create) \
+ V(OBJECT_DEFINE_PROPERTIES, JSFunction, object_define_properties) \
+ V(OBJECT_DEFINE_PROPERTY, JSFunction, object_define_property) \
+ V(OBJECT_FREEZE, JSFunction, object_freeze) \
+ V(OBJECT_GET_PROTOTYPE_OF, JSFunction, object_get_prototype_of) \
+ V(OBJECT_IS_EXTENSIBLE, JSFunction, object_is_extensible) \
+ V(OBJECT_IS_FROZEN, JSFunction, object_is_frozen) \
+ V(OBJECT_IS_SEALED, JSFunction, object_is_sealed) \
+ V(OBJECT_KEYS, JSFunction, object_keys) \
+ V(REGEXP_INTERNAL_MATCH, JSFunction, regexp_internal_match) \
+ V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
+ V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
+ V(REFLECT_DEFINE_PROPERTY_INDEX, JSFunction, reflect_define_property) \
+ V(REFLECT_DELETE_PROPERTY_INDEX, JSFunction, reflect_delete_property) \
+ V(SPREAD_ARGUMENTS_INDEX, JSFunction, spread_arguments) \
+ V(SPREAD_ITERABLE_INDEX, JSFunction, spread_iterable) \
+ V(TYPED_ARRAY_CONSTRUCT_BY_ARRAY_BUFFER_INDEX, JSFunction, \
+ typed_array_construct_by_array_buffer) \
+ V(TYPED_ARRAY_CONSTRUCT_BY_ARRAY_LIKE_INDEX, JSFunction, \
+ typed_array_construct_by_array_like) \
+ V(TYPED_ARRAY_CONSTRUCT_BY_LENGTH_INDEX, JSFunction, \
+ typed_array_construct_by_length) \
+ V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
+ V(MATH_POW_INDEX, JSFunction, math_pow) \
+ V(NEW_PROMISE_CAPABILITY_INDEX, JSFunction, new_promise_capability) \
+ V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
+ promise_internal_constructor) \
+ V(PROMISE_INTERNAL_REJECT_INDEX, JSFunction, promise_internal_reject) \
+ V(IS_PROMISE_INDEX, JSFunction, is_promise) \
+ V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
+ V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
+ V(PROMISE_HANDLE_INDEX, JSFunction, promise_handle) \
+ V(PROMISE_HANDLE_REJECT_INDEX, JSFunction, promise_handle_reject) \
+ V(ASYNC_GENERATOR_AWAIT_CAUGHT, JSFunction, async_generator_await_caught) \
+ V(ASYNC_GENERATOR_AWAIT_UNCAUGHT, JSFunction, async_generator_await_uncaught)
#define NATIVE_CONTEXT_IMPORTED_FIELDS(V) \
V(ARRAY_CONCAT_INDEX, JSFunction, array_concat) \
@@ -240,6 +233,7 @@ enum ContextLookupFlags {
V(DATA_PROPERTY_DESCRIPTOR_MAP_INDEX, Map, data_property_descriptor_map) \
V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
+ V(DEBUG_CONTEXT_ID_INDEX, Object, debug_context_id) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
V(ERRORS_THROWN_INDEX, Smi, errors_thrown) \
@@ -349,6 +343,8 @@ enum ContextLookupFlags {
V(SLOW_ALIASED_ARGUMENTS_MAP_INDEX, Map, slow_aliased_arguments_map) \
V(SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP, Map, \
slow_object_with_null_prototype_map) \
+ V(SLOW_OBJECT_WITH_OBJECT_PROTOTYPE_MAP, Map, \
+ slow_object_with_object_prototype_map) \
V(SLOW_TEMPLATE_INSTANTIATIONS_CACHE_INDEX, UnseededNumberDictionary, \
slow_template_instantiations_cache) \
V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map) \
@@ -597,22 +593,22 @@ class Context: public FixedArray {
// Removes a specific optimized code object from the optimized code map.
  // In the non-OSR case the code reference is cleared from the cache entry,
  // but the entry itself is left in the map so that literal sharing can
  // continue.
- void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
+ void EvictFromOSROptimizedCodeCache(Code* optimized_code, const char* reason);
// Clear optimized code map.
- void ClearOptimizedCodeMap();
+ void ClearOSROptimizedCodeCache();
  // A native context keeps track of all OSR'd optimized functions.
- inline bool OptimizedCodeMapIsCleared();
- Code* SearchOptimizedCodeMap(SharedFunctionInfo* shared,
- BailoutId osr_ast_id);
- int SearchOptimizedCodeMapEntry(SharedFunctionInfo* shared,
- BailoutId osr_ast_id);
-
- static void AddToOptimizedCodeMap(Handle<Context> native_context,
- Handle<SharedFunctionInfo> shared,
- Handle<Code> code,
+ inline bool OSROptimizedCodeCacheIsCleared();
+ Code* SearchOSROptimizedCodeCache(SharedFunctionInfo* shared,
BailoutId osr_ast_id);
+ int SearchOSROptimizedCodeCacheEntry(SharedFunctionInfo* shared,
+ BailoutId osr_ast_id);
+
+ static void AddToOSROptimizedCodeCache(Handle<Context> native_context,
+ Handle<SharedFunctionInfo> shared,
+ Handle<Code> code,
+ BailoutId osr_ast_id);
// A native context holds a list of all functions with optimized code.
void AddOptimizedFunction(JSFunction* function);
diff --git a/deps/v8/src/counters.h b/deps/v8/src/counters.h
index 44fb9ae0d4..bb917cc518 100644
--- a/deps/v8/src/counters.h
+++ b/deps/v8/src/counters.h
@@ -679,6 +679,7 @@ class RuntimeCallTimer final {
V(UnboundScript_GetName) \
V(UnboundScript_GetSourceMappingURL) \
V(UnboundScript_GetSourceURL) \
+ V(Value_InstanceOf) \
V(Value_TypeOf) \
V(ValueDeserializer_ReadHeader) \
V(ValueDeserializer_ReadValue) \
@@ -715,6 +716,9 @@ class RuntimeCallTimer final {
V(FunctionCallback) \
V(GC) \
V(GC_AllAvailableGarbage) \
+ V(GC_IncrementalMarkingJob) \
+ V(GC_IncrementalMarkingObserver) \
+ V(GC_SlowAllocateRaw) \
V(GCEpilogueCallback) \
V(GCPrologueCallback) \
V(GenericNamedPropertyDefinerCallback) \
@@ -935,7 +939,8 @@ class RuntimeCallTimerScope {
51) \
HR(wasm_functions_per_wasm_module, V8.WasmFunctionsPerModule.wasm, 1, \
100000, 51) \
- HR(array_buffer_big_allocations, V8.ArrayBufferBigAllocations, 0, 4096, 13) \
+ HR(array_buffer_big_allocations, V8.ArrayBufferLargeAllocations, 0, 4096, \
+ 13) \
HR(array_buffer_new_size_failures, V8.ArrayBufferNewSizeFailures, 0, 4096, 13)
#define HISTOGRAM_TIMER_LIST(HT) \
@@ -1015,24 +1020,25 @@ class RuntimeCallTimerScope {
HM(heap_sample_code_space_committed, V8.MemoryHeapSampleCodeSpaceCommitted) \
HM(heap_sample_maximum_committed, V8.MemoryHeapSampleMaximumCommitted)
-#define HISTOGRAM_MEMORY_LIST(HM) \
- HM(memory_heap_committed, V8.MemoryHeapCommitted) \
- HM(memory_heap_used, V8.MemoryHeapUsed) \
- /* Asm/Wasm */ \
- HM(wasm_decode_asm_module_peak_memory_bytes, \
- V8.WasmDecodeModulePeakMemoryBytes.asm) \
- HM(wasm_decode_wasm_module_peak_memory_bytes, \
- V8.WasmDecodeModulePeakMemoryBytes.wasm) \
- HM(wasm_compile_function_peak_memory_bytes, \
- V8.WasmCompileFunctionPeakMemoryBytes) \
- HM(wasm_asm_min_mem_pages_count, V8.WasmMinMemPagesCount.asm) \
- HM(wasm_wasm_min_mem_pages_count, V8.WasmMinMemPagesCount.wasm) \
- HM(wasm_asm_max_mem_pages_count, V8.WasmMaxMemPagesCount.asm) \
- HM(wasm_wasm_max_mem_pages_count, V8.WasmMaxMemPagesCount.wasm) \
- HM(wasm_asm_function_size_bytes, V8.WasmFunctionSizeBytes.asm) \
- HM(wasm_wasm_function_size_bytes, V8.WasmFunctionSizeBytes.wasm) \
- HM(wasm_asm_module_size_bytes, V8.WasmModuleSizeBytes.asm) \
- HM(wasm_wasm_module_size_bytes, V8.WasmModuleSizeBytes.wasm)
+#define HISTOGRAM_MEMORY_LIST(HM) \
+ HM(memory_heap_committed, V8.MemoryHeapCommitted) \
+ HM(memory_heap_used, V8.MemoryHeapUsed) \
+ /* Asm/Wasm */ \
+ HM(wasm_decode_asm_module_peak_memory_bytes, \
+ V8.WasmDecodeModulePeakMemoryBytes.asm) \
+ HM(wasm_decode_wasm_module_peak_memory_bytes, \
+ V8.WasmDecodeModulePeakMemoryBytes.wasm) \
+ HM(wasm_compile_function_peak_memory_bytes, \
+ V8.WasmCompileFunctionPeakMemoryBytes) \
+ HM(wasm_asm_min_mem_pages_count, V8.WasmMinMemPagesCount.asm) \
+ HM(wasm_wasm_min_mem_pages_count, V8.WasmMinMemPagesCount.wasm) \
+ HM(wasm_wasm_max_mem_pages_count, V8.WasmMaxMemPagesCount.wasm) \
+ HM(wasm_asm_function_size_bytes, V8.WasmFunctionSizeBytes.asm) \
+ HM(wasm_wasm_function_size_bytes, V8.WasmFunctionSizeBytes.wasm) \
+ HM(wasm_asm_module_size_bytes, V8.WasmModuleSizeBytes.asm) \
+ HM(wasm_wasm_module_size_bytes, V8.WasmModuleSizeBytes.wasm) \
+ HM(asm_wasm_translation_peak_memory_bytes, \
+ V8.AsmWasmTranslationPeakMemoryBytes)
// WARNING: STATS_COUNTER_LIST_* is a very large macro that is causing MSVC
// Intellisense to crash. It was broken into two macros (each of length 40
diff --git a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
index 5671afa8c3..8d3924db7d 100644
--- a/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -5293,7 +5293,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ ldr(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand::Zero());
diff --git a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
index f23b6e31ed..c86971c6ce 100644
--- a/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
+++ b/deps/v8/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -2667,7 +2667,8 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ Bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
- __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ Ldr(result,
+ FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
__ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
DeoptimizeIfZero(result, instr, DeoptimizeReason::kNoCache);
diff --git a/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc b/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
index e1eb11692f..89b2b7aede 100644
--- a/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
+++ b/deps/v8/src/crankshaft/hydrogen-environment-liveness.cc
@@ -222,7 +222,7 @@ void HEnvironmentLivenessAnalysisPhase::Run() {
#ifdef DEBUG
bool HEnvironmentLivenessAnalysisPhase::VerifyClosures(
Handle<JSFunction> a, Handle<JSFunction> b) {
- Heap::RelocationLock for_heap_access(isolate()->heap());
+ base::LockGuard<base::Mutex> guard(isolate()->heap()->relocation_mutex());
AllowHandleDereference for_verification;
return a.is_identical_to(b);
}
diff --git a/deps/v8/src/crankshaft/hydrogen-gvn.cc b/deps/v8/src/crankshaft/hydrogen-gvn.cc
index e586f4778f..70320052b0 100644
--- a/deps/v8/src/crankshaft/hydrogen-gvn.cc
+++ b/deps/v8/src/crankshaft/hydrogen-gvn.cc
@@ -5,8 +5,6 @@
#include "src/crankshaft/hydrogen-gvn.h"
#include "src/crankshaft/hydrogen.h"
-#include "src/list.h"
-#include "src/list-inl.h"
#include "src/objects-inl.h"
#include "src/v8.h"
@@ -653,23 +651,19 @@ SideEffects
HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
HBasicBlock* dominator, HBasicBlock* dominated) {
SideEffects side_effects;
- List<HBasicBlock*> blocks;
- for (;;) {
- for (int i = 0; i < dominated->predecessors()->length(); ++i) {
- HBasicBlock* block = dominated->predecessors()->at(i);
- if (dominator->block_id() < block->block_id() &&
- block->block_id() < dominated->block_id() &&
- !visited_on_paths_.Contains(block->block_id())) {
- visited_on_paths_.Add(block->block_id());
- side_effects.Add(block_side_effects_[block->block_id()]);
- if (block->IsLoopHeader()) {
- side_effects.Add(loop_side_effects_[block->block_id()]);
- }
- blocks.Add(block);
+ for (int i = 0; i < dominated->predecessors()->length(); ++i) {
+ HBasicBlock* block = dominated->predecessors()->at(i);
+ if (dominator->block_id() < block->block_id() &&
+ block->block_id() < dominated->block_id() &&
+ !visited_on_paths_.Contains(block->block_id())) {
+ visited_on_paths_.Add(block->block_id());
+ side_effects.Add(block_side_effects_[block->block_id()]);
+ if (block->IsLoopHeader()) {
+ side_effects.Add(loop_side_effects_[block->block_id()]);
}
+ side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
+ dominator, block));
}
- if (blocks.is_empty()) break;
- dominated = blocks.RemoveLast();
}
return side_effects;
}
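The rewrite drops the src/list.h dependency by replacing the explicit worklist with recursion: each unvisited block whose id lies strictly between the dominator's and the dominated block's contributes its side effects, and the traversal continues from that block's own predecessors. The shape of the traversal, simplified (loop-header effects omitted):

    #include <vector>

    void CollectEffects(int dominator, int dominated,
                        const std::vector<std::vector<int>>& predecessors,
                        const std::vector<unsigned>& block_effects,
                        std::vector<bool>& visited, unsigned& effects) {
      for (int block : predecessors[dominated]) {
        if (dominator < block && block < dominated && !visited[block]) {
          visited[block] = true;
          effects |= block_effects[block];
          CollectEffects(dominator, block, predecessors, block_effects,
                         visited, effects);
        }
      }
    }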
diff --git a/deps/v8/src/crankshaft/hydrogen-instructions.h b/deps/v8/src/crankshaft/hydrogen-instructions.h
index 3ff0194710..8874f9aabd 100644
--- a/deps/v8/src/crankshaft/hydrogen-instructions.h
+++ b/deps/v8/src/crankshaft/hydrogen-instructions.h
@@ -5198,11 +5198,6 @@ class HObjectAccess final {
return HObjectAccess(kInobject, SharedFunctionInfo::kCodeOffset);
}
- static HObjectAccess ForOptimizedCodeMap() {
- return HObjectAccess(kInobject,
- SharedFunctionInfo::kOptimizedCodeMapOffset);
- }
-
static HObjectAccess ForFunctionContextPointer() {
return HObjectAccess(kInobject, JSFunction::kContextOffset);
}
diff --git a/deps/v8/src/crankshaft/hydrogen.cc b/deps/v8/src/crankshaft/hydrogen.cc
index e3794e33ff..5a110f4aa8 100644
--- a/deps/v8/src/crankshaft/hydrogen.cc
+++ b/deps/v8/src/crankshaft/hydrogen.cc
@@ -42,6 +42,7 @@
// GetRootConstructor
#include "src/ic/ic-inl.h"
#include "src/isolate-inl.h"
+#include "src/objects/map.h"
#include "src/runtime/runtime.h"
#if V8_TARGET_ARCH_IA32
@@ -117,7 +118,7 @@ class HOptimizedGraphBuilderWithPositions : public HOptimizedGraphBuilder {
};
HCompilationJob::Status HCompilationJob::PrepareJobImpl() {
- if (!isolate()->use_crankshaft() ||
+ if (!isolate()->use_optimizer() ||
info()->shared_info()->must_use_ignition_turbo()) {
// Crankshaft is entirely disabled.
return FAILED;
@@ -738,7 +739,7 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
void HGraph::Verify(bool do_full_verify) const {
- Heap::RelocationLock relocation_lock(isolate()->heap());
+ base::LockGuard<base::Mutex> guard(isolate()->heap()->relocation_mutex());
AllowHandleDereference allow_deref;
AllowDeferredHandleDereference allow_deferred_deref;
for (int i = 0; i < blocks_.length(); i++) {
@@ -1592,172 +1593,6 @@ HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
return environment()->Pop();
}
-HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
- int32_t seed_value = static_cast<uint32_t>(isolate()->heap()->HashSeed());
- HValue* seed = Add<HConstant>(seed_value);
- HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, index, seed);
-
- // hash = ~hash + (hash << 15);
- HValue* shifted_hash = AddUncasted<HShl>(hash, Add<HConstant>(15));
- HValue* not_hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash,
- graph()->GetConstantMinus1());
- hash = AddUncasted<HAdd>(shifted_hash, not_hash);
-
- // hash = hash ^ (hash >> 12);
- shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(12));
- hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
-
- // hash = hash + (hash << 2);
- shifted_hash = AddUncasted<HShl>(hash, Add<HConstant>(2));
- hash = AddUncasted<HAdd>(hash, shifted_hash);
-
- // hash = hash ^ (hash >> 4);
- shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(4));
- hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
-
- // hash = hash * 2057;
- hash = AddUncasted<HMul>(hash, Add<HConstant>(2057));
- hash->ClearFlag(HValue::kCanOverflow);
-
- // hash = hash ^ (hash >> 16);
- shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(16));
- return AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
-}
-
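The deleted builder emitted, instruction by instruction, the well-known integer mix the inline comments spell out (a Thomas Wang-style hash, seeded with the heap's hash seed). For reference, the same computation as plain C++:

    #include <cstdint>

    uint32_t ComputeSeededHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }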
-HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
- HValue* elements,
- HValue* key,
- HValue* hash) {
- HValue* capacity =
- Add<HLoadKeyed>(elements, Add<HConstant>(NameDictionary::kCapacityIndex),
- nullptr, nullptr, FAST_ELEMENTS);
-
- HValue* mask = AddUncasted<HSub>(capacity, graph()->GetConstant1());
- mask->ChangeRepresentation(Representation::Integer32());
- mask->ClearFlag(HValue::kCanOverflow);
-
- HValue* entry = hash;
- HValue* count = graph()->GetConstant1();
- Push(entry);
- Push(count);
-
- HIfContinuation return_or_loop_continuation(graph()->CreateBasicBlock(),
- graph()->CreateBasicBlock());
- HIfContinuation found_key_match_continuation(graph()->CreateBasicBlock(),
- graph()->CreateBasicBlock());
- LoopBuilder probe_loop(this);
- probe_loop.BeginBody(2); // Drop entry, count from last environment to
- // appease live range building without simulates.
-
- count = Pop();
- entry = Pop();
- entry = AddUncasted<HBitwise>(Token::BIT_AND, entry, mask);
- int entry_size = SeededNumberDictionary::kEntrySize;
- HValue* base_index = AddUncasted<HMul>(entry, Add<HConstant>(entry_size));
- base_index->ClearFlag(HValue::kCanOverflow);
- int start_offset = SeededNumberDictionary::kElementsStartIndex;
- HValue* key_index =
- AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset));
- key_index->ClearFlag(HValue::kCanOverflow);
-
- HValue* candidate_key =
- Add<HLoadKeyed>(elements, key_index, nullptr, nullptr, FAST_ELEMENTS);
- IfBuilder if_undefined(this);
- if_undefined.If<HCompareObjectEqAndBranch>(candidate_key,
- graph()->GetConstantUndefined());
- if_undefined.Then();
- {
- // element == undefined means "not found". Call the runtime.
- // TODO(jkummerow): walk the prototype chain instead.
- Add<HPushArguments>(receiver, key);
- Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kKeyedGetProperty),
- 2));
- }
- if_undefined.Else();
- {
- IfBuilder if_match(this);
- if_match.If<HCompareObjectEqAndBranch>(candidate_key, key);
- if_match.Then();
- if_match.Else();
-
- // Update non-internalized string in the dictionary with internalized key?
- IfBuilder if_update_with_internalized(this);
- HValue* smi_check =
- if_update_with_internalized.IfNot<HIsSmiAndBranch>(candidate_key);
- if_update_with_internalized.And();
- HValue* map = AddLoadMap(candidate_key, smi_check);
- HValue* instance_type =
- Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
- HValue* not_internalized_bit = AddUncasted<HBitwise>(
- Token::BIT_AND, instance_type,
- Add<HConstant>(static_cast<int>(kIsNotInternalizedMask)));
- if_update_with_internalized.If<HCompareNumericAndBranch>(
- not_internalized_bit, graph()->GetConstant0(), Token::NE);
- if_update_with_internalized.And();
- if_update_with_internalized.IfNot<HCompareObjectEqAndBranch>(
- candidate_key, graph()->GetConstantHole());
- if_update_with_internalized.AndIf<HStringCompareAndBranch>(candidate_key,
- key, Token::EQ);
- if_update_with_internalized.Then();
- // Replace a key that is a non-internalized string by the equivalent
- // internalized string for faster further lookups.
- Add<HStoreKeyed>(elements, key_index, key, nullptr, FAST_ELEMENTS);
- if_update_with_internalized.Else();
-
- if_update_with_internalized.JoinContinuation(&found_key_match_continuation);
- if_match.JoinContinuation(&found_key_match_continuation);
-
- IfBuilder found_key_match(this, &found_key_match_continuation);
- found_key_match.Then();
- // Key at current probe matches. Relevant bits in the |details| field must
- // be zero, otherwise the dictionary element requires special handling.
- HValue* details_index =
- AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 2));
- details_index->ClearFlag(HValue::kCanOverflow);
- HValue* details = Add<HLoadKeyed>(elements, details_index, nullptr, nullptr,
- FAST_ELEMENTS);
- int details_mask = PropertyDetails::KindField::kMask;
- details = AddUncasted<HBitwise>(Token::BIT_AND, details,
- Add<HConstant>(details_mask));
- IfBuilder details_compare(this);
- details_compare.If<HCompareNumericAndBranch>(details, New<HConstant>(kData),
- Token::EQ);
- details_compare.Then();
- HValue* result_index =
- AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 1));
- result_index->ClearFlag(HValue::kCanOverflow);
- Push(Add<HLoadKeyed>(elements, result_index, nullptr, nullptr,
- FAST_ELEMENTS));
- details_compare.Else();
- Add<HPushArguments>(receiver, key);
- Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kKeyedGetProperty),
- 2));
- details_compare.End();
-
- found_key_match.Else();
- found_key_match.JoinContinuation(&return_or_loop_continuation);
- }
- if_undefined.JoinContinuation(&return_or_loop_continuation);
-
- IfBuilder return_or_loop(this, &return_or_loop_continuation);
- return_or_loop.Then();
- probe_loop.Break();
-
- return_or_loop.Else();
- entry = AddUncasted<HAdd>(entry, count);
- entry->ClearFlag(HValue::kCanOverflow);
- count = AddUncasted<HAdd>(count, graph()->GetConstant1());
- count->ClearFlag(HValue::kCanOverflow);
- Push(entry);
- Push(count);
-
- probe_loop.EndBody();
-
- return_or_loop.End();
-
- return Pop();
-}
-
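The deleted dictionary load followed the open-addressing scheme of SeededNumberDictionary: the table capacity is a power of two, and the probe offset grows by one each iteration (entry += count), so successive probes land on triangular-number offsets and eventually visit every slot. Sketched with plain arrays; an empty sentinel signals a miss, which the removed code handed off to the runtime:

    #include <cstdint>

    int FindEntry(const uint32_t* keys, uint32_t capacity,
                  uint32_t key, uint32_t hash, uint32_t empty_sentinel) {
      uint32_t mask = capacity - 1;  // capacity is a power of two
      uint32_t entry = hash & mask;
      for (uint32_t count = 1; ; ++count) {
        if (keys[entry] == key) return static_cast<int>(entry);
        if (keys[entry] == empty_sentinel) return -1;  // miss
        entry = (entry + count) & mask;  // triangular-number probing
      }
    }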
HValue* HGraphBuilder::BuildCreateIterResultObject(HValue* value,
HValue* done) {
NoObservableSideEffectsScope scope(this);
@@ -2969,74 +2804,6 @@ HInstruction* HGraphBuilder::BuildGetNativeContext() {
HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
}
-
-HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* closure) {
- // Get the global object, then the native context
- HInstruction* context = Add<HLoadNamedField>(
- closure, nullptr, HObjectAccess::ForFunctionContextPointer());
- return Add<HLoadNamedField>(
- context, nullptr,
- HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
-}
-
-
-HValue* HGraphBuilder::BuildGetParentContext(HValue* depth, int depth_value) {
- HValue* script_context = context();
- if (depth != NULL) {
- HValue* zero = graph()->GetConstant0();
-
- Push(script_context);
- Push(depth);
-
- LoopBuilder loop(this);
- loop.BeginBody(2); // Drop script_context and depth from last environment
- // to appease live range building without simulates.
- depth = Pop();
- script_context = Pop();
-
- script_context = Add<HLoadNamedField>(
- script_context, nullptr,
- HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
- depth = AddUncasted<HSub>(depth, graph()->GetConstant1());
- depth->ClearFlag(HValue::kCanOverflow);
-
- IfBuilder if_break(this);
- if_break.If<HCompareNumericAndBranch, HValue*>(depth, zero, Token::EQ);
- if_break.Then();
- {
- Push(script_context); // The result.
- loop.Break();
- }
- if_break.Else();
- {
- Push(script_context);
- Push(depth);
- }
- loop.EndBody();
- if_break.End();
-
- script_context = Pop();
- } else if (depth_value > 0) {
- // Unroll the above loop.
- for (int i = 0; i < depth_value; i++) {
- script_context = Add<HLoadNamedField>(
- script_context, nullptr,
- HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
- }
- }
- return script_context;
-}
-
-
-HInstruction* HGraphBuilder::BuildGetArrayFunction() {
- HInstruction* native_context = BuildGetNativeContext();
- HInstruction* index =
- Add<HConstant>(static_cast<int32_t>(Context::ARRAY_FUNCTION_INDEX));
- return Add<HLoadKeyed>(native_context, index, nullptr, nullptr,
- FAST_ELEMENTS);
-}
-
-
HValue* HGraphBuilder::BuildArrayBufferViewFieldAccessor(HValue* object,
HValue* checked_object,
FieldIndex index) {
@@ -3066,12 +2833,6 @@ HValue* HGraphBuilder::BuildArrayBufferViewFieldAccessor(HValue* object,
return Pop();
}
-HValue* HGraphBuilder::AddLoadJSBuiltin(int context_index) {
- HValue* native_context = BuildGetNativeContext();
- HObjectAccess function_access = HObjectAccess::ForContextSlot(context_index);
- return Add<HLoadNamedField>(native_context, nullptr, function_access);
-}
-
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
bool track_positions)
: HGraphBuilder(info, CallInterfaceDescriptor(), track_positions),
@@ -7154,9 +6915,11 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
}
// Elements_kind transition support.
- MapHandleList transition_target(maps->length());
+ MapHandles transition_target;
+ transition_target.reserve(maps->length());
// Collect possible transition targets.
- MapHandleList possible_transitioned_maps(maps->length());
+ MapHandles possible_transitioned_maps;
+ possible_transitioned_maps.reserve(maps->length());
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
// Loads from strings or loads with a mix of string and non-string maps
@@ -7165,7 +6928,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
ElementsKind elements_kind = map->elements_kind();
if (CanInlineElementAccess(map) && IsFastElementsKind(elements_kind) &&
elements_kind != GetInitialFastElementsKind()) {
- possible_transitioned_maps.Add(map);
+ possible_transitioned_maps.push_back(map);
}
if (IsSloppyArgumentsElementsKind(elements_kind)) {
HInstruction* result =
@@ -7177,17 +6940,20 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
// Get transition target for each map (NULL == no transition).
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
+ // Don't generate elements kind transitions from stable maps.
Map* transitioned_map =
- map->FindElementsKindTransitionedMap(&possible_transitioned_maps);
+ map->is_stable()
+ ? nullptr
+ : map->FindElementsKindTransitionedMap(possible_transitioned_maps);
if (transitioned_map != nullptr) {
- DCHECK(!map->is_stable());
- transition_target.Add(handle(transitioned_map));
+ transition_target.push_back(handle(transitioned_map));
} else {
- transition_target.Add(Handle<Map>());
+ transition_target.push_back(Handle<Map>());
}
}
- MapHandleList untransitionable_maps(maps->length());
+ MapHandles untransitionable_maps;
+ untransitionable_maps.reserve(maps->length());
HTransitionElementsKind* transition = NULL;
for (int i = 0; i < maps->length(); ++i) {
Handle<Map> map = maps->at(i);
@@ -7199,14 +6965,14 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
transition = Add<HTransitionElementsKind>(object, map,
transition_target.at(i));
} else {
- untransitionable_maps.Add(map);
+ untransitionable_maps.push_back(map);
}
}
// If only one map is left after transitioning, handle this case
// monomorphically.
- DCHECK(untransitionable_maps.length() >= 1);
- if (untransitionable_maps.length() == 1) {
+ DCHECK(untransitionable_maps.size() >= 1);
+ if (untransitionable_maps.size() == 1) {
Handle<Map> untransitionable_map = untransitionable_maps[0];
HInstruction* instr = NULL;
if (!CanInlineElementAccess(untransitionable_map)) {
@@ -7223,8 +6989,7 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
HBasicBlock* join = graph()->CreateBasicBlock();
- for (int i = 0; i < untransitionable_maps.length(); ++i) {
- Handle<Map> map = untransitionable_maps[i];
+ for (Handle<Map> map : untransitionable_maps) {
ElementsKind elements_kind = map->elements_kind();
HBasicBlock* this_map = graph()->CreateBasicBlock();
HBasicBlock* other_map = graph()->CreateBasicBlock();
@@ -9102,7 +8867,6 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(
}
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
Handle<Object> call_data_obj(api_call_info->data(), isolate());
- bool call_data_undefined = call_data_obj->IsUndefined(isolate());
HValue* call_data = Add<HConstant>(call_data_obj);
ApiFunction fun(v8::ToCData<Address>(api_call_info->callback()));
ExternalReference ref = ExternalReference(&fun,
@@ -9116,7 +8880,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(
HInstruction* call = nullptr;
CHECK(argc <= CallApiCallbackStub::kArgMax);
if (!is_function) {
- CallApiCallbackStub stub(isolate(), is_store, call_data_undefined,
+ CallApiCallbackStub stub(isolate(), is_store,
!optimization.is_constant_call());
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
@@ -9124,7 +8888,7 @@ bool HOptimizedGraphBuilder::TryInlineApiCall(
code_value, argc + 1, stub.GetCallInterfaceDescriptor(),
Vector<HValue*>(op_vals, arraysize(op_vals)), syntactic_tail_call_mode);
} else {
- CallApiCallbackStub stub(isolate(), argc, call_data_undefined, false);
+ CallApiCallbackStub stub(isolate(), argc, false);
Handle<Code> code = stub.GetCode();
HConstant* code_value = Add<HConstant>(code);
call = New<HCallWithDescriptor>(
@@ -9777,11 +9541,14 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
expr->IsMonomorphic() &&
IsAllocationInlineable(expr->target())) {
Handle<JSFunction> constructor = expr->target();
- DCHECK(
- constructor->shared()->construct_stub() ==
- isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric) ||
- constructor->shared()->construct_stub() ==
- isolate()->builtins()->builtin(Builtins::kJSConstructStubApi));
+ DCHECK(constructor->shared()->construct_stub() ==
+ isolate()->builtins()->builtin(
+ Builtins::kJSConstructStubGenericRestrictedReturn) ||
+ constructor->shared()->construct_stub() ==
+ isolate()->builtins()->builtin(
+ Builtins::kJSConstructStubGenericUnrestrictedReturn) ||
+ constructor->shared()->construct_stub() ==
+ isolate()->builtins()->builtin(Builtins::kJSConstructStubApi));
HValue* check = Add<HCheckValue>(function, constructor);
// Force completion of inobject slack tracking before generating
@@ -9925,6 +9692,23 @@ void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteOffset(
FieldIndex::ForInObjectOffset(JSArrayBufferView::kByteOffsetOffset)));
}
+void HOptimizedGraphBuilder::GenerateArrayBufferViewWasNeutered(
+ CallRuntime* expr) {
+ NoObservableSideEffectsScope scope(this);
+ DCHECK_EQ(expr->arguments()->length(), 1);
+ CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
+ HValue* view = Pop();
+
+ HInstruction* buffer = Add<HLoadNamedField>(
+ view, nullptr, HObjectAccess::ForJSArrayBufferViewBuffer());
+ HInstruction* flags = Add<HLoadNamedField>(
+ buffer, nullptr, HObjectAccess::ForJSArrayBufferBitField());
+ HValue* was_neutered_mask =
+ Add<HConstant>(1 << JSArrayBuffer::WasNeutered::kShift);
+ HValue* was_neutered =
+ AddUncasted<HBitwise>(Token::BIT_AND, flags, was_neutered_mask);
+ return ast_context()->ReturnValue(was_neutered);
+}
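The new intrinsic loads the view's backing JSArrayBuffer, loads its bit field, and masks out the WasNeutered flag, returning the masked (truthy or zero) value rather than a boolean. The underlying test is an ordinary bitfield check:

    #include <cstdint>

    bool WasNeutered(uint32_t bit_field, int was_neutered_shift) {
      return (bit_field & (1u << was_neutered_shift)) != 0;
    }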
void HOptimizedGraphBuilder::GenerateTypedArrayGetLength(
CallRuntime* expr) {
@@ -10011,12 +9795,11 @@ void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
CHECK_ALIVE(VisitForValue(prop->key()));
HValue* key = Pop();
HValue* obj = Pop();
- Add<HPushArguments>(obj, key);
- HInstruction* instr = New<HCallRuntime>(
- Runtime::FunctionForId(is_strict(function_language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy),
- 2);
+ HValue* language_mode = Add<HConstant>(
+ static_cast<int32_t>(function_language_mode()), Representation::Smi());
+ Add<HPushArguments>(obj, key, language_mode);
+ HInstruction* instr =
+ New<HCallRuntime>(Runtime::FunctionForId(Runtime::kDeleteProperty), 3);
return ast_context()->ReturnInstruction(instr, expr->id());
} else if (proxy != NULL) {
Variable* var = proxy->var();
diff --git a/deps/v8/src/crankshaft/hydrogen.h b/deps/v8/src/crankshaft/hydrogen.h
index 9b2e710d57..656bbf0e8e 100644
--- a/deps/v8/src/crankshaft/hydrogen.h
+++ b/deps/v8/src/crankshaft/hydrogen.h
@@ -1379,20 +1379,10 @@ class HGraphBuilder {
ElementsKind kind,
HValue* length);
- void BuildTransitionElementsKind(HValue* object,
- HValue* map,
- ElementsKind from_kind,
- ElementsKind to_kind,
- bool is_jsarray);
-
HValue* BuildNumberToString(HValue* object, AstType* type);
HValue* BuildToNumber(HValue* input);
HValue* BuildToObject(HValue* receiver);
- HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
- HValue* elements, HValue* key,
- HValue* hash);
-
// ES6 section 7.4.7 CreateIterResultObject ( value, done )
HValue* BuildCreateIterResultObject(HValue* value, HValue* done);
@@ -1475,8 +1465,6 @@ class HGraphBuilder {
ElementsKind kind,
HValue *dependency = NULL);
- HValue* AddLoadJSBuiltin(int context_index);
-
HValue* EnforceNumberType(HValue* number, AstType* expected);
HValue* TruncateToNumber(HValue* value, AstType** expected);
@@ -1797,8 +1785,6 @@ class HGraphBuilder {
HValue* length,
HValue* capacity);
- HValue* BuildElementIndexHash(HValue* index);
-
void BuildCreateAllocationMemento(HValue* previous_object,
HValue* previous_object_size,
HValue* payload);
@@ -1809,14 +1795,8 @@ class HGraphBuilder {
Handle<JSObject> holder,
bool ensure_no_elements = false);
- HInstruction* BuildGetNativeContext(HValue* closure);
HInstruction* BuildGetNativeContext();
- // Builds a loop version if |depth| is specified or unrolls the loop to
- // |depth_value| iterations otherwise.
- HValue* BuildGetParentContext(HValue* depth, int depth_value);
-
- HInstruction* BuildGetArrayFunction();
HValue* BuildArrayBufferViewFieldAccessor(HValue* object,
HValue* checked_object,
FieldIndex index);
@@ -2179,6 +2159,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder,
F(TypedArrayMaxSizeInHeap) \
F(ArrayBufferViewGetByteLength) \
F(ArrayBufferViewGetByteOffset) \
+ F(ArrayBufferViewWasNeutered) \
F(TypedArrayGetLength) \
/* ArrayBuffer */ \
F(ArrayBufferGetByteLength) \
diff --git a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
index 25b3fbce0e..1c9c1999d0 100644
--- a/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -5060,8 +5060,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
- __ mov(result,
- FieldOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ mov(result, FieldOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
__ mov(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
diff --git a/deps/v8/src/crankshaft/mips/OWNERS b/deps/v8/src/crankshaft/mips/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/crankshaft/mips/OWNERS
+++ b/deps/v8/src/crankshaft/mips/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
index 7fb6f38fcf..942baa08c0 100644
--- a/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -375,9 +375,7 @@ bool LCodeGen::GenerateJumpTable() {
}
// Add the base address to the offset previously loaded in entry_offset.
- __ Addu(entry_offset, entry_offset,
- Operand(ExternalReference::ForDeoptEntry(base)));
- __ Jump(entry_offset);
+ __ Jump(entry_offset, Operand(ExternalReference::ForDeoptEntry(base)));
}
__ RecordComment("]");
@@ -3538,8 +3536,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
- __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(target);
+ __ Jump(target, Code::kHeaderSize - kHeapObjectTag);
}
} else {
LPointerMap* pointers = instr->pointer_map();
@@ -3554,8 +3551,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
- __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Call(target);
+ __ Call(target, Code::kHeaderSize - kHeapObjectTag);
}
generator.AfterCall();
}
@@ -5322,7 +5318,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ lw(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
__ lw(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
diff --git a/deps/v8/src/crankshaft/mips64/OWNERS b/deps/v8/src/crankshaft/mips64/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/crankshaft/mips64/OWNERS
+++ b/deps/v8/src/crankshaft/mips64/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 9cfa19a15d..38d04cfd39 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -91,7 +91,7 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
+ __ Sdc1(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -107,7 +107,7 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
+ __ Ldc1(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -148,7 +148,7 @@ bool LCodeGen::GeneratePrologue() {
Label loop;
__ bind(&loop);
__ Dsubu(a0, a0, Operand(kPointerSize));
- __ sd(a1, MemOperand(a0, 2 * kPointerSize));
+ __ Sd(a1, MemOperand(a0, 2 * kPointerSize));
__ Branch(&loop, ne, a0, Operand(sp));
__ Pop(a0, a1);
} else {
@@ -198,7 +198,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
// Context is returned in both v0. It replaces the context passed to us.
// It's saved in the stack and kept live in cp.
__ mov(cp, v0);
- __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info()->scope()->num_parameters();
int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
@@ -209,10 +209,10 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ ld(a0, MemOperand(fp, parameter_offset));
+ __ Ld(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
MemOperand target = ContextMemOperand(cp, var->index());
- __ sd(a0, target);
+ __ Sd(a0, target);
// Update the write barrier. This clobbers a3 and a0.
if (need_write_barrier) {
__ RecordWriteContextSlot(
@@ -417,7 +417,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
}
return scratch;
} else if (op->IsStackSlot()) {
- __ ld(scratch, ToMemOperand(op));
+ __ Ld(scratch, ToMemOperand(op));
return scratch;
}
UNREACHABLE();
@@ -454,7 +454,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
}
} else if (op->IsStackSlot()) {
MemOperand mem_op = ToMemOperand(op);
- __ ldc1(dbl_scratch, mem_op);
+ __ Ldc1(dbl_scratch, mem_op);
return dbl_scratch;
}
UNREACHABLE();
@@ -697,7 +697,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
__ Move(cp, ToRegister(context));
} else if (context->IsStackSlot()) {
- __ ld(cp, ToMemOperand(context));
+ __ Ld(cp, ToMemOperand(context));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
@@ -776,16 +776,16 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Label no_deopt;
__ Push(a1, scratch);
__ li(scratch, Operand(count));
- __ lw(a1, MemOperand(scratch));
+ __ Lw(a1, MemOperand(scratch));
__ Subu(a1, a1, Operand(1));
__ Branch(&no_deopt, ne, a1, Operand(zero_reg));
__ li(a1, Operand(FLAG_deopt_every_n_times));
- __ sw(a1, MemOperand(scratch));
+ __ Sw(a1, MemOperand(scratch));
__ Pop(a1, scratch);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
- __ sw(a1, MemOperand(scratch));
+ __ Sw(a1, MemOperand(scratch));
__ Pop(a1, scratch);
}
@@ -1765,8 +1765,8 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
if (FLAG_debug_code) {
Register scratch = scratch0();
- __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ Ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ And(scratch, scratch,
Operand(kStringRepresentationMask | kStringEncodingMask));
@@ -1779,9 +1779,9 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (encoding == String::ONE_BYTE_ENCODING) {
- __ lbu(result, operand);
+ __ Lbu(result, operand);
} else {
- __ lhu(result, operand);
+ __ Lhu(result, operand);
}
}
@@ -1804,9 +1804,9 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (encoding == String::ONE_BYTE_ENCODING) {
- __ sb(value, operand);
+ __ Sb(value, operand);
} else {
- __ sh(value, operand);
+ __ Sh(value, operand);
}
}
@@ -2069,12 +2069,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (type.IsHeapNumber()) {
DCHECK(!info()->IsStub());
DoubleRegister dbl_scratch = double_scratch0();
- __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
// Test the double value. Zero and NaN are false.
EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
} else if (type.IsString()) {
DCHECK(!info()->IsStub());
- __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
+ __ Ld(at, FieldMemOperand(reg, String::kLengthOffset));
EmitBranch(instr, ne, at, Operand(zero_reg));
} else {
ToBooleanHints expected = instr->hydrogen()->expected_input_types();
@@ -2111,10 +2111,10 @@ void LCodeGen::DoBranch(LBranch* instr) {
const Register map = scratch0();
if (expected & ToBooleanHint::kNeedsMap) {
- __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ Ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
if (expected & ToBooleanHint::kCanBeUndetectable) {
// Undetectable -> false.
- __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ Lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, at, Operand(1 << Map::kIsUndetectable));
__ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
}
@@ -2122,7 +2122,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected & ToBooleanHint::kReceiver) {
// spec object -> true.
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
@@ -2130,9 +2130,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected & ToBooleanHint::kString) {
// String value -> false iff empty.
Label not_string;
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
- __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
+ __ Ld(at, FieldMemOperand(reg, String::kLengthOffset));
__ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
__ Branch(instr->FalseLabel(chunk_));
__ bind(&not_string);
@@ -2141,7 +2141,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected & ToBooleanHint::kSymbol) {
// Symbol value -> true.
const Register scratch = scratch1();
- __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
@@ -2151,7 +2151,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Label not_heap_number;
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
__ Branch(&not_heap_number, ne, map, Operand(at));
- __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
__ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
ne, dbl_scratch, kDoubleRegZero);
// Falls through if dbl_scratch == 0.
@@ -2345,8 +2345,8 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
- __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ __ Ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
__ And(at, temp, Operand(1 << Map::kIsUndetectable));
EmitBranch(instr, ne, at, Operand(zero_reg));
}
@@ -2453,8 +2453,8 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
// temp now contains the constructor function. Grab the
// instance class name from there.
- __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ ld(temp,
+ __ Ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(temp,
FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
// The class name we are testing against is internalized since it's a literal.
// The name in the constructor is internalized because of the way the context
@@ -2483,7 +2483,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ Ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
EmitBranch(instr, eq, temp, Operand(instr->map()));
}
@@ -2505,28 +2505,28 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
}
// Loop through the {object}'s prototype chain looking for the {prototype}.
- __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
// Deoptimize if the object needs to be access checked.
- __ lbu(object_instance_type,
+ __ Lbu(object_instance_type,
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ And(object_instance_type, object_instance_type,
Operand(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type,
Operand(zero_reg));
- __ lbu(object_instance_type,
+ __ Lbu(object_instance_type,
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type,
Operand(JS_PROXY_TYPE));
- __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ LoadRoot(at, Heap::kNullValueRootIndex);
EmitFalseBranch(instr, eq, object_prototype, Operand(at));
EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
__ Branch(&loop, USE_DELAY_SLOT);
- __ ld(object_map, FieldMemOperand(object_prototype,
+ __ Ld(object_map, FieldMemOperand(object_prototype,
HeapObject::kMapOffset)); // In delay slot.
}
@@ -2560,7 +2560,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// managed by the register allocator and tearing down the frame, it's
// safe to write to the context register.
__ push(v0);
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
@@ -2592,7 +2592,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ld(result, ContextMemOperand(context, instr->slot_index()));
+ __ Ld(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2617,7 +2617,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Label skip_assignment;
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ld(scratch, target);
+ __ Ld(scratch, target);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
@@ -2627,7 +2627,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
}
}
- __ sd(value, target);
+ __ Sd(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
instr->hydrogen()->value()->type().IsHeapObject()
@@ -2659,13 +2659,13 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (instr->hydrogen()->representation().IsDouble()) {
DoubleRegister result = ToDoubleRegister(instr->result());
- __ ldc1(result, FieldMemOperand(object, offset));
+ __ Ldc1(result, FieldMemOperand(object, offset));
return;
}
Register result = ToRegister(instr->result());
if (!access.IsInobject()) {
- __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ Ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
object = result;
}
@@ -2695,8 +2695,8 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register result = ToRegister(instr->result());
// Get the prototype or initial map from the function.
- __ ld(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2708,7 +2708,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
__ Branch(&done, ne, scratch, Operand(MAP_TYPE));
// Get the prototype from the initial map.
- __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ __ Ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
// All done.
__ bind(&done);
@@ -2731,13 +2731,13 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
if (instr->index()->IsConstantOperand()) {
int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
int index = (const_length - const_index) + 1;
- __ ld(result, MemOperand(arguments, index * kPointerSize));
+ __ Ld(result, MemOperand(arguments, index * kPointerSize));
} else {
Register index = ToRegister(instr->index());
__ li(at, Operand(const_length + 1));
__ Dsubu(result, at, index);
__ Dlsa(at, arguments, result, kPointerSizeLog2);
- __ ld(result, MemOperand(at));
+ __ Ld(result, MemOperand(at));
}
} else if (instr->index()->IsConstantOperand()) {
Register length = ToRegister(instr->length());
@@ -2746,10 +2746,10 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
if (loc != 0) {
__ Dsubu(result, length, Operand(loc));
__ Dlsa(at, arguments, result, kPointerSizeLog2);
- __ ld(result, MemOperand(at));
+ __ Ld(result, MemOperand(at));
} else {
__ Dlsa(at, arguments, length, kPointerSizeLog2);
- __ ld(result, MemOperand(at));
+ __ Ld(result, MemOperand(at));
}
} else {
Register length = ToRegister(instr->length());
@@ -2757,7 +2757,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
__ Dsubu(result, length, index);
__ Daddu(result, result, 1);
__ Dlsa(at, arguments, result, kPointerSizeLog2);
- __ ld(result, MemOperand(at));
+ __ Ld(result, MemOperand(at));
}
}
@@ -2800,10 +2800,10 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ Daddu(scratch0(), scratch0(), external_pointer);
}
if (elements_kind == FLOAT32_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), base_offset));
+ __ Lwc1(result, MemOperand(scratch0(), base_offset));
__ cvt_d_s(result, result);
} else { // i.e. elements_kind == FLOAT64_ELEMENTS
- __ ldc1(result, MemOperand(scratch0(), base_offset));
+ __ Ldc1(result, MemOperand(scratch0(), base_offset));
}
} else {
Register result = ToRegister(instr->result());
@@ -2812,23 +2812,23 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
element_size_shift, shift_size, base_offset);
switch (elements_kind) {
case INT8_ELEMENTS:
- __ lb(result, mem_operand);
+ __ Lb(result, mem_operand);
break;
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
- __ lbu(result, mem_operand);
+ __ Lbu(result, mem_operand);
break;
case INT16_ELEMENTS:
- __ lh(result, mem_operand);
+ __ Lh(result, mem_operand);
break;
case UINT16_ELEMENTS:
- __ lhu(result, mem_operand);
+ __ Lhu(result, mem_operand);
break;
case INT32_ELEMENTS:
- __ lw(result, mem_operand);
+ __ Lw(result, mem_operand);
break;
case UINT32_ELEMENTS:
- __ lw(result, mem_operand);
+ __ Lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue,
result, Operand(0x80000000));
@@ -2889,7 +2889,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
__ Daddu(scratch, scratch, at);
}
- __ ldc1(result, MemOperand(scratch));
+ __ Ldc1(result, MemOperand(scratch));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ FmoveHigh(scratch, result);
@@ -2965,7 +2965,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
// The comparison only needs the least-significant bits of the value, which is a smi.
- __ ld(result, FieldMemOperand(result, PropertyCell::kValueOffset));
+ __ Ld(result, FieldMemOperand(result, PropertyCell::kValueOffset));
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
Operand(Smi::FromInt(Isolate::kProtectorValid)));
}
@@ -3039,8 +3039,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
} else if (instr->hydrogen()->arguments_adaptor()) {
// Check if the calling frame is an arguments adaptor frame.
Label done, adapted;
- __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(result,
+ __ Ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(result,
MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Xor(temp, result,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -3066,8 +3066,8 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
__ Branch(&done, eq, fp, Operand(elem));
// Arguments adaptor frame present. Get argument length from there.
- __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(result,
+ __ Ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(result,
MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(result);
@@ -3089,20 +3089,19 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
if (!instr->hydrogen()->known_function()) {
// Do not transform the receiver to object for strict mode functions.
- __ ld(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
// Do not transform the receiver to object for builtins.
int32_t strict_mode_function_mask =
1 << SharedFunctionInfo::kStrictModeBitWithinByte;
int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
- __ lbu(at,
+ __ Lbu(at,
FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
__ And(at, at, Operand(strict_mode_function_mask));
__ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
- __ lbu(at,
- FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
+ __ Lbu(at, FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
__ And(at, at, Operand(native_mask));
__ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
}
@@ -3123,9 +3122,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ Branch(&result_in_receiver);
__ bind(&global_object);
- __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
- __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
+ __ Ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ Ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ Ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
@@ -3170,7 +3169,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ dsll(scratch, length, kPointerSizeLog2);
__ bind(&loop);
__ Daddu(scratch, elements, scratch);
- __ ld(scratch, MemOperand(scratch));
+ __ Ld(scratch, MemOperand(scratch));
__ push(scratch);
__ Dsubu(length, length, Operand(1));
__ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
@@ -3217,7 +3216,7 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
@@ -3225,7 +3224,7 @@ void LCodeGen::DoContext(LContext* instr) {
// If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
if (info()->IsOptimizing()) {
- __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
// If there is no frame, the context must be in cp.
DCHECK(result.is(cp));
@@ -3256,7 +3255,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (can_invoke_directly) {
// Change context.
- __ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+ __ Ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
// Always initialize new target and number of actual arguments.
__ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
@@ -3273,7 +3272,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ Call(self, RelocInfo::CODE_TARGET);
}
} else {
- __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ __ Ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
if (is_tail_call) {
__ Jump(at);
} else {
@@ -3303,7 +3302,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register scratch = scratch0();
// Deoptimize if not a heap number.
- __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
Operand(at));
@@ -3311,7 +3310,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Label done;
Register exponent = scratch0();
scratch = no_reg;
- __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ __ Lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive, just
// return it.
__ Move(result, input);
@@ -3347,15 +3346,15 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ mov(tmp1, v0);
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input, input);
- __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ __ Lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
__ bind(&allocated);
// exponent: floating point exponent value.
// tmp1: allocated heap number.
__ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
- __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
- __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
- __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+ __ Sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+ __ Lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+ __ Sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
__ StoreToSafepointRegisterSlot(tmp1, result);
}
@@ -3601,7 +3600,7 @@ void LCodeGen::DoPower(LPower* instr) {
Label no_deopt;
__ JumpIfSmi(tagged_exponent, &no_deopt);
DCHECK(!a7.is(tagged_exponent));
- __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
+ __ Lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, a7, Operand(at));
__ bind(&no_deopt);
@@ -3676,14 +3675,14 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
// Check if next frame is an arguments adaptor frame.
Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
__ Branch(&no_arguments_adaptor, ne, scratch3,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
- __ ld(caller_args_count_reg,
+ __ Ld(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(caller_args_count_reg);
__ Branch(&formal_parameter_count_loaded);
@@ -3788,7 +3787,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
Label packed_case;
// We might need a transition to a holey elements kind here;
// look at the first argument.
- __ ld(a5, MemOperand(sp, 0));
+ __ Ld(a5, MemOperand(sp, 0));
__ Branch(&packed_case, eq, a5, Operand(zero_reg));
ElementsKind holey_kind = GetHoleyElementsKind(kind);
@@ -3820,8 +3819,7 @@ void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register code_object = ToRegister(instr->code_object());
__ Daddu(code_object, code_object,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sd(code_object,
- FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ __ Sd(code_object, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}
@@ -3864,7 +3862,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
DCHECK(!instr->hydrogen()->has_transition());
DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
DoubleRegister value = ToDoubleRegister(instr->value());
- __ sdc1(value, FieldMemOperand(object, offset));
+ __ Sdc1(value, FieldMemOperand(object, offset));
return;
}
@@ -3872,7 +3870,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Handle<Map> transition = instr->hydrogen()->transition_map();
AddDeprecationDependency(transition);
__ li(scratch1, Operand(transition));
- __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
// Update the write barrier for the map field.
@@ -3888,7 +3886,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register destination = object;
if (!access.IsInobject()) {
destination = scratch1;
- __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ Ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
}
if (representation.IsSmi() && SmiValuesAre32Bits() &&
@@ -3907,7 +3905,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
DoubleRegister value = ToDoubleRegister(instr->value());
- __ sdc1(value, operand);
+ __ Sdc1(value, operand);
} else {
DCHECK(instr->value()->IsRegister());
Register value = ToRegister(instr->value());
@@ -3998,9 +3996,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == FLOAT32_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(address, base_offset));
+ __ Swc1(double_scratch0(), MemOperand(address, base_offset));
} else { // Storing doubles, not floats.
- __ sdc1(value, MemOperand(address, base_offset));
+ __ Sdc1(value, MemOperand(address, base_offset));
}
} else {
Register value(ToRegister(instr->value()));
@@ -4012,15 +4010,15 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case INT8_ELEMENTS:
- __ sb(value, mem_operand);
+ __ Sb(value, mem_operand);
break;
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
- __ sh(value, mem_operand);
+ __ Sh(value, mem_operand);
break;
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
- __ sw(value, mem_operand);
+ __ Sw(value, mem_operand);
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
@@ -4078,9 +4076,9 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
if (instr->NeedsCanonicalization()) {
__ FPUCanonicalizeNaN(double_scratch, value);
- __ sdc1(double_scratch, MemOperand(scratch, 0));
+ __ Sdc1(double_scratch, MemOperand(scratch, 0));
} else {
- __ sdc1(value, MemOperand(scratch, 0));
+ __ Sdc1(value, MemOperand(scratch, 0));
}
}
@@ -4211,7 +4209,7 @@ void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
if (instr->elements()->IsRegister()) {
__ mov(result, ToRegister(instr->elements()));
} else {
- __ ld(result, ToMemOperand(instr->elements()));
+ __ Ld(result, ToMemOperand(instr->elements()));
}
__ bind(deferred->exit());
@@ -4231,7 +4229,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
if (instr->object()->IsRegister()) {
__ mov(result, ToRegister(instr->object()));
} else {
- __ ld(result, ToMemOperand(instr->object()));
+ __ Ld(result, ToMemOperand(instr->object()));
}
LOperand* key = instr->key();
@@ -4266,13 +4264,13 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
ElementsKind to_kind = instr->to_kind();
Label not_applicable;
- __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ __ Ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
__ Branch(&not_applicable, ne, scratch, Operand(from_map));
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
Register new_map_reg = ToRegister(instr->new_map_temp());
__ li(new_map_reg, Operand(to_map));
- __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ __ Sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteForMap(object_reg,
new_map_reg,
@@ -4395,7 +4393,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
char_code, Operand(String::kMaxOneByteCharCode));
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ Dlsa(result, result, char_code, kPointerSizeLog2);
- __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ Ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ Branch(deferred->entry(), eq, result, Operand(scratch));
__ bind(deferred->exit());
@@ -4428,7 +4426,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
FPURegister single_scratch = double_scratch0().low();
if (input->IsStackSlot()) {
Register scratch = scratch0();
- __ ld(scratch, ToMemOperand(input));
+ __ Ld(scratch, ToMemOperand(input));
__ mtc1(scratch, single_scratch);
} else {
__ mtc1(ToRegister(input), single_scratch);
@@ -4531,7 +4529,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+ __ Sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}
@@ -4562,7 +4560,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ Branch(deferred->entry());
}
__ bind(deferred->exit());
- __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ Sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}
@@ -4633,7 +4631,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
// Heap number map check.
- __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ Ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
@@ -4642,7 +4640,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
Operand(at));
}
// Load heap number.
- __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ Ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
__ mfc1(at, result_reg);
__ Branch(&done, ne, at, Operand(zero_reg));
@@ -4658,7 +4656,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
input_reg, Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ Ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ Branch(&done);
}
} else {
@@ -4688,7 +4686,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// The input is a tagged HeapObject.
// Heap number map check.
- __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ Ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
// This 'at' value and scratch1 map value are used for tests in both clauses
// of the if.
@@ -4697,7 +4695,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label truncate;
__ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at));
__ mov(scratch2, input_reg); // In delay slot.
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ Lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1,
Operand(ODDBALL_TYPE));
__ bind(&truncate);
@@ -4707,7 +4705,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Operand(at));
// Load the double value.
- __ ldc1(double_scratch,
+ __ Ldc1(double_scratch,
FieldMemOperand(input_reg, HeapNumber::kValueOffset));
Register except_flag = scratch2;
@@ -4877,8 +4875,8 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
Register view = ToRegister(instr->view());
Register scratch = scratch0();
- __ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
- __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+ __ Ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
+ __ Lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at,
Operand(zero_reg));
@@ -4936,7 +4934,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(cell));
- __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
+ __ Ld(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
} else {
DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
@@ -4948,8 +4946,8 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
Label deopt, done;
// If the map is not deprecated the migration attempt does not make sense.
- __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- __ lwu(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
+ __ Ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Lwu(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
__ And(at, scratch0(), Operand(Map::Deprecated::kMask));
__ Branch(&deopt, eq, at, Operand(zero_reg));
@@ -5006,7 +5004,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
LOperand* input = instr->value();
DCHECK(input->IsRegister());
Register reg = ToRegister(input);
- __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ Ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->HasMigrationTarget()) {
@@ -5058,7 +5056,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
__ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
// Check for heap number
- __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ Ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
// Check for undefined. Undefined is converted to zero for clamping
@@ -5070,8 +5068,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
- HeapNumber::kValueOffset));
+ __ Ldc1(double_scratch0(),
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset));
__ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
__ jmp(&done);
@@ -5140,7 +5138,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
__ bind(&loop);
__ Dsubu(scratch, scratch, Operand(kPointerSize));
__ Daddu(at, result, Operand(scratch));
- __ sd(scratch2, MemOperand(at));
+ __ Sd(scratch2, MemOperand(at));
__ Branch(&loop, ge, scratch, Operand(zero_reg));
}
}
@@ -5201,7 +5199,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register top_address = scratch0();
__ Dsubu(v0, v0, Operand(kHeapObjectTag));
__ li(top_address, Operand(allocation_top));
- __ sd(v0, MemOperand(top_address));
+ __ Sd(v0, MemOperand(top_address));
__ Daddu(v0, v0, Operand(kHeapObjectTag));
}
}
@@ -5283,7 +5281,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Factory* factory = isolate()->factory();
if (String::Equals(type_name, factory->number_string())) {
__ JumpIfSmi(input, true_label);
- __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
*cmp1 = input;
*cmp2 = Operand(at);
@@ -5318,8 +5316,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
// slot.
__ JumpIfSmi(input, false_label);
// Check for undetectable objects => true.
- __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+ __ Ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
__ And(at, at, 1 << Map::kIsUndetectable);
*cmp1 = at;
*cmp2 = Operand(zero_reg);
@@ -5327,8 +5325,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (String::Equals(type_name, factory->function_string())) {
__ JumpIfSmi(input, false_label);
- __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ Ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ And(scratch, scratch,
Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
*cmp1 = scratch;
@@ -5343,7 +5341,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ GetObjectType(input, scratch, scratch1());
__ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
// Check for callable or undetectable objects => false.
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ And(at, scratch,
Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
*cmp1 = at;
@@ -5492,7 +5490,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(object.is(a0));
__ CheckEnumCache(&call_runtime);
- __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
__ Branch(&use_cache);
// Get the set of properties to enumerate.
@@ -5514,10 +5512,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
- __ ld(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
- __ ld(result,
- FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ Ld(result,
+ FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
+ __ Ld(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
Operand(zero_reg));
@@ -5528,7 +5525,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
- __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map,
Operand(scratch0()));
}
@@ -5592,16 +5589,16 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
__ SmiScale(scratch, index, kPointerSizeLog2); // In delay slot.
__ Daddu(scratch, object, scratch);
- __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+ __ Ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
__ Branch(&done);
__ bind(&out_of_object);
- __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ Ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
// Index is equal to the negated out-of-object property index plus 1.
__ Dsubu(scratch, result, scratch);
- __ ld(result, FieldMemOperand(scratch,
- FixedArray::kHeaderSize - kPointerSize));
+ __ Ld(result,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
__ bind(deferred->exit());
__ bind(&done);
}
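
Throughout this file the patch swaps raw MIPS64 instruction emitters (ld, sd, lbu, lhu, ldc1, sdc1, ...) for capitalized MacroAssembler counterparts (Ld, Sd, Lbu, Lhu, Ldc1, Sdc1, ...). The usual motivation for such wrappers is offset range: the raw loads and stores take a signed 16-bit immediate, so a macro version can fall back to computing the address in a scratch register when the offset does not fit. A minimal sketch of that pattern, assuming the is_int16 helper and the at scratch register that appear elsewhere in this diff -- this is not the actual V8 implementation:

    void MacroAssembler::Ld(Register rd, const MemOperand& rs) {
      if (is_int16(rs.offset())) {
        ld(rd, rs);                    // offset fits the 16-bit immediate
      } else {
        li(at, Operand(rs.offset()));  // materialize the large offset
        daddu(at, at, rs.rm());        // add the base register
        ld(rd, MemOperand(at, 0));     // load through the scratch address
      }
    }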
diff --git a/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc b/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc
index 0374cbc7bb..eb50d4b2f1 100644
--- a/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc
+++ b/deps/v8/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc
@@ -146,11 +146,11 @@ void LGapResolver::BreakCycle(int index) {
if (source->IsRegister()) {
__ mov(kLithiumScratchReg, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
- __ ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
+ __ Ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
- __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
+ __ Ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
@@ -167,13 +167,12 @@ void LGapResolver::RestoreValue() {
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
} else if (saved_destination_->IsStackSlot()) {
- __ sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
+ __ Sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
- __ sdc1(kLithiumScratchDouble,
- cgen_->ToMemOperand(saved_destination_));
+ __ Sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
@@ -196,12 +195,12 @@ void LGapResolver::EmitMove(int index) {
__ mov(cgen_->ToRegister(destination), source_register);
} else {
DCHECK(destination->IsStackSlot());
- __ sd(source_register, cgen_->ToMemOperand(destination));
+ __ Sd(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
- __ ld(cgen_->ToRegister(destination), source_operand);
+ __ Ld(cgen_->ToRegister(destination), source_operand);
} else {
DCHECK(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
@@ -211,15 +210,15 @@ void LGapResolver::EmitMove(int index) {
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
// This uses only a single reg of the double reg-pair.
- __ ldc1(kLithiumScratchDouble, source_operand);
- __ sdc1(kLithiumScratchDouble, destination_operand);
+ __ Ldc1(kLithiumScratchDouble, source_operand);
+ __ Sdc1(kLithiumScratchDouble, destination_operand);
} else {
- __ ld(at, source_operand);
- __ sd(at, destination_operand);
+ __ Ld(at, source_operand);
+ __ Sd(at, destination_operand);
}
} else {
- __ ld(kLithiumScratchReg, source_operand);
- __ sd(kLithiumScratchReg, destination_operand);
+ __ Ld(kLithiumScratchReg, source_operand);
+ __ Sd(kLithiumScratchReg, destination_operand);
}
}
@@ -243,13 +242,13 @@ void LGapResolver::EmitMove(int index) {
DCHECK(!in_cycle_); // Constant moves happen after all cycles are gone.
if (cgen_->IsSmi(constant_source)) {
__ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
- __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+ __ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
} else if (cgen_->IsInteger32(constant_source)) {
__ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source)));
- __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+ __ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
} else {
__ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
- __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+ __ Sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
}
}
@@ -260,13 +259,13 @@ void LGapResolver::EmitMove(int index) {
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
- __ sdc1(source_register, destination_operand);
+ __ Sdc1(source_register, destination_operand);
}
} else if (source->IsDoubleStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
- __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
+ __ Ldc1(cgen_->ToDoubleRegister(destination), source_operand);
} else {
DCHECK(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
@@ -277,13 +276,13 @@ void LGapResolver::EmitMove(int index) {
cgen_->ToHighMemOperand(source);
MemOperand destination_high_operand =
cgen_->ToHighMemOperand(destination);
- __ lw(kLithiumScratchReg, source_operand);
- __ sw(kLithiumScratchReg, destination_operand);
- __ lw(kLithiumScratchReg, source_high_operand);
- __ sw(kLithiumScratchReg, destination_high_operand);
+ __ Lw(kLithiumScratchReg, source_operand);
+ __ Sw(kLithiumScratchReg, destination_operand);
+ __ Lw(kLithiumScratchReg, source_high_operand);
+ __ Sw(kLithiumScratchReg, destination_high_operand);
} else {
- __ ldc1(kLithiumScratchDouble, source_operand);
- __ sdc1(kLithiumScratchDouble, destination_operand);
+ __ Ldc1(kLithiumScratchDouble, source_operand);
+ __ Sdc1(kLithiumScratchDouble, destination_operand);
}
}
} else {
diff --git a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
index 29323b7a4b..877d62ceaa 100644
--- a/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
+++ b/deps/v8/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -5593,7 +5593,8 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
- __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ LoadP(result,
+ FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
__ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmpi(result, Operand::Zero());
DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
diff --git a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
index 311f31fee2..91bb03e647 100644
--- a/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
+++ b/deps/v8/src/crankshaft/s390/lithium-codegen-s390.cc
@@ -5524,7 +5524,8 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
- __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ LoadP(result,
+ FieldMemOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
__ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ CmpP(result, Operand::Zero());
DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
diff --git a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
index 443ae4e224..3eddd47bc4 100644
--- a/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -5340,7 +5340,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ movp(result,
- FieldOperand(result, DescriptorArray::kEnumCacheOffset));
+ FieldOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
__ movp(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
diff --git a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
index 4788143045..2a229aa92e 100644
--- a/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
+++ b/deps/v8/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -5555,8 +5555,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
- __ mov(result,
- FieldOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ mov(result, FieldOperand(result, DescriptorArray::kEnumCacheBridgeOffset));
__ mov(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
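
The four port hunks above (ppc, s390, x64, x87) apply the same rename as the mips64 change earlier in this patch: DescriptorArray::kEnumCacheOffset becomes kEnumCacheBridgeOffset. The lookup shape is identical in every port; in port-neutral pseudocode, where LoadField stands in for LoadP/movp/mov:

    // First fetch the enum-cache bridge from the descriptor array, then the
    // cached enum keys at slot idx.
    result = LoadField(descriptors, DescriptorArray::kEnumCacheBridgeOffset);
    result = LoadField(result, FixedArray::SizeFor(instr->idx()));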
diff --git a/deps/v8/src/d8-console.cc b/deps/v8/src/d8-console.cc
new file mode 100644
index 0000000000..e4f81b3c2d
--- /dev/null
+++ b/deps/v8/src/d8-console.cc
@@ -0,0 +1,111 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/d8-console.h"
+#include "src/d8.h"
+
+namespace v8 {
+
+namespace {
+void WriteToFile(FILE* file, Isolate* isolate,
+ const debug::ConsoleCallArguments& args) {
+ for (int i = 0; i < args.Length(); i++) {
+ HandleScope handle_scope(isolate);
+ if (i != 0) fprintf(file, " ");
+
+ // Explicitly catch potential exceptions in toString().
+ v8::TryCatch try_catch(isolate);
+ Local<Value> arg = args[i];
+ Local<String> str_obj;
+
+ if (arg->IsSymbol()) arg = Local<Symbol>::Cast(arg)->Name();
+ if (!arg->ToString(isolate->GetCurrentContext()).ToLocal(&str_obj)) {
+ Shell::ReportException(isolate, &try_catch);
+ return;
+ }
+
+ v8::String::Utf8Value str(str_obj);
+ int n = static_cast<int>(fwrite(*str, sizeof(**str), str.length(), file));
+ if (n != str.length()) {
+ printf("Error in fwrite\n");
+ Shell::Exit(1);
+ }
+ }
+ fprintf(file, "\n");
+}
+} // anonymous namespace
+
+D8Console::D8Console(Isolate* isolate) : isolate_(isolate) {
+ default_timer_ = base::TimeTicks::HighResolutionNow();
+}
+
+void D8Console::Log(const debug::ConsoleCallArguments& args) {
+ WriteToFile(stdout, isolate_, args);
+}
+
+void D8Console::Error(const debug::ConsoleCallArguments& args) {
+ WriteToFile(stderr, isolate_, args);
+}
+
+void D8Console::Warn(const debug::ConsoleCallArguments& args) {
+ WriteToFile(stdout, isolate_, args);
+}
+
+void D8Console::Info(const debug::ConsoleCallArguments& args) {
+ WriteToFile(stdout, isolate_, args);
+}
+
+void D8Console::Debug(const debug::ConsoleCallArguments& args) {
+ WriteToFile(stdout, isolate_, args);
+}
+
+void D8Console::Time(const debug::ConsoleCallArguments& args) {
+ if (args.Length() == 0) {
+ default_timer_ = base::TimeTicks::HighResolutionNow();
+ } else {
+ Local<Value> arg = args[0];
+ Local<String> label;
+ v8::TryCatch try_catch(isolate_);
+ if (!arg->ToString(isolate_->GetCurrentContext()).ToLocal(&label)) {
+ Shell::ReportException(isolate_, &try_catch);
+ return;
+ }
+ v8::String::Utf8Value utf8(label);
+ std::string string(*utf8);
+ auto find = timers_.find(string);
+ if (find != timers_.end()) {
+ find->second = base::TimeTicks::HighResolutionNow();
+ } else {
+ timers_.insert(std::pair<std::string, base::TimeTicks>(
+ string, base::TimeTicks::HighResolutionNow()));
+ }
+ }
+}
+
+void D8Console::TimeEnd(const debug::ConsoleCallArguments& args) {
+ base::TimeDelta delta;
+ base::TimeTicks now = base::TimeTicks::HighResolutionNow();
+ if (args.Length() == 0) {
+ delta = base::TimeTicks::HighResolutionNow() - default_timer_;
+ printf("default: ");
+ } else {
+ Local<Value> arg = args[0];
+ Local<String> label;
+ v8::TryCatch try_catch(isolate_);
+ if (!arg->ToString(isolate_->GetCurrentContext()).ToLocal(&label)) {
+ Shell::ReportException(isolate_, &try_catch);
+ return;
+ }
+ v8::String::Utf8Value utf8(label);
+ std::string string(*utf8);
+ auto find = timers_.find(string);
+ if (find != timers_.end()) {
+ delta = now - find->second;
+ }
+ printf("%s: ", *utf8);
+ }
+ printf("%f\n", delta.InMillisecondsF());
+}
+
+} // namespace v8
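
For orientation, D8Console is installed as the isolate's console delegate later in this same patch (see the d8.cc hunks below); the delegate must outlive any console call made on that isolate. The wiring, taken from those hunks:

    Isolate* isolate = Isolate::New(create_params);
    D8Console console(isolate);
    debug::SetConsoleDelegate(isolate, &console);
    // From here, console.log(...) in script reaches D8Console::Log (stdout)
    // and console.error(...) reaches D8Console::Error (stderr).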
diff --git a/deps/v8/src/d8-console.h b/deps/v8/src/d8-console.h
new file mode 100644
index 0000000000..293cb21180
--- /dev/null
+++ b/deps/v8/src/d8-console.h
@@ -0,0 +1,34 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_D8_CONSOLE_H_
+#define V8_D8_CONSOLE_H_
+
+#include "src/base/platform/time.h"
+#include "src/debug/debug-interface.h"
+#include "src/debug/interface-types.h"
+
+namespace v8 {
+
+class D8Console : public debug::ConsoleDelegate {
+ public:
+ explicit D8Console(Isolate* isolate);
+
+ private:
+ void Log(const debug::ConsoleCallArguments& args) override;
+ void Error(const debug::ConsoleCallArguments& args) override;
+ void Warn(const debug::ConsoleCallArguments& args) override;
+ void Info(const debug::ConsoleCallArguments& args) override;
+ void Debug(const debug::ConsoleCallArguments& args) override;
+ void Time(const debug::ConsoleCallArguments& args) override;
+ void TimeEnd(const debug::ConsoleCallArguments& args) override;
+
+ Isolate* isolate_;
+ std::map<std::string, base::TimeTicks> timers_;
+ base::TimeTicks default_timer_;
+};
+
+} // namespace v8
+
+#endif // V8_D8_CONSOLE_H_
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 62a6518d68..efa8dbc633 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -17,6 +17,7 @@
#include "src/third_party/vtune/v8-vtune.h"
#endif
+#include "src/d8-console.h"
#include "src/d8.h"
#include "src/ostreams.h"
@@ -35,6 +36,7 @@
#include "src/list-inl.h"
#include "src/msan.h"
#include "src/objects-inl.h"
+#include "src/objects.h"
#include "src/snapshot/natives.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils.h"
@@ -71,9 +73,37 @@ const int kMaxSerializerMemoryUsage = 1 * MB; // Arbitrary maximum for testing.
// array buffers storing the lengths as a SMI internally.
#define TWO_GB (2u * 1024u * 1024u * 1024u)
-class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+// Forwards memory reservation and protection functions to the V8 default
+// allocator. Used by ShellArrayBufferAllocator and MockArrayBufferAllocator.
+class ArrayBufferAllocatorBase : public v8::ArrayBuffer::Allocator {
+ std::unique_ptr<Allocator> allocator_ =
+ std::unique_ptr<Allocator>(NewDefaultAllocator());
+
public:
- virtual void* Allocate(size_t length) {
+ void* Reserve(size_t length) override { return allocator_->Reserve(length); }
+
+ void Free(void*, size_t) override = 0;
+
+ void Free(void* data, size_t length, AllocationMode mode) override {
+ switch (mode) {
+ case AllocationMode::kNormal: {
+ return Free(data, length);
+ }
+ case AllocationMode::kReservation: {
+ return allocator_->Free(data, length, mode);
+ }
+ }
+ }
+
+ void SetProtection(void* data, size_t length,
+ Protection protection) override {
+ allocator_->SetProtection(data, length, protection);
+ }
+};
+
+class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
+ public:
+ void* Allocate(size_t length) override {
#if USE_VM
if (RoundToPageSize(&length)) {
void* data = VirtualMemoryAllocate(length);
@@ -93,7 +123,7 @@ class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
void* data = AllocateUninitialized(length);
return data == NULL ? data : memset(data, 0, length);
}
- virtual void* AllocateUninitialized(size_t length) {
+ void* AllocateUninitialized(size_t length) override {
#if USE_VM
if (RoundToPageSize(&length)) return VirtualMemoryAllocate(length);
#endif
@@ -105,7 +135,7 @@ class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return malloc(length);
#endif
}
- virtual void Free(void* data, size_t length) {
+ void Free(void* data, size_t length) override {
#if USE_VM
if (RoundToPageSize(&length)) {
base::VirtualMemory::ReleaseRegion(data, length);
@@ -137,18 +167,28 @@ class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
#endif
};
+class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
+ const size_t kAllocationLimit = 10 * MB;
+ size_t get_actual_length(size_t length) const {
+ return length > kAllocationLimit ? base::OS::CommitPageSize() : length;
+ }
-class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
void* Allocate(size_t length) override {
- size_t actual_length = length > 10 * MB ? 1 : length;
+ const size_t actual_length = get_actual_length(length);
void* data = AllocateUninitialized(actual_length);
return data == NULL ? data : memset(data, 0, actual_length);
}
void* AllocateUninitialized(size_t length) override {
- return length > 10 * MB ? malloc(1) : malloc(length);
+ return malloc(get_actual_length(length));
}
void Free(void* p, size_t) override { free(p); }
+ void Free(void* data, size_t length, AllocationMode mode) override {
+ ArrayBufferAllocatorBase::Free(data, get_actual_length(length), mode);
+ }
+ void* Reserve(size_t length) override {
+ return ArrayBufferAllocatorBase::Reserve(get_actual_length(length));
+ }
};
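
The rewrite above also changes what an oversized test allocation returns: previously anything past 10 MB silently became a 1-byte malloc, whereas now it is rounded to one commit page, so the pointer is at least page-sized and the Reserve/Free paths see consistent lengths. Illustrative values (base::OS::CommitPageSize() is platform-dependent, commonly 4096 bytes):

    get_actual_length(1 * MB);   // == 1 MB: under kAllocationLimit
    get_actual_length(64 * MB);  // == base::OS::CommitPageSize(): clamped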
@@ -393,6 +433,20 @@ class PerIsolateData {
int RealmFind(Local<Context> context);
};
+class ExternalOwningOneByteStringResource
+ : public String::ExternalOneByteStringResource {
+ public:
+ ExternalOwningOneByteStringResource() : length_(0) {}
+ ExternalOwningOneByteStringResource(std::unique_ptr<const char[]> data,
+ size_t length)
+ : data_(std::move(data)), length_(length) {}
+ const char* data() const override { return data_.get(); }
+ size_t length() const override { return length_; }
+
+ private:
+ std::unique_ptr<const char[]> data_;
+ size_t length_;
+};
CounterMap* Shell::counter_map_;
base::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
@@ -406,6 +460,8 @@ base::LazyMutex Shell::workers_mutex_;
bool Shell::allow_new_workers_ = true;
i::List<Worker*> Shell::workers_;
std::vector<ExternalizedContents> Shell::externalized_contents_;
+base::LazyMutex Shell::isolate_status_lock_;
+std::map<v8::Isolate*, bool> Shell::isolate_status_;
Global<Context> Shell::evaluation_context_;
ArrayBuffer::Allocator* Shell::array_buffer_allocator;
@@ -642,8 +698,6 @@ class ModuleEmbedderData {
};
enum {
- // The debugger reserves the first slot in the Context embedder data.
- kDebugIdIndex = Context::kDebugIdIndex,
kModuleEmbedderDataIndex,
kInspectorClientIndex
};
@@ -851,8 +905,10 @@ PerIsolateData::RealmScope::RealmScope(PerIsolateData* data) : data_(data) {
PerIsolateData::RealmScope::~RealmScope() {
- // Drop realms to avoid keeping them alive.
- for (int i = 0; i < data_->realm_count_; ++i) {
+ // Drop realms to avoid keeping them alive. We don't dispose the
+ // module embedder data for the first realm here, but instead do
+ // it in RunShell or in RunMain, if not running in interactive mode.
+ for (int i = 1; i < data_->realm_count_; ++i) {
Global<Context>& realm = data_->realms_[i];
if (realm.IsEmpty()) continue;
DisposeModuleEmbedderData(realm.Get(data_->isolate_));
@@ -1004,6 +1060,11 @@ void Shell::RealmNavigate(const v8::FunctionCallbackInfo<v8::Value>& args) {
PerIsolateData* data = PerIsolateData::Get(isolate);
int index = data->RealmIndexOrThrow(args, 0);
if (index == -1) return;
+ if (index == 0 || index == data->realm_current_ ||
+ index == data->realm_switch_) {
+ Throw(args.GetIsolate(), "Invalid realm index");
+ return;
+ }
Local<Context> context = Local<Context>::New(isolate, data->realms_[index]);
v8::MaybeLocal<Value> global_object = context->Global();
@@ -1055,12 +1116,16 @@ void Shell::RealmEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
Local<Context> realm = Local<Context>::New(isolate, data->realms_[index]);
realm->Enter();
+ int previous_index = data->realm_current_;
+ data->realm_current_ = data->realm_switch_ = index;
Local<Value> result;
if (!script->BindToCurrentContext()->Run(realm).ToLocal(&result)) {
realm->Exit();
+ data->realm_current_ = data->realm_switch_ = previous_index;
return;
}
realm->Exit();
+ data->realm_current_ = data->realm_switch_ = previous_index;
args.GetReturnValue().Set(result);
}
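
Realm.eval now brackets script execution with a save/restore of the current realm index, on the failure path as well as the success path, so a throwing script can no longer leave realm_current_ and realm_switch_ pointing at the realm it ran in. The shape, condensed from the hunk above:

    int previous_index = data->realm_current_;
    data->realm_current_ = data->realm_switch_ = index;
    // ... run the script; then on either outcome:
    data->realm_current_ = data->realm_switch_ = previous_index;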
@@ -1320,6 +1385,15 @@ void Shell::Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
const_cast<v8::FunctionCallbackInfo<v8::Value>*>(&args));
}
+// Note that both WaitUntilDone and NotifyDone are no-ops when
+// --verify-predictable. See comment in Shell::EnsureEventLoopInitialized.
+void Shell::WaitUntilDone(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ SetWaitUntilDone(args.GetIsolate(), true);
+}
+
+void Shell::NotifyDone(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ SetWaitUntilDone(args.GetIsolate(), false);
+}
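
These two callbacks are the C++ half of the testRunner object registered in CreateGlobalTemplate below: a test calls testRunner.waitUntilDone() to keep the shell pumping its message loop, and testRunner.notifyDone() to let CompleteMessageLoop return. A stand-alone sketch of the per-isolate status registry they toggle (IsolateHandle and the globals are illustrative stand-ins; note that std::map's operator[] already provides the insert-or-assign behavior that SetWaitUntilDone spells out further down):

    #include <map>
    #include <mutex>

    using IsolateHandle = const void*;  // stand-in for v8::Isolate*

    std::mutex g_status_lock;
    std::map<IsolateHandle, bool> g_isolate_status;

    void SetWaitUntilDone(IsolateHandle isolate, bool value) {
      std::lock_guard<std::mutex> guard(g_status_lock);
      g_isolate_status[isolate] = value;  // inserts or overwrites
    }

    bool IsWaitUntilDone(IsolateHandle isolate) {
      std::lock_guard<std::mutex> guard(g_status_lock);
      auto it = g_isolate_status.find(isolate);
      return it != g_isolate_status.end() && it->second;
    }
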
void Shell::Version(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(
@@ -1557,6 +1631,19 @@ Local<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
.ToLocalChecked(),
FunctionTemplate::New(isolate, Quit));
}
+ Local<ObjectTemplate> test_template = ObjectTemplate::New(isolate);
+ global_template->Set(
+ String::NewFromUtf8(isolate, "testRunner", NewStringType::kNormal)
+ .ToLocalChecked(),
+ test_template);
+ test_template->Set(
+ String::NewFromUtf8(isolate, "notifyDone", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, NotifyDone));
+ test_template->Set(
+ String::NewFromUtf8(isolate, "waitUntilDone", NewStringType::kNormal)
+ .ToLocalChecked(),
+ FunctionTemplate::New(isolate, WaitUntilDone));
global_template->Set(
String::NewFromUtf8(isolate, "version", NewStringType::kNormal)
.ToLocalChecked(),
@@ -1985,16 +2072,22 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
args.GetReturnValue().Set(buffer);
}
-
// Reads a file into a v8 string.
Local<String> Shell::ReadFile(Isolate* isolate, const char* name) {
int size = 0;
char* chars = ReadChars(name, &size);
if (chars == NULL) return Local<String>();
- Local<String> result =
- String::NewFromUtf8(isolate, chars, NewStringType::kNormal, size)
- .ToLocalChecked();
- delete[] chars;
+ Local<String> result;
+ if (i::FLAG_use_external_strings && internal::String::IsAscii(chars, size)) {
+ String::ExternalOneByteStringResource* resource =
+ new ExternalOwningOneByteStringResource(
+ std::unique_ptr<const char[]>(chars), size);
+ result = String::NewExternalOneByte(isolate, resource).ToLocalChecked();
+ } else {
+ result = String::NewFromUtf8(isolate, chars, NewStringType::kNormal, size)
+ .ToLocalChecked();
+ delete[] chars;
+ }
return result;
}
@@ -2017,6 +2110,9 @@ void Shell::RunShell(Isolate* isolate) {
ExecuteString(isolate, input, name, true, true);
}
printf("\n");
+ // We need to explicitly clean up the module embedder data for
+ // the interactive shell context.
+ DisposeModuleEmbedderData(context);
}
class InspectorFrontend final : public v8_inspector::V8Inspector::Channel {
@@ -2213,16 +2309,8 @@ void SourceGroup::Execute(Isolate* isolate) {
}
}
-
Local<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
- int size;
- char* chars = ReadChars(name, &size);
- if (chars == NULL) return Local<String>();
- Local<String> result =
- String::NewFromUtf8(isolate, chars, NewStringType::kNormal, size)
- .ToLocalChecked();
- delete[] chars;
- return result;
+ return Shell::ReadFile(isolate, name);
}
@@ -2240,6 +2328,10 @@ void SourceGroup::ExecuteInThread() {
create_params.host_import_module_dynamically_callback_ =
Shell::HostImportModuleDynamically;
Isolate* isolate = Isolate::New(create_params);
+
+ Shell::EnsureEventLoopInitialized(isolate);
+ D8Console console(isolate);
+ debug::SetConsoleDelegate(isolate, &console);
for (int i = 0; i < Shell::options.stress_runs; ++i) {
next_semaphore_.Wait();
{
@@ -2258,6 +2350,7 @@ void SourceGroup::ExecuteInThread() {
DisposeModuleEmbedderData(context);
}
Shell::CollectGarbage(isolate);
+ Shell::CompleteMessageLoop(isolate);
}
done_semaphore_.Signal();
}
@@ -2380,6 +2473,8 @@ void Worker::ExecuteInThread() {
create_params.host_import_module_dynamically_callback_ =
Shell::HostImportModuleDynamically;
Isolate* isolate = Isolate::New(create_params);
+ D8Console console(isolate);
+ debug::SetConsoleDelegate(isolate, &console);
{
Isolate::Scope iscope(isolate);
{
@@ -2532,9 +2627,6 @@ bool Shell::SetOptions(int argc, char* argv[]) {
continue;
} else if (strcmp(argv[i], "--isolate") == 0) {
options.num_isolates++;
- } else if (strcmp(argv[i], "--dump-heap-constants") == 0) {
- options.dump_heap_constants = true;
- argv[i] = NULL;
} else if (strcmp(argv[i], "--throws") == 0) {
options.expected_to_throw = true;
argv[i] = NULL;
@@ -2619,12 +2711,14 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
options.isolate_sources[i].StartExecuteInThread();
}
{
+ EnsureEventLoopInitialized(isolate);
if (options.lcov_file) {
debug::Coverage::SelectMode(isolate, debug::Coverage::kPreciseCount);
}
HandleScope scope(isolate);
Local<Context> context = CreateEvaluationContext(isolate);
- if (last_run && options.use_interactive_shell()) {
+ bool use_existing_context = last_run && options.use_interactive_shell();
+ if (use_existing_context) {
// Keep using the same context in the interactive shell.
evaluation_context_.Reset(isolate, context);
}
@@ -2634,10 +2728,13 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[], bool last_run) {
PerIsolateData::RealmScope realm_scope(PerIsolateData::Get(isolate));
options.isolate_sources[0].Execute(isolate);
}
- DisposeModuleEmbedderData(context);
+ if (!use_existing_context) {
+ DisposeModuleEmbedderData(context);
+ }
WriteLcovData(isolate, options.lcov_file);
}
CollectGarbage(isolate);
+ CompleteMessageLoop(isolate);
for (int i = 1; i < options.num_isolates; ++i) {
if (last_run) {
options.isolate_sources[i].JoinThread();
@@ -2665,24 +2762,55 @@ void Shell::CollectGarbage(Isolate* isolate) {
}
}
+void Shell::EnsureEventLoopInitialized(Isolate* isolate) {
+ // When using PredictablePlatform (i.e. FLAG_verify_predictable),
+ // we don't need event loop support, because tasks are completed
+ // immediately - both background and foreground ones.
+ if (!i::FLAG_verify_predictable) {
+ v8::platform::EnsureEventLoopInitialized(g_platform, isolate);
+ SetWaitUntilDone(isolate, false);
+ }
+}
+
+void Shell::SetWaitUntilDone(Isolate* isolate, bool value) {
+ base::LockGuard<base::Mutex> guard(isolate_status_lock_.Pointer());
+ if (isolate_status_.count(isolate) == 0) {
+ isolate_status_.insert(std::make_pair(isolate, value));
+ } else {
+ isolate_status_[isolate] = value;
+ }
+}
+
+bool Shell::IsWaitUntilDone(Isolate* isolate) {
+ base::LockGuard<base::Mutex> guard(isolate_status_lock_.Pointer());
+ DCHECK_GT(isolate_status_.count(isolate), 0);
+ return isolate_status_[isolate];
+}
+
+void Shell::CompleteMessageLoop(Isolate* isolate) {
+ // See comment in EnsureEventLoopInitialized.
+ if (i::FLAG_verify_predictable) return;
+ while (v8::platform::PumpMessageLoop(
+ g_platform, isolate,
+ Shell::IsWaitUntilDone(isolate)
+ ? platform::MessageLoopBehavior::kWaitForWork
+ : platform::MessageLoopBehavior::kDoNotWait)) {
+ isolate->RunMicrotasks();
+ }
+ v8::platform::RunIdleTasks(g_platform, isolate,
+ 50.0 / base::Time::kMillisecondsPerSecond);
+}
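
The MessageLoopBehavior argument is the crux of CompleteMessageLoop: kWaitForWork blocks until the next foreground task arrives (so the loop can outlive the script while waitUntilDone is set), while kDoNotWait returns as soon as the queue is empty. A minimal embedder-side sketch of the same drain pattern, assuming the default platform of this V8 version:

    #include "include/libplatform/libplatform.h"  // path is illustrative
    #include "include/v8.h"

    // Run every pending foreground task, then its microtasks, without
    // blocking; pass kWaitForWork instead to sleep until work arrives.
    void DrainMessageLoop(v8::Platform* platform, v8::Isolate* isolate) {
      while (v8::platform::PumpMessageLoop(
          platform, isolate, v8::platform::MessageLoopBehavior::kDoNotWait)) {
        isolate->RunMicrotasks();
      }
      // Give idle tasks a 50ms budget, mirroring the shell code above.
      v8::platform::RunIdleTasks(platform, isolate, 0.05);
    }
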
+
void Shell::EmptyMessageQueues(Isolate* isolate) {
if (i::FLAG_verify_predictable) return;
- while (true) {
- // Pump the message loop until it is empty.
- while (v8::platform::PumpMessageLoop(g_platform, isolate)) {
- isolate->RunMicrotasks();
- }
- // Run the idle tasks.
- v8::platform::RunIdleTasks(g_platform, isolate,
- 50.0 / base::Time::kMillisecondsPerSecond);
- // If there are still outstanding waiters, sleep a little (to wait for
- // background tasks) and then try everything again.
- if (reinterpret_cast<i::Isolate*>(isolate)->GetWaitCountForTesting() > 0) {
- base::OS::Sleep(base::TimeDelta::FromMilliseconds(1));
- } else {
- break;
- }
+ // Pump the message loop until it is empty.
+ while (v8::platform::PumpMessageLoop(
+ g_platform, isolate, platform::MessageLoopBehavior::kDoNotWait)) {
+ isolate->RunMicrotasks();
}
+ // Run the idle tasks.
+ v8::platform::RunIdleTasks(g_platform, isolate,
+ 50.0 / base::Time::kMillisecondsPerSecond);
}
class Serializer : public ValueSerializer::Delegate {
@@ -2915,73 +3043,6 @@ void Shell::CleanupWorkers() {
externalized_contents_.clear();
}
-
-static void DumpHeapConstants(i::Isolate* isolate) {
- i::Heap* heap = isolate->heap();
- printf(
- "# Copyright 2017 the V8 project authors. All rights reserved.\n"
- "# Use of this source code is governed by a BSD-style license that can\n"
- "# be found in the LICENSE file.\n\n");
- // Dump the INSTANCE_TYPES table to the console.
- printf("# List of known V8 instance types.\n");
-#define DUMP_TYPE(T) printf(" %d: \"%s\",\n", i::T, #T);
- printf("INSTANCE_TYPES = {\n");
- INSTANCE_TYPE_LIST(DUMP_TYPE)
- printf("}\n");
-#undef DUMP_TYPE
-
- // Dump the KNOWN_MAP table to the console.
- printf("\n# List of known V8 maps.\n");
-#define ROOT_LIST_CASE(type, name, camel_name) \
- if (n == NULL && o == heap->name()) n = #camel_name;
-#define STRUCT_LIST_CASE(upper_name, camel_name, name) \
- if (n == NULL && o == heap->name##_map()) n = #camel_name "Map";
- i::HeapObjectIterator it(heap->map_space());
- printf("KNOWN_MAPS = {\n");
- for (i::Object* o = it.Next(); o != NULL; o = it.Next()) {
- i::Map* m = i::Map::cast(o);
- const char* n = NULL;
- intptr_t p = reinterpret_cast<intptr_t>(m) & 0x7ffff;
- int t = m->instance_type();
- ROOT_LIST(ROOT_LIST_CASE)
- STRUCT_LIST(STRUCT_LIST_CASE)
- if (n == NULL) continue;
- printf(" 0x%05" V8PRIxPTR ": (%d, \"%s\"),\n", p, t, n);
- }
- printf("}\n");
-#undef STRUCT_LIST_CASE
-#undef ROOT_LIST_CASE
-
- // Dump the KNOWN_OBJECTS table to the console.
- printf("\n# List of known V8 objects.\n");
-#define ROOT_LIST_CASE(type, name, camel_name) \
- if (n == NULL && o == heap->name()) n = #camel_name;
- i::OldSpaces spit(heap);
- printf("KNOWN_OBJECTS = {\n");
- for (i::PagedSpace* s = spit.next(); s != NULL; s = spit.next()) {
- i::HeapObjectIterator it(s);
- const char* sname = AllocationSpaceName(s->identity());
- for (i::Object* o = it.Next(); o != NULL; o = it.Next()) {
- const char* n = NULL;
- intptr_t p = reinterpret_cast<intptr_t>(o) & 0x7ffff;
- ROOT_LIST(ROOT_LIST_CASE)
- if (n == NULL) continue;
- printf(" (\"%s\", 0x%05" V8PRIxPTR "): \"%s\",\n", sname, p, n);
- }
- }
- printf("}\n");
-#undef ROOT_LIST_CASE
-
- // Dump frame markers
- printf("\n# List of known V8 Frame Markers.\n");
-#define DUMP_MARKER(T, class) printf(" \"%s\",\n", #T);
- printf("FRAME_MARKERS = (\n");
- STACK_FRAME_TYPE_LIST(DUMP_MARKER)
- printf(")\n");
-#undef DUMP_TYPE
-}
-
-
int Shell::Main(int argc, char* argv[]) {
std::ofstream trace_file;
#if (defined(_WIN32) || defined(_WIN64))
@@ -3073,10 +3134,12 @@ int Shell::Main(int argc, char* argv[]) {
}
Isolate* isolate = Isolate::New(create_params);
+ D8Console console(isolate);
{
Isolate::Scope scope(isolate);
Initialize(isolate);
PerIsolateData data(isolate);
+ debug::SetConsoleDelegate(isolate, &console);
if (options.trace_enabled) {
platform::tracing::TraceConfig* trace_config;
@@ -3093,11 +3156,6 @@ int Shell::Main(int argc, char* argv[]) {
tracing_controller->StartTracing(trace_config);
}
- if (options.dump_heap_constants) {
- DumpHeapConstants(reinterpret_cast<i::Isolate*>(isolate));
- return 0;
- }
-
if (options.stress_opt || options.stress_deopt) {
Testing::SetStressRunType(options.stress_opt
? Testing::kStressTypeOpt
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index f6ceeaa78b..e6a40212cd 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -50,6 +50,8 @@
'sources': [
'd8.h',
'd8.cc',
+ 'd8-console.h',
+ 'd8-console.cc',
'<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
],
'conditions': [
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index d885817f7e..baa3a0bf6f 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -6,6 +6,7 @@
#define V8_D8_H_
#include <iterator>
+#include <map>
#include <memory>
#include <string>
#include <vector>
@@ -292,7 +293,6 @@ class ShellOptions {
stress_runs(1),
interactive_shell(false),
test_shell(false),
- dump_heap_constants(false),
expected_to_throw(false),
mock_arraybuffer_allocator(false),
enable_inspector(false),
@@ -324,7 +324,6 @@ class ShellOptions {
int stress_runs;
bool interactive_shell;
bool test_shell;
- bool dump_heap_constants;
bool expected_to_throw;
bool mock_arraybuffer_allocator;
bool enable_inspector;
@@ -358,6 +357,8 @@ class Shell : public i::AllStatic {
static void OnExit(Isolate* isolate);
static void CollectGarbage(Isolate* isolate);
static void EmptyMessageQueues(Isolate* isolate);
+ static void EnsureEventLoopInitialized(Isolate* isolate);
+ static void CompleteMessageLoop(Isolate* isolate);
static std::unique_ptr<SerializationData> SerializeValue(
Isolate* isolate, Local<Value> value, Local<Value> transfer);
@@ -393,6 +394,8 @@ class Shell : public i::AllStatic {
static void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
static void PrintErr(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Write(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void WaitUntilDone(const v8::FunctionCallbackInfo<v8::Value>& args);
+ static void NotifyDone(const v8::FunctionCallbackInfo<v8::Value>& args);
static void QuitOnce(v8::FunctionCallbackInfo<v8::Value>* args);
static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args);
static void Version(const v8::FunctionCallbackInfo<v8::Value>& args);
@@ -457,6 +460,9 @@ class Shell : public i::AllStatic {
static ShellOptions options;
static ArrayBuffer::Allocator* array_buffer_allocator;
+ static void SetWaitUntilDone(Isolate* isolate, bool value);
+ static bool IsWaitUntilDone(Isolate* isolate);
+
private:
static Global<Context> evaluation_context_;
static base::OnceType quit_once_;
@@ -491,6 +497,10 @@ class Shell : public i::AllStatic {
int index);
static MaybeLocal<Module> FetchModuleTree(v8::Local<v8::Context> context,
const std::string& file_name);
+ // We may have multiple isolates running concurrently, so access to
+ // isolate_status_ needs to be thread-safe.
+ static base::LazyMutex isolate_status_lock_;
+ static std::map<Isolate*, bool> isolate_status_;
};
diff --git a/deps/v8/src/date.cc b/deps/v8/src/date.cc
index 41ec17c25a..ff420f5e4f 100644
--- a/deps/v8/src/date.cc
+++ b/deps/v8/src/date.cc
@@ -7,8 +7,8 @@
#include "src/objects.h"
#include "src/objects-inl.h"
-#ifdef V8_I18N_SUPPORT
-#include "src/i18n.h"
+#ifdef V8_INTL_SUPPORT
+#include "src/intl.h"
#endif
namespace v8 {
@@ -28,7 +28,7 @@ static const char kDaysInMonths[] =
DateCache::DateCache()
: stamp_(0),
tz_cache_(
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
FLAG_icu_timezone_data ? new ICUTimezoneCache()
: base::OS::CreateTimezoneCache()
#else
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index 8ac0a4c11d..15a007ac89 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -260,58 +260,87 @@ namespace {
bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
// Use macro to include both inlined and non-inlined version of an intrinsic.
-#define INTRINSIC_WHITELIST(V) \
- /* Conversions */ \
- V(ToInteger) \
- V(ToObject) \
- V(ToString) \
- V(ToLength) \
- V(ToNumber) \
- /* Type checks */ \
- V(IsJSReceiver) \
- V(IsSmi) \
- V(IsArray) \
- V(IsFunction) \
- V(IsDate) \
- V(IsJSProxy) \
- V(IsRegExp) \
- V(IsTypedArray) \
- V(ClassOf) \
- /* Loads */ \
- V(LoadLookupSlotForCall) \
- /* Arrays */ \
- V(ArraySpeciesConstructor) \
- V(NormalizeElements) \
- V(GetArrayKeys) \
- V(HasComplexElements) \
- V(EstimateNumberOfElements) \
- /* Errors */ \
- V(ReThrow) \
- V(ThrowReferenceError) \
- V(ThrowSymbolIteratorInvalid) \
- V(ThrowIteratorResultNotAnObject) \
- V(NewTypeError) \
- /* Strings */ \
- V(StringCharCodeAt) \
- V(StringIndexOf) \
- V(StringReplaceOneCharWithString) \
- V(SubString) \
- V(RegExpInternalReplace) \
- /* Literals */ \
- V(CreateArrayLiteral) \
- V(CreateObjectLiteral) \
- V(CreateRegExpLiteral) \
- /* Collections */ \
- V(JSCollectionGetTable) \
- V(FixedArrayGet) \
- V(StringGetRawHashField) \
- V(GenericHash) \
- V(MapIteratorInitialize) \
- V(MapInitialize) \
- /* Misc. */ \
- V(ForInPrepare) \
- V(Call) \
- V(MaxSmi) \
+#define INTRINSIC_WHITELIST(V) \
+ /* Conversions */ \
+ V(ToInteger) \
+ V(ToObject) \
+ V(ToString) \
+ V(ToLength) \
+ V(ToNumber) \
+ /* Type checks */ \
+ V(IsJSReceiver) \
+ V(IsSmi) \
+ V(IsArray) \
+ V(IsFunction) \
+ V(IsDate) \
+ V(IsJSProxy) \
+ V(IsJSMap) \
+ V(IsJSSet) \
+ V(IsJSMapIterator) \
+ V(IsJSSetIterator) \
+ V(IsJSWeakMap) \
+ V(IsJSWeakSet) \
+ V(IsRegExp) \
+ V(IsTypedArray) \
+ V(ClassOf) \
+ /* Loads */ \
+ V(LoadLookupSlotForCall) \
+ /* Arrays */ \
+ V(ArraySpeciesConstructor) \
+ V(NormalizeElements) \
+ V(GetArrayKeys) \
+ V(HasComplexElements) \
+ V(EstimateNumberOfElements) \
+ /* Errors */ \
+ V(ReThrow) \
+ V(ThrowReferenceError) \
+ V(ThrowSymbolIteratorInvalid) \
+ V(ThrowIteratorResultNotAnObject) \
+ V(NewTypeError) \
+ /* Strings */ \
+ V(StringCharCodeAt) \
+ V(StringIndexOf) \
+ V(StringReplaceOneCharWithString) \
+ V(SubString) \
+ V(RegExpInternalReplace) \
+ /* Literals */ \
+ V(CreateArrayLiteral) \
+ V(CreateObjectLiteral) \
+ V(CreateRegExpLiteral) \
+ /* Collections */ \
+ V(JSCollectionGetTable) \
+ V(FixedArrayGet) \
+ V(StringGetRawHashField) \
+ V(GenericHash) \
+ V(MapIteratorInitialize) \
+ V(MapInitialize) \
+ /* Called from builtins */ \
+ V(StringParseFloat) \
+ V(StringParseInt) \
+ V(StringCharCodeAtRT) \
+ V(StringIndexOfUnchecked) \
+ V(StringEqual) \
+ V(SymbolDescriptiveString) \
+ V(GenerateRandomNumbers) \
+ V(ExternalStringGetChar) \
+ V(GlobalPrint) \
+ V(AllocateInNewSpace) \
+ V(AllocateSeqOneByteString) \
+ V(AllocateSeqTwoByteString) \
+ V(ObjectCreate) \
+ V(ObjectHasOwnProperty) \
+ V(ArrayIndexOf) \
+ V(ArrayIncludes_Slow) \
+ V(ArrayIsArray) \
+ V(ThrowTypeError) \
+ V(ThrowCalledOnNullOrUndefined) \
+ V(ThrowIncompatibleMethodReceiver) \
+ V(ThrowInvalidHint) \
+ V(ThrowNotDateError) \
+ /* Misc. */ \
+ V(ForInPrepare) \
+ V(Call) \
+ V(MaxSmi) \
V(HasInPrototypeChain)
#define CASE(Name) \
@@ -556,6 +585,7 @@ bool BuiltinHasNoSideEffect(Builtins::Name id) {
case Builtins::kStringPrototypeIncludes:
case Builtins::kStringPrototypeIndexOf:
case Builtins::kStringPrototypeLastIndexOf:
+ case Builtins::kStringPrototypeSlice:
case Builtins::kStringPrototypeStartsWith:
case Builtins::kStringPrototypeSubstr:
case Builtins::kStringPrototypeSubstring:
@@ -649,6 +679,23 @@ bool DebugEvaluate::FunctionHasNoSideEffect(Handle<SharedFunctionInfo> info) {
int builtin_index = info->code()->builtin_index();
if (builtin_index >= 0 && builtin_index < Builtins::builtin_count &&
BuiltinHasNoSideEffect(static_cast<Builtins::Name>(builtin_index))) {
+#ifdef DEBUG
+ // TODO(yangguo): Check builtin-to-builtin calls too.
+ int mode = RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE);
+ bool failed = false;
+ for (RelocIterator it(info->code(), mode); !it.done(); it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ Address address = rinfo->target_external_reference();
+ const Runtime::Function* function = Runtime::FunctionForEntry(address);
+ if (function == nullptr) continue;
+ if (!IntrinsicHasNoSideEffect(function->function_id)) {
+ PrintF("Whitelisted builtin %s calls non-whitelisted intrinsic %s\n",
+ Builtins::name(builtin_index), function->name);
+ failed = true;
+ }
+ CHECK(!failed);
+ }
+#endif // DEBUG
return true;
}
}
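
The whitelist relies on the X-macro idiom: a single list macro is expanded once per use site, so adding an intrinsic is a one-line change that automatically stays consistent everywhere the list is consumed. A self-contained sketch of the pattern with illustrative names:

    #include <cstdio>

    // One list macro drives both the enum and the switch below.
    #define SIDE_EFFECT_FREE_LIST(V) \
      V(ToString)                    \
      V(ToNumber)                    \
      V(MaxSmi)

    enum class FunctionId {
    #define ID(Name) k##Name,
      SIDE_EFFECT_FREE_LIST(ID)
    #undef ID
      kEval,  // not whitelisted
    };

    bool IntrinsicHasNoSideEffect(FunctionId id) {
      switch (id) {
    #define CASE(Name) case FunctionId::k##Name:
        SIDE_EFFECT_FREE_LIST(CASE)
    #undef CASE
        return true;
        default:
          return false;
      }
    }

    int main() {
      std::printf("%d %d\n",
                  IntrinsicHasNoSideEffect(FunctionId::kToString),  // 1
                  IntrinsicHasNoSideEffect(FunctionId::kEval));     // 0
    }
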
diff --git a/deps/v8/src/debug/debug-evaluate.h b/deps/v8/src/debug/debug-evaluate.h
index 5f5b51e329..ba6ca7e439 100644
--- a/deps/v8/src/debug/debug-evaluate.h
+++ b/deps/v8/src/debug/debug-evaluate.h
@@ -7,6 +7,7 @@
#include "src/frames.h"
#include "src/objects.h"
+#include "src/objects/string-table.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 443e07497d..ffe34c112e 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -25,6 +25,9 @@ class Script;
namespace debug {
+void SetContextId(Local<Context> context, int id);
+int GetContextId(Local<Context> context);
+
/**
* Debugger is running in its own context which is entered while debugger
* messages are being dispatched. This is an explicit getter for this
@@ -126,6 +129,7 @@ class V8_EXPORT_PRIVATE Script {
ScriptOriginOptions OriginOptions() const;
bool WasCompiled() const;
+ bool IsEmbedded() const;
int Id() const;
int LineOffset() const;
int ColumnOffset() const;
@@ -133,7 +137,7 @@ class V8_EXPORT_PRIVATE Script {
MaybeLocal<String> Name() const;
MaybeLocal<String> SourceURL() const;
MaybeLocal<String> SourceMappingURL() const;
- MaybeLocal<Value> ContextData() const;
+ Maybe<int> ContextId() const;
MaybeLocal<String> Source() const;
bool IsWasm() const;
bool IsModule() const;
@@ -166,8 +170,7 @@ MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* isolate,
class DebugDelegate {
public:
virtual ~DebugDelegate() {}
- virtual void PromiseEventOccurred(v8::Local<v8::Context> context,
- debug::PromiseDebugActionType type, int id,
+ virtual void PromiseEventOccurred(debug::PromiseDebugActionType type, int id,
int parent_id, bool created_by_user) {}
virtual void ScriptCompiled(v8::Local<Script> script,
bool has_compile_error) {}
@@ -207,6 +210,11 @@ enum Builtin {
Local<Function> GetBuiltin(Isolate* isolate, Builtin builtin);
+V8_EXPORT_PRIVATE void SetConsoleDelegate(Isolate* isolate,
+ ConsoleDelegate* delegate);
+
+int GetStackFrameId(v8::Local<v8::StackFrame> frame);
+
/**
* Native wrapper around v8::internal::JSGeneratorObject object.
*/
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 73af4cf7f7..41c22c9ea5 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -186,7 +186,7 @@ CodeBreakIterator::CodeBreakIterator(Handle<DebugInfo> debug_info)
: BreakIterator(debug_info),
reloc_iterator_(debug_info->DebugCode(), GetModeMask()),
source_position_iterator_(
- debug_info->DebugCode()->source_position_table()) {
+ debug_info->DebugCode()->SourcePositionTable()) {
// There is at least one break location.
DCHECK(!Done());
Next();
@@ -277,7 +277,7 @@ BytecodeArrayBreakIterator::BytecodeArrayBreakIterator(
Handle<DebugInfo> debug_info)
: BreakIterator(debug_info),
source_position_iterator_(
- debug_info->DebugBytecodeArray()->source_position_table()) {
+ debug_info->DebugBytecodeArray()->SourcePositionTable()) {
// There is at least one break location.
DCHECK(!Done());
Next();
@@ -417,10 +417,10 @@ char* Debug::RestoreDebug(char* storage) {
int Debug::ArchiveSpacePerThread() { return 0; }
-void Debug::Iterate(ObjectVisitor* v) {
- v->VisitPointer(&thread_local_.return_value_);
- v->VisitPointer(&thread_local_.suspended_generator_);
- v->VisitPointer(&thread_local_.ignore_step_into_function_);
+void Debug::Iterate(RootVisitor* v) {
+ v->VisitRootPointer(Root::kDebug, &thread_local_.return_value_);
+ v->VisitRootPointer(Root::kDebug, &thread_local_.suspended_generator_);
+ v->VisitRootPointer(Root::kDebug, &thread_local_.ignore_step_into_function_);
}
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
@@ -1301,19 +1301,7 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
OptimizingCompileDispatcher::BlockingBehavior::kBlock);
}
- List<Handle<JSFunction> > functions;
-
- // Flush all optimized code maps. Note that the below heap iteration does not
- // cover this, because the given function might have been inlined into code
- // for which no JSFunction exists.
- {
- SharedFunctionInfo::GlobalIterator iterator(isolate_);
- while (SharedFunctionInfo* shared = iterator.Next()) {
- shared->ClearCodeFromOptimizedCodeMap();
- }
- }
-
- // The native context also has a list of OSR'd optimized code. Clear it.
+ // The native context has a list of OSR'd optimized code. Clear it.
isolate_->ClearOSROptimizedCode();
// Make sure we abort incremental marking.
@@ -1323,6 +1311,7 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
DCHECK(shared->is_compiled());
bool baseline_exists = shared->HasBaselineCode();
+ List<Handle<JSFunction>> functions;
{
// TODO(yangguo): with bytecode, we still walk the heap to find all
// optimized code for the function to deoptimize. We can probably be
@@ -1334,6 +1323,9 @@ bool Debug::PrepareFunctionForBreakPoints(Handle<SharedFunctionInfo> shared) {
if (obj->IsJSFunction()) {
JSFunction* function = JSFunction::cast(obj);
if (!function->Inlines(*shared)) continue;
+ if (function->has_feedback_vector()) {
+ function->ClearOptimizedCodeSlot("Prepare for breakpoints");
+ }
if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
Deoptimizer::DeoptimizeFunction(function);
}
@@ -1888,32 +1880,6 @@ void Debug::OnAfterCompile(Handle<Script> script) {
}
namespace {
-struct CollectedCallbackData {
- Object** location;
- int id;
- Debug* debug;
- Isolate* isolate;
-
- CollectedCallbackData(Object** location, int id, Debug* debug,
- Isolate* isolate)
- : location(location), id(id), debug(debug), isolate(isolate) {}
-};
-
-void SendAsyncTaskEventCancel(const v8::WeakCallbackInfo<void>& info) {
- std::unique_ptr<CollectedCallbackData> data(
- reinterpret_cast<CollectedCallbackData*>(info.GetParameter()));
- if (!data->debug->is_active()) return;
- HandleScope scope(data->isolate);
- data->debug->OnAsyncTaskEvent(debug::kDebugPromiseCollected, data->id, 0);
-}
-
-void ResetPromiseHandle(const v8::WeakCallbackInfo<void>& info) {
- CollectedCallbackData* data =
- reinterpret_cast<CollectedCallbackData*>(info.GetParameter());
- GlobalHandles::Destroy(data->location);
- info.SetSecondPassCallback(&SendAsyncTaskEventCancel);
-}
-
// In an async function, reuse the existing stack related to the outer
// Promise. Otherwise, e.g. in a direct call to then, save a new stack.
// Promises with multiple reactions with one or more of them being async
@@ -1982,19 +1948,6 @@ int Debug::NextAsyncTaskId(Handle<JSObject> promise) {
handle(Smi::FromInt(++thread_local_.async_task_count_), isolate_);
Object::SetProperty(&it, async_id, SLOPPY, Object::MAY_BE_STORE_FROM_KEYED)
.ToChecked();
- Handle<Object> global_handle = isolate_->global_handles()->Create(*promise);
- // We send EnqueueRecurring async task event when promise is fulfilled or
- // rejected, WillHandle and DidHandle for every scheduled microtask for this
- // promise.
- // We need to send a cancel event when no other microtasks can be
- // started for this promise and all current microtasks are finished.
- // Since we holding promise when at least one microtask is scheduled (inside
- // PromiseReactionJobInfo), we can send cancel event in weak callback.
- GlobalHandles::MakeWeak(
- global_handle.location(),
- new CollectedCallbackData(global_handle.location(), async_id->value(),
- this, isolate_),
- &ResetPromiseHandle, v8::WeakCallbackType::kParameter);
return async_id->value();
}
@@ -2002,7 +1955,13 @@ namespace {
debug::Location GetDebugLocation(Handle<Script> script, int source_position) {
Script::PositionInfo info;
Script::GetPositionInfo(script, source_position, &info, Script::WITH_OFFSET);
- return debug::Location(info.line, info.column);
+ // V8's ScriptCompiler::CompileFunctionInContext takes an expression and
+ // compiles it as an anonymous function, i.e. (function() .. expression ..).
+ // To produce correct locations for statements inside that expression, V8
+ // compiles the function with a negative offset, so the computed line and
+ // column can be negative here. Clamp them to zero instead of blackboxing
+ // on the raw statement position.
+ return debug::Location(std::max(info.line, 0), std::max(info.column, 0));
}
} // namespace
@@ -2044,9 +2003,6 @@ void Debug::OnAsyncTaskEvent(debug::PromiseDebugActionType type, int id,
if (in_debug_scope() || ignore_events()) return;
if (!debug_delegate_) return;
SuppressDebug while_processing(this);
- DebugScope debug_scope(isolate_->debug());
- if (debug_scope.failed()) return;
- HandleScope scope(isolate_);
PostponeInterruptsScope no_interrupts(isolate_);
DisableBreak no_recursive_break(this);
bool created_by_user = false;
@@ -2058,16 +2014,13 @@ void Debug::OnAsyncTaskEvent(debug::PromiseDebugActionType type, int id,
!it.done() &&
!IsFrameBlackboxed(it.frame());
}
- debug_delegate_->PromiseEventOccurred(
- Utils::ToLocal(debug_scope.GetContext()), type, id, parent_id,
- created_by_user);
+ debug_delegate_->PromiseEventOccurred(type, id, parent_id, created_by_user);
}
void Debug::ProcessCompileEvent(v8::DebugEvent event, Handle<Script> script) {
// Attach the correct debug id to the script. The debug id is used by the
// inspector to filter scripts by native context.
- FixedArray* array = isolate_->native_context()->embedder_data();
- script->set_context_data(array->get(v8::Context::kDebugIdIndex));
+ script->set_context_data(isolate_->native_context()->debug_context_id());
if (ignore_events()) return;
if (!script->IsUserJavaScript() && script->type() != i::Script::TYPE_WASM) {
return;
@@ -2188,9 +2141,9 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode) {
Object* fun = it.frame()->function();
if (fun && fun->IsJSFunction()) {
HandleScope scope(isolate_);
+ Handle<JSFunction> function(JSFunction::cast(fun), isolate_);
// Don't stop in builtin and blackboxed functions.
- Handle<SharedFunctionInfo> shared(JSFunction::cast(fun)->shared(),
- isolate_);
+ Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
bool ignore_break = ignore_break_mode == kIgnoreIfTopFrameBlackboxed
? IsBlackboxed(shared)
: AllFramesOnStackAreBlackboxed();
@@ -2203,12 +2156,11 @@ void Debug::HandleDebugBreak(IgnoreBreakMode ignore_break_mode) {
// TODO(yangguo): introduce break_on_function_entry since current
// implementation is slow.
if (isolate_->stack_guard()->CheckDebugBreak()) {
- Deoptimizer::DeoptimizeFunction(JSFunction::cast(fun));
+ Deoptimizer::DeoptimizeFunction(*function);
}
return;
}
- JSGlobalObject* global =
- JSFunction::cast(fun)->context()->global_object();
+ JSGlobalObject* global = function->context()->global_object();
// Don't stop in debugger functions.
if (IsDebugGlobal(global)) return;
// Don't stop if the break location is muted.
@@ -2347,8 +2299,11 @@ bool Debug::PerformSideEffectCheckForCallback(Address function) {
}
void LegacyDebugDelegate::PromiseEventOccurred(
- v8::Local<v8::Context> context, v8::debug::PromiseDebugActionType type,
- int id, int parent_id, bool created_by_user) {
+ v8::debug::PromiseDebugActionType type, int id, int parent_id,
+ bool created_by_user) {
+ DebugScope debug_scope(isolate_->debug());
+ if (debug_scope.failed()) return;
+ HandleScope scope(isolate_);
Handle<Object> event_data;
if (isolate_->debug()->MakeAsyncTaskEvent(type, id).ToHandle(&event_data)) {
ProcessDebugEvent(v8::AsyncTaskEvent, Handle<JSObject>::cast(event_data));
diff --git a/deps/v8/src/debug/debug.h b/deps/v8/src/debug/debug.h
index f90f18966b..5dad8a8ceb 100644
--- a/deps/v8/src/debug/debug.h
+++ b/deps/v8/src/debug/debug.h
@@ -365,7 +365,7 @@ class Debug {
char* RestoreDebug(char* from);
static int ArchiveSpacePerThread();
void FreeThreadResources() { }
- void Iterate(ObjectVisitor* v);
+ void Iterate(RootVisitor* v);
bool CheckExecutionState(int id) {
return CheckExecutionState() && break_id() == id;
@@ -616,8 +616,7 @@ class Debug {
class LegacyDebugDelegate : public v8::debug::DebugDelegate {
public:
explicit LegacyDebugDelegate(Isolate* isolate) : isolate_(isolate) {}
- void PromiseEventOccurred(v8::Local<v8::Context> context,
- v8::debug::PromiseDebugActionType type, int id,
+ void PromiseEventOccurred(v8::debug::PromiseDebugActionType type, int id,
int parent_id, bool created_by_user) override;
void ScriptCompiled(v8::Local<v8::debug::Script> script,
bool has_compile_error) override;
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index 82eb2f2abb..265cd7559c 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -9,9 +9,15 @@
#include <string>
#include <vector>
+#include "include/v8.h"
#include "src/globals.h"
namespace v8 {
+
+namespace internal {
+class BuiltinArguments;
+} // internal
+
namespace debug {
/**
@@ -67,7 +73,6 @@ enum PromiseDebugActionType {
kDebugEnqueueAsyncFunction,
kDebugEnqueuePromiseResolve,
kDebugEnqueuePromiseReject,
- kDebugPromiseCollected,
kDebugWillHandle,
kDebugDidHandle,
};
@@ -90,6 +95,47 @@ class V8_EXPORT_PRIVATE BreakLocation : public Location {
BreakLocationType type_;
};
+class ConsoleCallArguments : private v8::FunctionCallbackInfo<v8::Value> {
+ public:
+ int Length() const { return v8::FunctionCallbackInfo<v8::Value>::Length(); }
+ V8_INLINE Local<Value> operator[](int i) const {
+ return v8::FunctionCallbackInfo<v8::Value>::operator[](i);
+ }
+
+ explicit ConsoleCallArguments(const v8::FunctionCallbackInfo<v8::Value>&);
+ explicit ConsoleCallArguments(internal::BuiltinArguments&);
+};
+
+// v8::FunctionCallbackInfo can be used only for getting arguments; calling
+// any other getter will produce a crash.
+class ConsoleDelegate {
+ public:
+ virtual void Debug(const ConsoleCallArguments& args) {}
+ virtual void Error(const ConsoleCallArguments& args) {}
+ virtual void Info(const ConsoleCallArguments& args) {}
+ virtual void Log(const ConsoleCallArguments& args) {}
+ virtual void Warn(const ConsoleCallArguments& args) {}
+ virtual void Dir(const ConsoleCallArguments& args) {}
+ virtual void DirXml(const ConsoleCallArguments& args) {}
+ virtual void Table(const ConsoleCallArguments& args) {}
+ virtual void Trace(const ConsoleCallArguments& args) {}
+ virtual void Group(const ConsoleCallArguments& args) {}
+ virtual void GroupCollapsed(const ConsoleCallArguments& args) {}
+ virtual void GroupEnd(const ConsoleCallArguments& args) {}
+ virtual void Clear(const ConsoleCallArguments& args) {}
+ virtual void Count(const ConsoleCallArguments& args) {}
+ virtual void Assert(const ConsoleCallArguments& args) {}
+ virtual void MarkTimeline(const ConsoleCallArguments& args) {}
+ virtual void Profile(const ConsoleCallArguments& args) {}
+ virtual void ProfileEnd(const ConsoleCallArguments& args) {}
+ virtual void Timeline(const ConsoleCallArguments& args) {}
+ virtual void TimelineEnd(const ConsoleCallArguments& args) {}
+ virtual void Time(const ConsoleCallArguments& args) {}
+ virtual void TimeEnd(const ConsoleCallArguments& args) {}
+ virtual void TimeStamp(const ConsoleCallArguments& args) {}
+ virtual ~ConsoleDelegate() = default;
+};
+
} // namespace debug
} // namespace v8
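
An embedder adopts the new console hook by subclassing ConsoleDelegate, overriding only the methods it cares about, and installing the instance with v8::debug::SetConsoleDelegate (as d8 now does for its main isolate, its worker isolates, and its source-group threads). A sketch, with an illustrative include path (the delegate still lives in V8's internal debug headers at this point) and the pre-6.3 single-argument String::Utf8Value constructor:

    #include <cstdio>

    #include "include/v8.h"
    #include "src/debug/debug-interface.h"  // include path is illustrative

    // Forwards console.log to stdout; every method left un-overridden
    // (Error, Warn, ...) keeps the default empty behavior.
    class StdoutConsole : public v8::debug::ConsoleDelegate {
     public:
      void Log(const v8::debug::ConsoleCallArguments& args) override {
        for (int i = 0; i < args.Length(); ++i) {
          v8::String::Utf8Value text(args[i]);
          std::printf("%s%s", i > 0 ? " " : "", *text ? *text : "");
        }
        std::printf("\n");
      }
    };

    // Installed per isolate, as d8 does above:
    //   StdoutConsole console;
    //   v8::debug::SetConsoleDelegate(isolate, &console);
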
diff --git a/deps/v8/src/debug/mips/OWNERS b/deps/v8/src/debug/mips/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/debug/mips/OWNERS
+++ b/deps/v8/src/debug/mips/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/debug/mips/debug-mips.cc b/deps/v8/src/debug/mips/debug-mips.cc
index 5b809e6a40..177e803106 100644
--- a/deps/v8/src/debug/mips/debug-mips.cc
+++ b/deps/v8/src/debug/mips/debug-mips.cc
@@ -53,9 +53,17 @@ void DebugCodegen::PatchDebugBreakSlot(Isolate* isolate, Address pc,
// to a call to the debug break slot code.
// li t9, address (lui t9 / ori t9 instruction pair)
// call t9 (jalr t9 / nop instruction pair)
+
+ // Add a label so the size of the emitted patch code can be checked below.
+ Label check_codesize;
+ patcher.masm()->bind(&check_codesize);
patcher.masm()->li(v8::internal::t9,
Operand(reinterpret_cast<int32_t>(code->entry())));
patcher.masm()->Call(v8::internal::t9);
+
+ // Check that the size of the code generated is as expected.
+ DCHECK_EQ(Assembler::kDebugBreakSlotLength,
+ patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
}
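
The bind-then-measure idiom above is the assembler's standard way to assert that a patch sequence has exactly the expected size: bind a label at the start, emit, then compare SizeOfCodeGeneratedSince(label) with the slot length. A toy, V8-free illustration of the same check (the instruction encodings and the 16-byte slot length are illustrative):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Stand-in for the bind/SizeOfCodeGeneratedSince pattern: record a
    // position, emit, then assert the emitted size matches the slot.
    struct ToyAssembler {
      std::vector<uint8_t> buffer;
      size_t pc_offset() const { return buffer.size(); }
      void Emit32(uint32_t insn) {
        for (int i = 0; i < 4; ++i)
          buffer.push_back(static_cast<uint8_t>(insn >> (8 * i)));
      }
    };

    int main() {
      constexpr size_t kDebugBreakSlotLength = 16;  // 4 instructions
      ToyAssembler masm;
      size_t check_codesize = masm.pc_offset();  // "bind" the label
      masm.Emit32(0x3c190000);  // lui t9, hi(addr)
      masm.Emit32(0x37390000);  // ori t9, t9, lo(addr)
      masm.Emit32(0x0320f809);  // jalr t9
      masm.Emit32(0x00000000);  // nop (branch delay slot)
      assert(masm.pc_offset() - check_codesize == kDebugBreakSlotLength);
    }
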
bool DebugCodegen::DebugBreakSlotIsPatched(Address pc) {
diff --git a/deps/v8/src/debug/mips64/OWNERS b/deps/v8/src/debug/mips64/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/debug/mips64/OWNERS
+++ b/deps/v8/src/debug/mips64/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/debug/mips64/debug-mips64.cc b/deps/v8/src/debug/mips64/debug-mips64.cc
index b8dbbfb45e..fded965462 100644
--- a/deps/v8/src/debug/mips64/debug-mips64.cc
+++ b/deps/v8/src/debug/mips64/debug-mips64.cc
@@ -124,13 +124,13 @@ void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// - Leave the frame.
// - Restart the frame by calling the function.
__ mov(fp, a1);
- __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// Pop return address and frame.
__ LeaveFrame(StackFrame::INTERNAL);
- __ ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a0,
+ __ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a0,
FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(a2, a0);
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index e6ccdfeba0..d7c5006686 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -230,10 +230,15 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
class SelectedCodeUnlinker: public OptimizedFunctionVisitor {
public:
virtual void VisitFunction(JSFunction* function) {
+ // The code in the function's optimized code feedback vector slot might
+ // be different from the code on the function - evict it if necessary.
+ function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
+ function->shared(), "unlinking code marked for deopt");
+
Code* code = function->code();
if (!code->marked_for_deoptimization()) return;
- // Unlink this function and evict from optimized code map.
+ // Unlink this function.
SharedFunctionInfo* shared = function->shared();
if (!code->deopt_already_counted()) {
shared->increment_deopt_count();
@@ -342,12 +347,12 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
#endif
// It is finally time to die, code object.
- // Remove the code from optimized code map.
+ // Remove the code from the osr optimized code cache.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(codes[i]->deoptimization_data());
- SharedFunctionInfo* shared =
- SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
- shared->EvictFromOptimizedCodeMap(codes[i], "deoptimized code");
+ if (deopt_data->OsrAstId()->value() != BailoutId::None().ToInt()) {
+ isolate->EvictOSROptimizedCode(codes[i], "deoptimized code");
+ }
// Do platform-specific patching to force any activations to lazy deopt.
PatchCodeForDeoptimization(isolate, codes[i]);
@@ -1556,7 +1561,10 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
int input_index = 0;
Builtins* builtins = isolate_->builtins();
- Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
+ Code* construct_stub = builtins->builtin(
+ FLAG_harmony_restrict_constructor_return
+ ? Builtins::kJSConstructStubGenericRestrictedReturn
+ : Builtins::kJSConstructStubGenericUnrestrictedReturn);
BailoutId bailout_id = translated_frame->node_id();
unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
@@ -1662,18 +1670,22 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
PrintF(trace_scope_->file(), "(%d)\n", height - 1);
}
+ // The constructor function was mentioned explicitly in the
+ // CONSTRUCT_STUB_FRAME.
+ output_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ WriteValueToOutput(function, 0, frame_index, output_offset,
+ "constructor function ");
+
+ // The deopt info contains the implicit receiver or the new target at the
+ // position of the receiver. Copy it to the top of stack.
+ output_offset -= kPointerSize;
+ value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
+ output_frame->SetFrameSlot(output_offset, value);
if (bailout_id == BailoutId::ConstructStubCreate()) {
- // The function was mentioned explicitly in the CONSTRUCT_STUB_FRAME.
- output_offset -= kPointerSize;
- value = reinterpret_cast<intptr_t>(function);
- WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
+ DebugPrintOutputSlot(value, frame_index, output_offset, "new target\n");
} else {
- DCHECK(bailout_id == BailoutId::ConstructStubInvoke());
- // The newly allocated object was passed as receiver in the artificial
- // constructor stub environment created by HEnvironment::CopyForInlining().
- output_offset -= kPointerSize;
- value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
- output_frame->SetFrameSlot(output_offset, value);
+ CHECK(bailout_id == BailoutId::ConstructStubInvoke());
DebugPrintOutputSlot(value, frame_index, output_offset,
"allocated receiver\n");
}
@@ -1684,8 +1696,7 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
Register result_reg = FullCodeGenerator::result_register();
value = input_->GetRegister(result_reg.code());
output_frame->SetFrameSlot(output_offset, value);
- DebugPrintOutputSlot(value, frame_index, output_offset,
- "constructor result\n");
+ DebugPrintOutputSlot(value, frame_index, output_offset, "subcall result\n");
output_frame->SetState(
Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
@@ -2326,6 +2337,12 @@ void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
data->deopt_entry_code_entries_[type] = entry_count;
}
+void Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate) {
+ EnsureCodeForDeoptimizationEntry(isolate, EAGER, kMaxNumberOfEntries - 1);
+ EnsureCodeForDeoptimizationEntry(isolate, LAZY, kMaxNumberOfEntries - 1);
+ EnsureCodeForDeoptimizationEntry(isolate, SOFT, kMaxNumberOfEntries - 1);
+}
+
FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
: frame_size_(frame_size),
parameter_count_(parameter_count),
@@ -3433,7 +3450,8 @@ Address TranslatedState::ComputeArgumentsPosition(Address input_frame_pointer,
// objects for the fields are not read from the TranslationIterator, but instead
// created on-the-fly based on dynamic information in the optimized frame.
void TranslatedState::CreateArgumentsElementsTranslatedValues(
- int frame_index, Address input_frame_pointer, bool is_rest) {
+ int frame_index, Address input_frame_pointer, bool is_rest,
+ FILE* trace_file) {
TranslatedFrame& frame = frames_[frame_index];
int length;
@@ -3442,6 +3460,11 @@ void TranslatedState::CreateArgumentsElementsTranslatedValues(
int object_index = static_cast<int>(object_positions_.size());
int value_index = static_cast<int>(frame.values_.size());
+ if (trace_file != nullptr) {
+ PrintF(trace_file,
+ "arguments elements object #%d (is_rest = %d, length = %d)",
+ object_index, is_rest, length);
+ }
object_positions_.push_back({frame_index, value_index});
frame.Add(TranslatedValue::NewDeferredObject(
this, length + FixedArray::kHeaderSize / kPointerSize, object_index));
@@ -3519,7 +3542,8 @@ int TranslatedState::CreateNextTranslatedValue(
case Translation::ARGUMENTS_ELEMENTS: {
bool is_rest = iterator->Next();
- CreateArgumentsElementsTranslatedValues(frame_index, fp, is_rest);
+ CreateArgumentsElementsTranslatedValues(frame_index, fp, is_rest,
+ trace_file);
return 0;
}
@@ -3527,6 +3551,10 @@ int TranslatedState::CreateNextTranslatedValue(
bool is_rest = iterator->Next();
int length;
ComputeArgumentsPosition(fp, is_rest, &length);
+ if (trace_file != nullptr) {
+ PrintF(trace_file, "arguments length field (is_rest = %d, length = %d)",
+ is_rest, length);
+ }
frame.Add(TranslatedValue::NewInt32(this, length));
return 0;
}
@@ -3932,7 +3960,8 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
Handle<Object> elements = materializer.FieldAt(value_index);
object->set_properties(FixedArray::cast(*properties));
object->set_elements(FixedArrayBase::cast(*elements));
- for (int i = 0; i < length - 3; ++i) {
+ int in_object_properties = map->GetInObjectProperties();
+ for (int i = 0; i < in_object_properties; ++i) {
Handle<Object> value = materializer.FieldAt(value_index);
FieldIndex index = FieldIndex::ForPropertyIndex(object->map(), i);
object->FastPropertyAtPut(index, *value);
@@ -4028,39 +4057,10 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
slot->value_ = object;
Handle<Object> properties = materializer.FieldAt(value_index);
Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> length = materializer.FieldAt(value_index);
+ Handle<Object> array_length = materializer.FieldAt(value_index);
object->set_properties(FixedArray::cast(*properties));
object->set_elements(FixedArrayBase::cast(*elements));
- object->set_length(*length);
- return object;
- }
- case JS_FUNCTION_TYPE: {
- Handle<SharedFunctionInfo> temporary_shared =
- isolate_->factory()->NewSharedFunctionInfo(
- isolate_->factory()->empty_string(), MaybeHandle<Code>(), false);
- Handle<JSFunction> object =
- isolate_->factory()->NewFunctionFromSharedFunctionInfo(
- map, temporary_shared, isolate_->factory()->undefined_value(),
- NOT_TENURED);
- slot->value_ = object;
- Handle<Object> properties = materializer.FieldAt(value_index);
- Handle<Object> elements = materializer.FieldAt(value_index);
- Handle<Object> prototype = materializer.FieldAt(value_index);
- Handle<Object> shared = materializer.FieldAt(value_index);
- Handle<Object> context = materializer.FieldAt(value_index);
- Handle<Object> vector_cell = materializer.FieldAt(value_index);
- Handle<Object> entry = materializer.FieldAt(value_index);
- Handle<Object> next_link = materializer.FieldAt(value_index);
- object->ReplaceCode(*isolate_->builtins()->CompileLazy());
- object->set_map(*map);
- object->set_properties(FixedArray::cast(*properties));
- object->set_elements(FixedArrayBase::cast(*elements));
- object->set_prototype_or_initial_map(*prototype);
- object->set_shared(SharedFunctionInfo::cast(*shared));
- object->set_context(Context::cast(*context));
- object->set_feedback_vector_cell(Cell::cast(*vector_cell));
- CHECK(entry->IsNumber()); // Entry to compile lazy stub.
- CHECK(next_link->IsUndefined(isolate_));
+ object->set_length(*array_length);
return object;
}
case CONS_STRING_TYPE: {
@@ -4071,11 +4071,11 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
.ToHandleChecked());
slot->value_ = object;
Handle<Object> hash = materializer.FieldAt(value_index);
- Handle<Object> length = materializer.FieldAt(value_index);
+ Handle<Object> string_length = materializer.FieldAt(value_index);
Handle<Object> first = materializer.FieldAt(value_index);
Handle<Object> second = materializer.FieldAt(value_index);
object->set_map(*map);
- object->set_length(Smi::cast(*length)->value());
+ object->set_length(Smi::cast(*string_length)->value());
object->set_first(String::cast(*first));
object->set_second(String::cast(*second));
CHECK(hash->IsNumber()); // The {Name::kEmptyHashField} value.
@@ -4095,15 +4095,16 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
}
case FIXED_ARRAY_TYPE: {
Handle<Object> lengthObject = materializer.FieldAt(value_index);
- int32_t length = 0;
- CHECK(lengthObject->ToInt32(&length));
- Handle<FixedArray> object = isolate_->factory()->NewFixedArray(length);
+ int32_t array_length = 0;
+ CHECK(lengthObject->ToInt32(&array_length));
+ Handle<FixedArray> object =
+ isolate_->factory()->NewFixedArray(array_length);
// We need to set the map, because the fixed array we are
// materializing could be a context or an arguments object,
// in which case we must retain that information.
object->set_map(*map);
slot->value_ = object;
- for (int i = 0; i < length; ++i) {
+ for (int i = 0; i < array_length; ++i) {
Handle<Object> value = materializer.FieldAt(value_index);
object->set(i, *value);
}
@@ -4112,15 +4113,15 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case FIXED_DOUBLE_ARRAY_TYPE: {
DCHECK_EQ(*map, isolate_->heap()->fixed_double_array_map());
Handle<Object> lengthObject = materializer.FieldAt(value_index);
- int32_t length = 0;
- CHECK(lengthObject->ToInt32(&length));
+ int32_t array_length = 0;
+ CHECK(lengthObject->ToInt32(&array_length));
Handle<FixedArrayBase> object =
- isolate_->factory()->NewFixedDoubleArray(length);
+ isolate_->factory()->NewFixedDoubleArray(array_length);
slot->value_ = object;
- if (length > 0) {
+ if (array_length > 0) {
Handle<FixedDoubleArray> double_array =
Handle<FixedDoubleArray>::cast(object);
- for (int i = 0; i < length; ++i) {
+ for (int i = 0; i < array_length; ++i) {
Handle<Object> value = materializer.FieldAt(value_index);
if (value.is_identical_to(isolate_->factory()->the_hole_value())) {
double_array->set_the_hole(isolate_, i);
@@ -4160,6 +4161,7 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_VALUE_TYPE:
+ case JS_FUNCTION_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_DATE_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
@@ -4203,23 +4205,23 @@ Handle<Object> TranslatedState::MaterializeCapturedObjectAt(
case FILLER_TYPE:
case ACCESS_CHECK_INFO_TYPE:
case INTERCEPTOR_INFO_TYPE:
- case CALL_HANDLER_INFO_TYPE:
case OBJECT_TEMPLATE_INFO_TYPE:
case ALLOCATION_MEMENTO_TYPE:
- case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
case PROMISE_REACTION_JOB_INFO_TYPE:
case DEBUG_INFO_TYPE:
- case BREAK_POINT_INFO_TYPE:
case STACK_FRAME_INFO_TYPE:
case CELL_TYPE:
case WEAK_CELL_TYPE:
case PROTOTYPE_INFO_TYPE:
case TUPLE2_TYPE:
case TUPLE3_TYPE:
- case CONSTANT_ELEMENTS_PAIR_TYPE:
case ASYNC_GENERATOR_REQUEST_TYPE:
+ case PADDING_TYPE_1:
+ case PADDING_TYPE_2:
+ case PADDING_TYPE_3:
+ case PADDING_TYPE_4:
OFStream os(stderr);
os << "[couldn't handle instance type " << map->instance_type() << "]"
<< std::endl;
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index bd9ba8ea24..16c5abeb86 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -322,7 +322,7 @@ class TranslatedState {
int* length);
void CreateArgumentsElementsTranslatedValues(int frame_index,
Address input_frame_pointer,
- bool is_rest);
+ bool is_rest, FILE* trace_file);
void UpdateFromPreviouslyMaterializedObjects();
Handle<Object> MaterializeAt(int frame_index, int* value_index);
@@ -540,6 +540,7 @@ class Deoptimizer : public Malloced {
static void EnsureCodeForDeoptimizationEntry(Isolate* isolate,
BailoutType type,
int max_entry_id);
+ static void EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate);
Isolate* isolate() const { return isolate_; }
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 5e4e59ab4a..2f6cf4e749 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -564,9 +564,9 @@ class ElementsAccessorBase : public ElementsAccessor {
Subclass::ValidateImpl(holder);
}
- static bool IsPackedImpl(Handle<JSObject> holder,
- Handle<FixedArrayBase> backing_store, uint32_t start,
- uint32_t end) {
+ static bool IsPackedImpl(JSObject* holder, FixedArrayBase* backing_store,
+ uint32_t start, uint32_t end) {
+ DisallowHeapAllocation no_gc;
if (IsFastPackedElementsKind(kind())) return true;
Isolate* isolate = backing_store->GetIsolate();
for (uint32_t i = start; i < end; i++) {
@@ -580,9 +580,9 @@ class ElementsAccessorBase : public ElementsAccessor {
static void TryTransitionResultArrayToPacked(Handle<JSArray> array) {
if (!IsHoleyElementsKind(kind())) return;
- int length = Smi::cast(array->length())->value();
Handle<FixedArrayBase> backing_store(array->elements());
- if (!Subclass::IsPackedImpl(array, backing_store, 0, length)) {
+ int length = Smi::cast(array->length())->value();
+ if (!Subclass::IsPackedImpl(*array, *backing_store, 0, length)) {
return;
}
ElementsKind packed_kind = GetPackedElementsKind(kind());
@@ -595,19 +595,17 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
- bool HasElement(Handle<JSObject> holder, uint32_t index,
- Handle<FixedArrayBase> backing_store,
- PropertyFilter filter) final {
+ bool HasElement(JSObject* holder, uint32_t index,
+ FixedArrayBase* backing_store, PropertyFilter filter) final {
return Subclass::HasElementImpl(holder->GetIsolate(), holder, index,
backing_store, filter);
}
- static bool HasElementImpl(Isolate* isolate, Handle<JSObject> holder,
- uint32_t index,
- Handle<FixedArrayBase> backing_store,
+ static bool HasElementImpl(Isolate* isolate, JSObject* holder, uint32_t index,
+ FixedArrayBase* backing_store,
PropertyFilter filter = ALL_PROPERTIES) {
- return Subclass::GetEntryForIndexImpl(isolate, *holder, *backing_store,
- index, filter) != kMaxUInt32;
+ return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index,
+ filter) != kMaxUInt32;
}
bool HasAccessors(JSObject* holder) final {
@@ -771,9 +769,15 @@ class ElementsAccessorBase : public ElementsAccessor {
backing_store = handle(array->elements(), isolate);
}
}
- if (2 * length <= capacity) {
+ if (2 * length + JSObject::kMinAddedElementsCapacity <= capacity) {
// If more than half the elements won't be used, trim the array.
- isolate->heap()->RightTrimFixedArray(*backing_store, capacity - length);
+ // Do not trim from short arrays to prevent frequent trimming on
+ // repeated pop operations.
+ // Leave some space to allow for subsequent push operations.
+ int elements_to_trim = length + 1 == old_length
+ ? (capacity - length) / 2
+ : capacity - length;
+ isolate->heap()->RightTrimFixedArray(*backing_store, elements_to_trim);
} else {
// Otherwise, fill the unused tail with holes.
BackingStore::cast(*backing_store)->FillWithHoles(length, old_length);
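
The new condition trims only when the unused tail still leaves kMinAddedElementsCapacity of headroom, and after a plain pop (length + 1 == old_length) it trims just half of the tail so a pop/push sequence does not reallocate every time. A self-contained sketch of the arithmetic (the constant's value is assumed here, and ElementsToTrim is a hypothetical helper):

    #include <cstdio>

    constexpr int kMinAddedElementsCapacity = 16;  // assumed value

    int ElementsToTrim(int capacity, int length, int old_length) {
      // Keep short arrays as-is to avoid trimming on every pop.
      if (2 * length + kMinAddedElementsCapacity > capacity) return 0;
      // After a single pop, trim only half of the unused tail, leaving
      // room for subsequent pushes; otherwise trim the whole tail.
      return length + 1 == old_length ? (capacity - length) / 2
                                      : capacity - length;
    }

    int main() {
      std::printf("%d\n", ElementsToTrim(64, 10, 11));  // pop: trims 27
      std::printf("%d\n", ElementsToTrim(64, 10, 40));  // shrink: trims 54
    }
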
@@ -1004,6 +1008,18 @@ class ElementsAccessorBase : public ElementsAccessor {
kPackedSizeNotKnown, size);
}
+ Object* CopyElements(Handle<JSReceiver> source, Handle<JSObject> destination,
+ size_t length) final {
+ return Subclass::CopyElementsHandleImpl(source, destination, length);
+ }
+
+ static Object* CopyElementsHandleImpl(Handle<JSReceiver> source,
+ Handle<JSObject> destination,
+ size_t length) {
+ UNREACHABLE();
+ return *source;
+ }
+
Handle<SeededNumberDictionary> Normalize(Handle<JSObject> object) final {
return Subclass::NormalizeImpl(object, handle(object->elements()));
}
@@ -1079,7 +1095,8 @@ class ElementsAccessorBase : public ElementsAccessor {
Isolate* isolate = keys->isolate();
Factory* factory = isolate->factory();
for (uint32_t i = 0; i < length; i++) {
- if (Subclass::HasElementImpl(isolate, object, i, backing_store, filter)) {
+ if (Subclass::HasElementImpl(isolate, *object, i, *backing_store,
+ filter)) {
keys->AddKey(factory->NewNumberFromUint(i));
}
}
@@ -1092,7 +1109,8 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t insertion_index = 0) {
uint32_t length = Subclass::GetMaxIndex(*object, *backing_store);
for (uint32_t i = 0; i < length; i++) {
- if (Subclass::HasElementImpl(isolate, object, i, backing_store, filter)) {
+ if (Subclass::HasElementImpl(isolate, *object, i, *backing_store,
+ filter)) {
if (convert == GetKeysConversion::kConvertToString) {
Handle<String> index_string = isolate->factory()->Uint32ToString(i);
list->set(insertion_index, *index_string);
@@ -1868,8 +1886,6 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
// TODO(verwaest): Move this out of elements.cc.
// If an old space backing store is larger than a certain size and
// has too few used values, normalize it.
- // To avoid doing the check on every delete we require at least
- // one adjacent hole to the value being deleted.
const int kMinLengthForSparsenessCheck = 64;
if (backing_store->length() < kMinLengthForSparsenessCheck) return;
if (backing_store->GetHeap()->InNewSpace(*backing_store)) return;
@@ -1879,34 +1895,48 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
} else {
length = static_cast<uint32_t>(store->length());
}
- if ((entry > 0 && backing_store->is_the_hole(isolate, entry - 1)) ||
- (entry + 1 < length &&
- backing_store->is_the_hole(isolate, entry + 1))) {
- if (!obj->IsJSArray()) {
- uint32_t i;
- for (i = entry + 1; i < length; i++) {
- if (!backing_store->is_the_hole(isolate, i)) break;
- }
- if (i == length) {
- DeleteAtEnd(obj, backing_store, entry);
- return;
- }
+
+ // To avoid doing the check on every delete, use a counter-based heuristic.
+ const int kLengthFraction = 16;
+    // The constant above must be large enough to ensure the normalization
+    // check runs frequently enough; at a minimum, it should reliably hit
+    // the "window" of remaining-element counts where normalization would
+    // be beneficial.
+ STATIC_ASSERT(kLengthFraction >=
+ SeededNumberDictionary::kEntrySize *
+ SeededNumberDictionary::kPreferFastElementsSizeFactor);
+ size_t current_counter = isolate->elements_deletion_counter();
+ if (current_counter < length / kLengthFraction) {
+ isolate->set_elements_deletion_counter(current_counter + 1);
+ return;
+ }
+ // Reset the counter whenever the full check is performed.
+ isolate->set_elements_deletion_counter(0);
+
+ if (!obj->IsJSArray()) {
+ uint32_t i;
+ for (i = entry + 1; i < length; i++) {
+ if (!backing_store->is_the_hole(isolate, i)) break;
}
- int num_used = 0;
- for (int i = 0; i < backing_store->length(); ++i) {
- if (!backing_store->is_the_hole(isolate, i)) {
- ++num_used;
- // Bail out if a number dictionary wouldn't be able to save at least
- // 75% space.
- if (4 * SeededNumberDictionary::ComputeCapacity(num_used) *
- SeededNumberDictionary::kEntrySize >
- backing_store->length()) {
- return;
- }
+ if (i == length) {
+ DeleteAtEnd(obj, backing_store, entry);
+ return;
+ }
+ }
+ int num_used = 0;
+ for (int i = 0; i < backing_store->length(); ++i) {
+ if (!backing_store->is_the_hole(isolate, i)) {
+ ++num_used;
+ // Bail out if a number dictionary wouldn't be able to save much space.
+ if (SeededNumberDictionary::kPreferFastElementsSizeFactor *
+ SeededNumberDictionary::ComputeCapacity(num_used) *
+ SeededNumberDictionary::kEntrySize >
+ static_cast<uint32_t>(backing_store->length())) {
+ return;
}
}
- JSObject::NormalizeElements(obj);
}
+ JSObject::NormalizeElements(obj);
}
static void ReconfigureImpl(Handle<JSObject> object,
@@ -2138,7 +2168,8 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
int hole_end) {
Heap* heap = isolate->heap();
Handle<BackingStore> dst_elms = Handle<BackingStore>::cast(backing_store);
- if (heap->CanMoveObjectStart(*dst_elms) && dst_index == 0) {
+ if (len > JSArray::kMaxCopyElements && dst_index == 0 &&
+ heap->CanMoveObjectStart(*dst_elms)) {
// Update all the copies of this backing_store handle.
*dst_elms.location() =
BackingStore::cast(heap->LeftTrimFixedArray(*dst_elms, src_index));
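// [Editor's note, not part of the patch] Sketch of the cutoff added
// above: retargeting the object start (left-trimming) beats moving the
// elements only for large prefix deletions at index 0. The constant is
// illustrative; V8's cutoff is JSArray::kMaxCopyElements.
#include <cstddef>

constexpr size_t kMaxCopyElements = 100;  // illustrative value

// Returns true when left-trimming is preferable to a memmove.
bool ShouldLeftTrim(size_t dst_index, size_t len, bool can_move_start) {
  return len > kMaxCopyElements && dst_index == 0 && can_move_start;
}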
@@ -2319,7 +2350,7 @@ class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> {
Handle<FixedArray> result = isolate->factory()->NewFixedArray(length);
Handle<FixedArrayBase> elements(array->elements(), isolate);
for (uint32_t i = 0; i < length; i++) {
- if (!Subclass::HasElementImpl(isolate, array, i, elements)) continue;
+ if (!Subclass::HasElementImpl(isolate, *array, i, *elements)) continue;
Handle<Object> value;
value = Subclass::GetImpl(isolate, *elements, i);
if (value->IsName()) {
@@ -2772,11 +2803,10 @@ class TypedElementsAccessor
return PropertyDetails(kData, DONT_DELETE, 0, PropertyCellType::kNoCell);
}
- static bool HasElementImpl(Isolate* isolate, Handle<JSObject> holder,
- uint32_t index,
- Handle<FixedArrayBase> backing_store,
+ static bool HasElementImpl(Isolate* isolate, JSObject* holder, uint32_t index,
+ FixedArrayBase* backing_store,
PropertyFilter filter) {
- return index < AccessorClass::GetCapacityImpl(*holder, *backing_store);
+ return index < AccessorClass::GetCapacityImpl(holder, backing_store);
}
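// [Editor's note, not part of the patch] The signature change above drops
// Handles in favor of raw pointers, which is only safe while no GC can
// run. A rough single-threaded stand-in for V8's DisallowHeapAllocation
// scope (V8 tracks this per thread):
#include <cassert>

struct NoAllocationScope {
  static int depth;
  NoAllocationScope() { ++depth; }
  ~NoAllocationScope() { --depth; }
};
int NoAllocationScope::depth = 0;

void Allocate() {
  // Raw object pointers held by callers would be invalidated here.
  assert(NoAllocationScope::depth == 0 && "allocation inside no-GC scope");
}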
static bool HasAccessorsImpl(JSObject* holder,
@@ -2866,10 +2896,10 @@ class TypedElementsAccessor
ctype value;
if (obj_value->IsSmi()) {
- value = BackingStore::from_int(Smi::cast(*obj_value)->value());
+ value = BackingStore::from(Smi::cast(*obj_value)->value());
} else {
DCHECK(obj_value->IsHeapNumber());
- value = BackingStore::from_double(HeapNumber::cast(*obj_value)->value());
+ value = BackingStore::from(HeapNumber::cast(*obj_value)->value());
}
// Ensure indexes are within array bounds
@@ -3089,6 +3119,220 @@ class TypedElementsAccessor
}
return result_array;
}
+
+ static bool HasSimpleRepresentation(InstanceType type) {
+ return !(type == FIXED_FLOAT32_ARRAY_TYPE ||
+ type == FIXED_FLOAT64_ARRAY_TYPE ||
+ type == FIXED_UINT8_CLAMPED_ARRAY_TYPE);
+ }
+
+ template <typename SourceTraits>
+ static void CopyBetweenBackingStores(FixedTypedArrayBase* source,
+ BackingStore* dest, size_t length) {
+ FixedTypedArray<SourceTraits>* source_fta =
+ FixedTypedArray<SourceTraits>::cast(source);
+ for (uint32_t i = 0; i < length; i++) {
+ typename SourceTraits::ElementType elem = source_fta->get_scalar(i);
+ dest->set(i, dest->from(elem));
+ }
+ }
+
+ static void CopyElementsHandleFromTypedArray(Handle<JSTypedArray> source,
+ Handle<JSTypedArray> destination,
+ size_t length) {
+ // The source is a typed array, so ToNumber conversions cannot have
+ // observable side effects: the source elements are always numbers or
+ // undefined.
+ DisallowHeapAllocation no_gc;
+
+ Handle<FixedTypedArrayBase> source_elements(
+ FixedTypedArrayBase::cast(source->elements()));
+ Handle<BackingStore> destination_elements(
+ BackingStore::cast(destination->elements()));
+
+ DCHECK_GE(destination->length(), source->length());
+ DCHECK(source->length()->IsSmi());
+ DCHECK_EQ(Smi::FromInt(static_cast<int>(length)), source->length());
+
+ InstanceType source_type = source_elements->map()->instance_type();
+ InstanceType destination_type =
+ destination_elements->map()->instance_type();
+
+ bool same_type = source_type == destination_type;
+ bool same_size = source->element_size() == destination->element_size();
+ bool both_are_simple = HasSimpleRepresentation(source_type) &&
+ HasSimpleRepresentation(destination_type);
+
+ // We assume the source and destination don't overlap, even though they
+ // can share the same buffer. This is always true for newly allocated
+ // TypedArrays.
+ uint8_t* source_data = static_cast<uint8_t*>(source_elements->DataPtr());
+ uint8_t* dest_data = static_cast<uint8_t*>(destination_elements->DataPtr());
+ size_t source_byte_length = NumberToSize(source->byte_length());
+ size_t dest_byte_length = NumberToSize(destination->byte_length());
+ CHECK(dest_data + dest_byte_length <= source_data ||
+ source_data + source_byte_length <= dest_data);
+
+ // We can simply copy the backing store if the types are the same, or if
+ // we are converting e.g. Uint8 <-> Int8, as the binary representation
+ // will be the same. This is not the case for floats or clamped Uint8,
+ // which have special conversion operations.
+ if (same_type || (same_size && both_are_simple)) {
+ size_t element_size = source->element_size();
+ std::memcpy(dest_data, source_data, length * element_size);
+ } else {
+ // We use scalar accessors below to avoid boxing/unboxing, so there are
+ // no allocations.
+ switch (source->GetElementsKind()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+ case TYPE##_ELEMENTS: \
+ CopyBetweenBackingStores<Type##ArrayTraits>( \
+ *source_elements, *destination_elements, length); \
+ break;
+ TYPED_ARRAYS(TYPED_ARRAY_CASE)
+ default:
+ UNREACHABLE();
+ break;
+ }
+#undef TYPED_ARRAY_CASE
+ }
+ }
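// [Editor's note, not part of the patch] Sketch of the fast-path split
// above with V8's traits machinery stripped away: identical element
// types copy via memcpy, everything else converts element-wise. V8 also
// memcpys distinct same-size non-float kinds (e.g. Uint8 <-> Int8);
// float and clamped kinds need dedicated conversions, omitted here.
#include <cstddef>
#include <cstring>
#include <type_traits>

template <typename From, typename To>
void CopyConvert(const From* src, To* dst, size_t length) {
  if (std::is_same<From, To>::value) {
    std::memcpy(dst, src, length * sizeof(From));  // same representation
  } else {
    for (size_t i = 0; i < length; i++) dst[i] = static_cast<To>(src[i]);
  }
}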
+
+ static bool HoleyPrototypeLookupRequired(Isolate* isolate,
+ Handle<JSArray> source) {
+ Object* source_proto = source->map()->prototype();
+ // Null prototypes are OK - we don't need to do prototype chain lookups on
+ // them.
+ if (source_proto->IsNull(isolate)) return false;
+ if (source_proto->IsJSProxy()) return true;
+ DCHECK(source_proto->IsJSObject());
+ if (!isolate->is_initial_array_prototype(JSObject::cast(source_proto))) {
+ return true;
+ }
+ return !isolate->IsFastArrayConstructorPrototypeChainIntact();
+ }
+
+ static bool TryCopyElementsHandleFastNumber(Handle<JSArray> source,
+ Handle<JSTypedArray> destination,
+ size_t length) {
+ Isolate* isolate = source->GetIsolate();
+ DisallowHeapAllocation no_gc;
+ DisallowJavascriptExecution no_js(isolate);
+
+ ElementsKind kind = source->GetElementsKind();
+ BackingStore* dest = BackingStore::cast(destination->elements());
+
+ // When we find the hole, we normally have to look up the element on the
+ // prototype chain, which is not handled here and we return false instead.
+ // When the array has the original array prototype, and that prototype has
+ // not been changed in a way that would affect lookups, we can just convert
+ // the hole into undefined.
+ if (HoleyPrototypeLookupRequired(isolate, source)) return false;
+
+ Object* undefined = isolate->heap()->undefined_value();
+
+ // Fastpath for packed Smi kind.
+ if (kind == FAST_SMI_ELEMENTS) {
+ FixedArray* source_store = FixedArray::cast(source->elements());
+
+ for (uint32_t i = 0; i < length; i++) {
+ Object* elem = source_store->get(i);
+ DCHECK(elem->IsSmi());
+ int int_value = Smi::cast(elem)->value();
+ dest->set(i, dest->from(int_value));
+ }
+ return true;
+ } else if (kind == FAST_HOLEY_SMI_ELEMENTS) {
+ FixedArray* source_store = FixedArray::cast(source->elements());
+ for (uint32_t i = 0; i < length; i++) {
+ if (source_store->is_the_hole(isolate, i)) {
+ dest->SetValue(i, undefined);
+ } else {
+ Object* elem = source_store->get(i);
+ DCHECK(elem->IsSmi());
+ int int_value = Smi::cast(elem)->value();
+ dest->set(i, dest->from(int_value));
+ }
+ }
+ return true;
+ } else if (kind == FAST_DOUBLE_ELEMENTS) {
+ // Fastpath for packed double kind. We avoid boxing and then immediately
+ // unboxing the double here by using get_scalar.
+ FixedDoubleArray* source_store =
+ FixedDoubleArray::cast(source->elements());
+
+ for (uint32_t i = 0; i < length; i++) {
+ // Use the from() conversion for this specific TypedArray type, rather
+ // than relying on an implicit C++ conversion of elem.
+ double elem = source_store->get_scalar(i);
+ dest->set(i, dest->from(elem));
+ }
+ return true;
+ } else if (kind == FAST_HOLEY_DOUBLE_ELEMENTS) {
+ FixedDoubleArray* source_store =
+ FixedDoubleArray::cast(source->elements());
+ for (uint32_t i = 0; i < length; i++) {
+ if (source_store->is_the_hole(i)) {
+ dest->SetValue(i, undefined);
+ } else {
+ double elem = source_store->get_scalar(i);
+ dest->set(i, dest->from(elem));
+ }
+ }
+ return true;
+ }
+ return false;
+ }
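// [Editor's note, not part of the patch] The shape of the holey fast
// paths above, abstracted: a hole may be stored as undefined only
// because the prototype-chain lookup was ruled out beforehand. Dest is
// a placeholder for V8's BackingStore; the callables stand in for
// is_the_hole and get_scalar.
#include <cstddef>
#include <functional>

template <typename Elem, typename Dest>
void CopyHoley(size_t length, const std::function<bool(size_t)>& is_hole,
               const std::function<Elem(size_t)>& get_scalar, Dest* dest) {
  for (size_t i = 0; i < length; i++) {
    if (is_hole(i)) {
      dest->SetUndefined(i);  // hole -> undefined, no prototype walk
    } else {
      dest->Set(i, get_scalar(i));
    }
  }
}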
+
+ static Object* CopyElementsHandleSlow(Handle<JSReceiver> source,
+ Handle<JSTypedArray> destination,
+ size_t length) {
+ Isolate* isolate = source->GetIsolate();
+ Handle<BackingStore> destination_elements(
+ BackingStore::cast(destination->elements()));
+ for (uint32_t i = 0; i < length; i++) {
+ LookupIterator it(isolate, source, i, source);
+ Handle<Object> elem;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem,
+ Object::GetProperty(&it));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, elem, Object::ToNumber(elem));
+ // We don't need to check for buffer neutering here, because the
+ // source cannot be a TypedArray.
+ // The spec says we store the length, then get each element, so we don't
+ // need to check changes to length.
+ destination_elements->SetValue(i, *elem);
+ }
+ return Smi::kZero;
+ }
+
+ // This doesn't guarantee that the destination array will be completely
+ // filled. If that is required, the caller must pass a source of at
+ // least equal length.
+ static Object* CopyElementsHandleImpl(Handle<JSReceiver> source,
+ Handle<JSObject> destination,
+ size_t length) {
+ Handle<JSTypedArray> destination_ta =
+ Handle<JSTypedArray>::cast(destination);
+
+ // All conversions from TypedArrays can be done without allocation.
+ if (source->IsJSTypedArray()) {
+ Handle<JSTypedArray> source_ta = Handle<JSTypedArray>::cast(source);
+ CopyElementsHandleFromTypedArray(source_ta, destination_ta, length);
+ return Smi::kZero;
+ }
+
+ // Fast cases for packed numbers kinds where we don't need to allocate.
+ if (source->IsJSArray()) {
+ Handle<JSArray> source_array = Handle<JSArray>::cast(source);
+ if (TryCopyElementsHandleFastNumber(source_array, destination_ta,
+ length)) {
+ return Smi::kZero;
+ }
+ }
+ // Final generic case that handles prototype chain lookups, getters, proxies
+ // and observable side effects via valueOf, etc.
+ return CopyElementsHandleSlow(source, destination_ta, length);
+ }
};
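// [Editor's note, not part of the patch] The dispatch order implemented
// by CopyElementsHandleImpl above, reduced to its decision: typed-array
// sources first (no allocation, no observable JS), then the packed/holey
// number fast path, then the generic slow path that may run arbitrary JS
// (getters, proxies, valueOf). Names are illustrative.
enum class CopyPath { kTypedArray, kFastNumber, kSlowGeneric };

CopyPath ChooseCopyPath(bool source_is_typed_array,
                        bool fast_number_path_applies) {
  if (source_is_typed_array) return CopyPath::kTypedArray;
  if (fast_number_path_applies) return CopyPath::kFastNumber;
  return CopyPath::kSlowGeneric;
}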
#define FIXED_ELEMENTS_ACCESSOR(Type, type, TYPE, ctype, size) \
@@ -3270,6 +3514,8 @@ class SloppyArgumentsElementsAccessor
uint32_t entry = ArgumentsAccessor::GetEntryForIndexImpl(
isolate, holder, arguments, index, filter);
if (entry == kMaxUInt32) return kMaxUInt32;
+ // Arguments entries could overlap with the dictionary entries, hence offset
+ // them by the number of context-mapped entries.
return elements->parameter_map_length() + entry;
}
@@ -3293,19 +3539,28 @@ class SloppyArgumentsElementsAccessor
}
static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) {
- SloppyArgumentsElements* elements =
- SloppyArgumentsElements::cast(obj->elements());
+ Handle<SloppyArgumentsElements> elements(
+ SloppyArgumentsElements::cast(obj->elements()));
uint32_t length = elements->parameter_map_length();
+ uint32_t delete_or_entry = entry;
+ if (entry < length) {
+ delete_or_entry = kMaxUInt32;
+ }
+ Subclass::SloppyDeleteImpl(obj, elements, delete_or_entry);
+ // SloppyDeleteImpl allocates a new dictionary elements store. To keep
+ // heap verification happy, we postpone clearing out the mapped entry.
if (entry < length) {
- // TODO(kmillikin): We could check if this was the last aliased
- // parameter, and revert to normal elements in that case. That
- // would enable GC of the context.
elements->set_mapped_entry(entry, obj->GetHeap()->the_hole_value());
- } else {
- Subclass::DeleteFromArguments(obj, entry - length);
}
}
+ static void SloppyDeleteImpl(Handle<JSObject> obj,
+ Handle<SloppyArgumentsElements> elements,
+ uint32_t entry) {
+ // Implemented in subclasses.
+ UNREACHABLE();
+ }
+
static void CollectElementIndicesImpl(Handle<JSObject> object,
Handle<FixedArrayBase> backing_store,
KeyAccumulator* keys) {
@@ -3456,17 +3711,21 @@ class SlowSloppyArgumentsElementsAccessor
}
return result;
}
- static void DeleteFromArguments(Handle<JSObject> obj, uint32_t entry) {
+ static void SloppyDeleteImpl(Handle<JSObject> obj,
+ Handle<SloppyArgumentsElements> elements,
+ uint32_t entry) {
+ // No need to delete a context-mapped entry from the arguments elements.
+ if (entry == kMaxUInt32) return;
Isolate* isolate = obj->GetIsolate();
- Handle<SloppyArgumentsElements> elements(
- SloppyArgumentsElements::cast(obj->elements()), isolate);
Handle<SeededNumberDictionary> dict(
SeededNumberDictionary::cast(elements->arguments()), isolate);
// TODO(verwaest): Remove reliance on index in Shrink.
uint32_t index = GetIndexForEntryImpl(*dict, entry);
- Handle<Object> result = SeededNumberDictionary::DeleteProperty(dict, entry);
+ int length = elements->parameter_map_length();
+ Handle<Object> result =
+ SeededNumberDictionary::DeleteProperty(dict, entry - length);
USE(result);
- DCHECK(result->IsTrue(dict->GetIsolate()));
+ DCHECK(result->IsTrue(isolate));
Handle<FixedArray> new_elements =
SeededNumberDictionary::Shrink(dict, index);
elements->set_arguments(*new_elements);
@@ -3590,10 +3849,28 @@ class FastSloppyArgumentsElementsAccessor
return FastHoleyObjectElementsAccessor::NormalizeImpl(object, arguments);
}
- static void DeleteFromArguments(Handle<JSObject> obj, uint32_t entry) {
- Handle<FixedArray> arguments =
- GetArguments(obj->GetIsolate(), obj->elements());
- FastHoleyObjectElementsAccessor::DeleteCommon(obj, entry, arguments);
+ static Handle<SeededNumberDictionary> NormalizeArgumentsElements(
+ Handle<JSObject> object, Handle<SloppyArgumentsElements> elements,
+ uint32_t* entry) {
+ Handle<SeededNumberDictionary> dictionary =
+ JSObject::NormalizeElements(object);
+ elements->set_arguments(*dictionary);
+ // kMaxUInt32 indicates that a context-mapped element was deleted. In that
+ // case we only normalize the elements (i.e. migrate to SLOW_SLOPPY).
+ if (*entry == kMaxUInt32) return dictionary;
+ uint32_t length = elements->parameter_map_length();
+ if (*entry >= length) {
+ *entry = dictionary->FindEntry(*entry - length) + length;
+ }
+ return dictionary;
+ }
+
+ static void SloppyDeleteImpl(Handle<JSObject> obj,
+ Handle<SloppyArgumentsElements> elements,
+ uint32_t entry) {
+ // Always normalize the elements when deleting an entry.
+ NormalizeArgumentsElements(obj, elements, &entry);
+ SlowSloppyArgumentsElementsAccessor::SloppyDeleteImpl(obj, elements, entry);
}
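// [Editor's note, not part of the patch] Sketch of the entry bookkeeping
// used by the SloppyDeleteImpl overloads above: entries below
// parameter_map_length are context-mapped, everything above is offset
// into the arguments dictionary, and kMaxUInt32 is the sentinel for "a
// context-mapped entry was deleted; only normalize".
#include <cstdint>
#include <limits>

constexpr uint32_t kContextMappedDeleted =
    std::numeric_limits<uint32_t>::max();

uint32_t ToDictionaryEntry(uint32_t entry, uint32_t parameter_map_length) {
  if (entry == kContextMappedDeleted) return entry;  // nothing to delete
  return entry - parameter_map_length;  // caller ensures entry >= length
}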
static void AddImpl(Handle<JSObject> object, uint32_t index,
@@ -3621,14 +3898,10 @@ class FastSloppyArgumentsElementsAccessor
Handle<FixedArrayBase> store, uint32_t entry,
Handle<Object> value,
PropertyAttributes attributes) {
- Handle<SeededNumberDictionary> dictionary =
- JSObject::NormalizeElements(object);
- SloppyArgumentsElements* elements = SloppyArgumentsElements::cast(*store);
- elements->set_arguments(*dictionary);
- uint32_t length = elements->parameter_map_length();
- if (entry >= length) {
- entry = dictionary->FindEntry(entry - length) + length;
- }
+ DCHECK_EQ(object->elements(), *store);
+ Handle<SloppyArgumentsElements> elements(
+ SloppyArgumentsElements::cast(*store));
+ NormalizeArgumentsElements(object, elements, &entry);
SlowSloppyArgumentsElementsAccessor::ReconfigureImpl(object, store, entry,
value, attributes);
}
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 3fdef3d987..5184b29765 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -6,7 +6,6 @@
#define V8_ELEMENTS_H_
#include "src/elements-kind.h"
-#include "src/heap/heap.h"
#include "src/isolate.h"
#include "src/keys.h"
#include "src/objects.h"
@@ -43,13 +42,13 @@ class ElementsAccessor {
// index is ignored. Note that only Dictionary elements have custom
// PropertyAttributes associated, hence the |filter| argument is ignored for
// all but DICTIONARY_ELEMENTS and SLOW_SLOPPY_ARGUMENTS_ELEMENTS.
- virtual bool HasElement(Handle<JSObject> holder, uint32_t index,
- Handle<FixedArrayBase> backing_store,
+ virtual bool HasElement(JSObject* holder, uint32_t index,
+ FixedArrayBase* backing_store,
PropertyFilter filter = ALL_PROPERTIES) = 0;
- inline bool HasElement(Handle<JSObject> holder, uint32_t index,
+ inline bool HasElement(JSObject* holder, uint32_t index,
PropertyFilter filter = ALL_PROPERTIES) {
- return HasElement(holder, index, handle(holder->elements()), filter);
+ return HasElement(holder, index, holder->elements(), filter);
}
virtual Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) = 0;
@@ -189,6 +188,9 @@ class ElementsAccessor {
ElementsKind source_kind,
Handle<FixedArrayBase> destination, int size) = 0;
+ virtual Object* CopyElements(Handle<JSReceiver> source,
+ Handle<JSObject> destination, size_t length) = 0;
+
virtual Handle<FixedArray> CreateListFromArray(Isolate* isolate,
Handle<JSArray> array) = 0;
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index 05a69acfde..17e63ff83b 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -194,9 +194,9 @@ MaybeHandle<Object> Execution::Call(Isolate* isolate, Handle<Object> callable,
// static
-MaybeHandle<Object> Execution::New(Handle<JSFunction> constructor, int argc,
- Handle<Object> argv[]) {
- return New(constructor->GetIsolate(), constructor, constructor, argc, argv);
+MaybeHandle<Object> Execution::New(Isolate* isolate, Handle<Object> constructor,
+ int argc, Handle<Object> argv[]) {
+ return New(isolate, constructor, constructor, argc, argv);
}
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index d5f6371726..c6ea3847b8 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -33,7 +33,8 @@ class Execution final : public AllStatic {
// Construct object from function, the caller supplies an array of
// arguments.
- MUST_USE_RESULT static MaybeHandle<Object> New(Handle<JSFunction> constructor,
+ MUST_USE_RESULT static MaybeHandle<Object> New(Isolate* isolate,
+ Handle<Object> constructor,
int argc,
Handle<Object> argv[]);
MUST_USE_RESULT static MaybeHandle<Object> New(Isolate* isolate,
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 3e49c527e3..443347231f 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -148,13 +148,13 @@ void StatisticsExtension::GetCounters(
if (obj->IsCode()) {
Code* code = Code::cast(obj);
reloc_info_total += code->relocation_info()->Size();
- ByteArray* source_position_table = code->source_position_table();
+ ByteArray* source_position_table = code->SourcePositionTable();
if (source_position_table->length() > 0) {
- source_position_table_total += code->source_position_table()->Size();
+ source_position_table_total += code->SourcePositionTable()->Size();
}
} else if (obj->IsBytecodeArray()) {
source_position_table_total +=
- BytecodeArray::cast(obj)->source_position_table()->Size();
+ BytecodeArray::cast(obj)->SourcePositionTable()->Size();
}
}
diff --git a/deps/v8/src/external-reference-table.cc b/deps/v8/src/external-reference-table.cc
index 5f25df46e9..95c3a976e6 100644
--- a/deps/v8/src/external-reference-table.cc
+++ b/deps/v8/src/external-reference-table.cc
@@ -15,6 +15,7 @@
#if defined(DEBUG) && defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID)
#define SYMBOLIZE_FUNCTION
#include <execinfo.h>
+#include <vector>
#endif // DEBUG && V8_OS_LINUX && !V8_OS_ANDROID
namespace v8 {
@@ -49,6 +50,14 @@ ExternalReferenceTable::ExternalReferenceTable(Isolate* isolate) {
AddApiReferences(isolate);
}
+ExternalReferenceTable::~ExternalReferenceTable() {
+#ifdef SYMBOLIZE_FUNCTION
+ for (char** table : symbol_tables_) {
+ free(table);
+ }
+#endif
+}
+
#ifdef DEBUG
void ExternalReferenceTable::ResetCount() {
for (ExternalReferenceEntry& entry : refs_) entry.count = 0;
@@ -62,10 +71,12 @@ void ExternalReferenceTable::PrintCount() {
}
#endif // DEBUG
-// static
-const char* ExternalReferenceTable::ResolveSymbol(void* address) {
+const char* ExternalReferenceTable::ResolveSymbol(void* address,
+ std::vector<char**>* tables) {
#ifdef SYMBOLIZE_FUNCTION
- return backtrace_symbols(&address, 1)[0];
+ char** table = backtrace_symbols(&address, 1);
+ if (tables) tables->push_back(table);
+ return table[0];
#else
return "<unresolved>";
#endif // SYMBOLIZE_FUNCTION
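// [Editor's note, not part of the patch] The ownership pattern adopted
// above: backtrace_symbols(3) returns one malloc'ed block per call (the
// strings live inside it), so each returned pointer must be retained and
// freed exactly once. A self-contained Linux-only sketch:
#include <execinfo.h>
#include <cstdlib>
#include <vector>

class SymbolResolver {
 public:
  ~SymbolResolver() {
    for (char** table : tables_) free(table);
  }
  const char* Resolve(void* address) {
    char** table = backtrace_symbols(&address, 1);
    if (table == nullptr) return "<unresolved>";
    tables_.push_back(table);
    return table[0];
  }

 private:
  std::vector<char**> tables_;
};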
@@ -235,8 +246,31 @@ void ExternalReferenceTable::AddReferences(Isolate* isolate) {
"libc_memchr");
Add(ExternalReference::libc_memcpy_function(isolate).address(),
"libc_memcpy");
+ Add(ExternalReference::libc_memmove_function(isolate).address(),
+ "libc_memmove");
Add(ExternalReference::libc_memset_function(isolate).address(),
"libc_memset");
+ Add(ExternalReference::try_internalize_string_function(isolate).address(),
+ "try_internalize_string_function");
+#ifdef V8_INTL_SUPPORT
+ Add(ExternalReference::intl_convert_one_byte_to_lower(isolate).address(),
+ "intl_convert_one_byte_to_lower");
+ Add(ExternalReference::intl_to_latin1_lower_table(isolate).address(),
+ "intl_to_latin1_lower_table");
+#endif // V8_INTL_SUPPORT
+ Add(ExternalReference::search_string_raw<const uint8_t, const uint8_t>(
+ isolate)
+ .address(),
+ "search_string_raw<1-byte, 1-byte>");
+ Add(ExternalReference::search_string_raw<const uint8_t, const uc16>(isolate)
+ .address(),
+ "search_string_raw<1-byte, 2-byte>");
+ Add(ExternalReference::search_string_raw<const uc16, const uint8_t>(isolate)
+ .address(),
+ "search_string_raw<2-byte, 1-byte>");
+ Add(ExternalReference::search_string_raw<const uc16, const uc16>(isolate)
+ .address(),
+ "search_string_raw<1-byte, 2-byte>");
Add(ExternalReference::log_enter_external_function(isolate).address(),
"Logger::EnterExternal");
Add(ExternalReference::log_leave_external_function(isolate).address(),
@@ -443,7 +477,11 @@ void ExternalReferenceTable::AddApiReferences(Isolate* isolate) {
if (api_external_references != nullptr) {
while (*api_external_references != 0) {
Address address = reinterpret_cast<Address>(*api_external_references);
+#ifdef SYMBOLIZE_FUNCTION
+ Add(address, ResolveSymbol(address, &symbol_tables_));
+#else
Add(address, ResolveSymbol(address));
+#endif
api_external_references++;
}
}
diff --git a/deps/v8/src/external-reference-table.h b/deps/v8/src/external-reference-table.h
index 76d437c3cc..101d8332fb 100644
--- a/deps/v8/src/external-reference-table.h
+++ b/deps/v8/src/external-reference-table.h
@@ -5,6 +5,8 @@
#ifndef V8_EXTERNAL_REFERENCE_TABLE_H_
#define V8_EXTERNAL_REFERENCE_TABLE_H_
+#include <vector>
+
#include "src/address-map.h"
namespace v8 {
@@ -18,6 +20,7 @@ class Isolate;
class ExternalReferenceTable {
public:
static ExternalReferenceTable* instance(Isolate* isolate);
+ ~ExternalReferenceTable();
uint32_t size() const { return static_cast<uint32_t>(refs_.length()); }
Address address(uint32_t i) { return refs_[i].address; }
@@ -32,7 +35,8 @@ class ExternalReferenceTable {
void PrintCount();
#endif // DEBUG
- static const char* ResolveSymbol(void* address);
+ static const char* ResolveSymbol(void* address,
+ std::vector<char**>* = nullptr);
private:
struct ExternalReferenceEntry {
@@ -63,6 +67,9 @@ class ExternalReferenceTable {
void AddApiReferences(Isolate* isolate);
List<ExternalReferenceEntry> refs_;
+#ifdef DEBUG
+ std::vector<char**> symbol_tables_;
+#endif
uint32_t api_refs_start_;
DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 33adec7483..7f2eae35dd 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -128,8 +128,8 @@ Handle<ContextExtension> Factory::NewContextExtension(
Handle<ConstantElementsPair> Factory::NewConstantElementsPair(
ElementsKind elements_kind, Handle<FixedArrayBase> constant_values) {
- Handle<ConstantElementsPair> result = Handle<ConstantElementsPair>::cast(
- NewStruct(CONSTANT_ELEMENTS_PAIR_TYPE));
+ Handle<ConstantElementsPair> result =
+ Handle<ConstantElementsPair>::cast(NewStruct(TUPLE2_TYPE));
result->set_elements_kind(elements_kind);
result->set_constant_values(*constant_values);
return result;
@@ -260,7 +260,7 @@ Handle<AccessorPair> Factory::NewAccessorPair() {
Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() {
Handle<TypeFeedbackInfo> info =
- Handle<TypeFeedbackInfo>::cast(NewStruct(TYPE_FEEDBACK_INFO_TYPE));
+ Handle<TypeFeedbackInfo>::cast(NewStruct(TUPLE3_TYPE));
info->initialize_storage();
return info;
}
@@ -1130,7 +1130,8 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
script->set_eval_from_position(0);
script->set_shared_function_infos(*empty_fixed_array(), SKIP_WRITE_BARRIER);
script->set_flags(0);
- script->set_preparsed_scope_data(heap->empty_fixed_uint32_array());
+ script->set_preparsed_scope_data(
+ PodArray<uint32_t>::cast(heap->empty_byte_array()));
heap->set_script_list(*WeakFixedArray::Add(script_list(), script));
return script;
@@ -1633,6 +1634,13 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
DCHECK_EQ(vector->map(), *many_closures_cell_map());
}
+ // Check whether the optimized code in the feedback vector was marked for
+ // deoptimization while no live JSFunction pointed to it.
+ if (vector->value()->IsFeedbackVector()) {
+ FeedbackVector::cast(vector->value())
+ ->EvictOptimizedCodeMarkedForDeoptimization(
+ *info, "new function from shared function info");
+ }
result->set_feedback_vector_cell(*vector);
if (info->ic_age() != isolate()->heap()->global_ic_age()) {
info->ResetForNewContext(isolate()->heap()->global_ic_age());
@@ -1865,6 +1873,15 @@ Handle<JSObject> Factory::NewJSObjectFromMap(
JSObject);
}
+Handle<JSObject> Factory::NewSlowJSObjectFromMap(Handle<Map> map, int capacity,
+ PretenureFlag pretenure) {
+ DCHECK(map->is_dictionary_map());
+ Handle<FixedArray> object_properties =
+ NameDictionary::New(isolate(), capacity);
+ Handle<JSObject> js_object = NewJSObjectFromMap(map, pretenure);
+ js_object->set_properties(*object_properties);
+ return js_object;
+}
Handle<JSArray> Factory::NewJSArray(ElementsKind elements_kind,
PretenureFlag pretenure) {
@@ -2460,7 +2477,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
code = isolate()->builtins()->Illegal();
}
share->set_code(*code);
- share->set_optimized_code_map(*empty_fixed_array());
share->set_scope_info(ScopeInfo::Empty(isolate()));
share->set_outer_scope_info(*the_hole_value());
Handle<Code> construct_stub =
@@ -2476,7 +2492,7 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
FeedbackMetadata::New(isolate(), &empty_spec);
share->set_feedback_metadata(*feedback_metadata, SKIP_WRITE_BARRIER);
share->set_function_literal_id(FunctionLiteral::kIdTypeInvalid);
-#if TRACE_MAPS
+#if V8_SFI_HAS_UNIQUE_ID
share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
#endif
share->set_profiler_ticks(0);
@@ -2604,7 +2620,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
Handle<BreakPointInfo> Factory::NewBreakPointInfo(int source_position) {
Handle<BreakPointInfo> new_break_point_info =
- Handle<BreakPointInfo>::cast(NewStruct(BREAK_POINT_INFO_TYPE));
+ Handle<BreakPointInfo>::cast(NewStruct(TUPLE2_TYPE));
new_break_point_info->set_source_position(source_position);
new_break_point_info->set_break_point_objects(*undefined_value());
return new_break_point_info;
@@ -2623,6 +2639,21 @@ Handle<StackFrameInfo> Factory::NewStackFrameInfo() {
return stack_frame_info;
}
+Handle<SourcePositionTableWithFrameCache>
+Factory::NewSourcePositionTableWithFrameCache(
+ Handle<ByteArray> source_position_table,
+ Handle<UnseededNumberDictionary> stack_frame_cache) {
+ Handle<SourcePositionTableWithFrameCache>
+ source_position_table_with_frame_cache =
+ Handle<SourcePositionTableWithFrameCache>::cast(
+ NewStruct(TUPLE2_TYPE));
+ source_position_table_with_frame_cache->set_source_position_table(
+ *source_position_table);
+ source_position_table_with_frame_cache->set_stack_frame_cache(
+ *stack_frame_cache);
+ return source_position_table_with_frame_cache;
+}
+
Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
int length) {
bool strict_mode_callee = is_strict(callee->shared()->language_mode()) ||
@@ -2650,32 +2681,31 @@ Handle<JSWeakMap> Factory::NewJSWeakMap() {
return Handle<JSWeakMap>::cast(NewJSObjectFromMap(map));
}
-
-Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
- int number_of_properties,
- bool* is_result_from_cache) {
+Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> native_context,
+ int number_of_properties) {
+ DCHECK(native_context->IsNativeContext());
const int kMapCacheSize = 128;
-
// We do not cache maps for too many properties or when running builtin code.
- if (number_of_properties > kMapCacheSize ||
- isolate()->bootstrapper()->IsActive()) {
- *is_result_from_cache = false;
- Handle<Map> map = Map::Create(isolate(), number_of_properties);
- return map;
+ if (isolate()->bootstrapper()->IsActive()) {
+ return Map::Create(isolate(), number_of_properties);
+ }
+ // Use initial slow object proto map for too many properties.
+ if (number_of_properties > kMapCacheSize) {
+ return handle(native_context->slow_object_with_object_prototype_map(),
+ isolate());
}
- *is_result_from_cache = true;
if (number_of_properties == 0) {
// Reuse the initial map of the Object function if the literal has no
// predeclared properties.
- return handle(context->object_function()->initial_map(), isolate());
+ return handle(native_context->object_function()->initial_map(), isolate());
}
int cache_index = number_of_properties - 1;
- Handle<Object> maybe_cache(context->map_cache(), isolate());
+ Handle<Object> maybe_cache(native_context->map_cache(), isolate());
if (maybe_cache->IsUndefined(isolate())) {
// Allocate the new map cache for the native context.
maybe_cache = NewFixedArray(kMapCacheSize, TENURED);
- context->set_map_cache(*maybe_cache);
+ native_context->set_map_cache(*maybe_cache);
} else {
// Check to see whether there is a matching element in the cache.
Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
@@ -2683,13 +2713,16 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
if (result->IsWeakCell()) {
WeakCell* cell = WeakCell::cast(result);
if (!cell->cleared()) {
- return handle(Map::cast(cell->value()), isolate());
+ Map* map = Map::cast(cell->value());
+ DCHECK(!map->is_dictionary_map());
+ return handle(map, isolate());
}
}
}
// Create a new map and add it to the cache.
Handle<FixedArray> cache = Handle<FixedArray>::cast(maybe_cache);
Handle<Map> map = Map::Create(isolate(), number_of_properties);
+ DCHECK(!map->is_dictionary_map());
Handle<WeakCell> cell = NewWeakCell(map);
cache->set(cache_index, *cell);
return map;
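// [Editor's note, not part of the patch] The cache policy above, with
// this patch's changes folded in, reduced to a decision function. The
// actual cache (weak cells in a FixedArray on the native context) is
// elided; names are illustrative.
enum class LiteralMapChoice {
  kFreshMap,          // bootstrapping: never cache
  kSlowObjectMap,     // too many properties: shared dictionary-mode map
  kObjectInitialMap,  // zero properties: Object function's initial map
  kCachedMap          // 1..kMapCacheSize properties: per-size cache
};

LiteralMapChoice ChooseLiteralMap(bool bootstrapping, int num_properties) {
  constexpr int kMapCacheSize = 128;
  if (bootstrapping) return LiteralMapChoice::kFreshMap;
  if (num_properties > kMapCacheSize) return LiteralMapChoice::kSlowObjectMap;
  if (num_properties == 0) return LiteralMapChoice::kObjectInitialMap;
  return LiteralMapChoice::kCachedMap;
}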
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 60b53a860d..8146205559 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -9,7 +9,10 @@
#include "src/globals.h"
#include "src/isolate.h"
#include "src/messages.h"
+#include "src/objects/descriptor-array.h"
+#include "src/objects/dictionary.h"
#include "src/objects/scope-info.h"
+#include "src/string-hasher.h"
namespace v8 {
namespace internal {
@@ -325,6 +328,10 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<BreakPointInfo> NewBreakPointInfo(int source_position);
Handle<StackFrameInfo> NewStackFrameInfo();
+ Handle<SourcePositionTableWithFrameCache>
+ NewSourcePositionTableWithFrameCache(
+ Handle<ByteArray> source_position_table,
+ Handle<UnseededNumberDictionary> stack_frame_cache);
// Foreign objects are pretenured when allocated by the bootstrapper.
Handle<Foreign> NewForeign(Address addr,
@@ -472,6 +479,10 @@ class V8_EXPORT_PRIVATE Factory final {
Handle<Map> map,
PretenureFlag pretenure = NOT_TENURED,
Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
+ Handle<JSObject> NewSlowJSObjectFromMap(
+ Handle<Map> map,
+ int number_of_slow_properties = NameDictionary::kInitialCapacity,
+ PretenureFlag pretenure = NOT_TENURED);
// JS arrays are pretenured when allocated by the parser.
@@ -763,9 +774,8 @@ class V8_EXPORT_PRIVATE Factory final {
// Return a map for given number of properties using the map cache in the
// native context.
- Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
- int number_of_properties,
- bool* is_result_from_cache);
+ Handle<Map> ObjectLiteralMapFromCache(Handle<Context> native_context,
+ int number_of_properties);
Handle<RegExpMatchInfo> NewRegExpMatchInfo();
diff --git a/deps/v8/src/feedback-vector-inl.h b/deps/v8/src/feedback-vector-inl.h
index 0eb2f3ccc3..e368385166 100644
--- a/deps/v8/src/feedback-vector-inl.h
+++ b/deps/v8/src/feedback-vector-inl.h
@@ -113,6 +113,15 @@ void FeedbackVector::clear_invocation_count() {
set(kInvocationCountIndex, Smi::kZero);
}
+Code* FeedbackVector::optimized_code() const {
+ WeakCell* cell = WeakCell::cast(get(kOptimizedCodeIndex));
+ return cell->cleared() ? nullptr : Code::cast(cell->value());
+}
+
+bool FeedbackVector::has_optimized_code() const {
+ return !WeakCell::cast(get(kOptimizedCodeIndex))->cleared();
+}
+
// Conversion from an integer index to either a slot or an ic slot.
// static
FeedbackSlot FeedbackVector::ToSlot(int index) {
diff --git a/deps/v8/src/feedback-vector.cc b/deps/v8/src/feedback-vector.cc
index 37663f590b..afbcf2a923 100644
--- a/deps/v8/src/feedback-vector.cc
+++ b/deps/v8/src/feedback-vector.cc
@@ -203,6 +203,7 @@ Handle<FeedbackVector> FeedbackVector::New(Isolate* isolate,
Handle<FixedArray> array = factory->NewFixedArray(length, TENURED);
array->set_map_no_write_barrier(isolate->heap()->feedback_vector_map());
array->set(kSharedFunctionInfoIndex, *shared);
+ array->set(kOptimizedCodeIndex, *factory->empty_weak_cell());
array->set(kInvocationCountIndex, Smi::kZero);
// Ensure we can skip the write barrier
@@ -296,6 +297,40 @@ void FeedbackVector::AddToCodeCoverageList(Isolate* isolate,
isolate->SetCodeCoverageList(*list);
}
+// static
+void FeedbackVector::SetOptimizedCode(Handle<FeedbackVector> vector,
+ Handle<Code> code) {
+ DCHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
+ Factory* factory = vector->GetIsolate()->factory();
+ Handle<WeakCell> cell = factory->NewWeakCell(code);
+ vector->set(kOptimizedCodeIndex, *cell);
+}
+
+void FeedbackVector::ClearOptimizedCode() {
+ set(kOptimizedCodeIndex, GetIsolate()->heap()->empty_weak_cell());
+}
+
+void FeedbackVector::EvictOptimizedCodeMarkedForDeoptimization(
+ SharedFunctionInfo* shared, const char* reason) {
+ WeakCell* cell = WeakCell::cast(get(kOptimizedCodeIndex));
+ if (!cell->cleared()) {
+ Code* code = Code::cast(cell->value());
+ if (code->marked_for_deoptimization()) {
+ if (FLAG_trace_deopt) {
+ PrintF("[evicting optimizing code marked for deoptimization (%s) for ",
+ reason);
+ shared->ShortPrint();
+ PrintF("]\n");
+ }
+ if (!code->deopt_already_counted()) {
+ shared->increment_deopt_count();
+ code->set_deopt_already_counted(true);
+ }
+ ClearOptimizedCode();
+ }
+ }
+}
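// [Editor's note, not part of the patch] Sketch of the caching scheme
// above: the feedback vector holds optimized code weakly and evicts it
// as soon as the code is marked for deoptimization, so a stale entry is
// never handed out. WeakCell semantics are modeled with a raw pointer.
struct CodeStub {
  bool marked_for_deoptimization = false;
};

class OptimizedCodeSlot {
 public:
  void Set(CodeStub* code) { code_ = code; }
  void Clear() { code_ = nullptr; }
  // Returns the cached code, evicting it first if it was invalidated.
  CodeStub* Get() {
    if (code_ != nullptr && code_->marked_for_deoptimization) Clear();
    return code_;
  }

 private:
  CodeStub* code_ = nullptr;  // weak in V8; raw pointer in this sketch
};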
+
void FeedbackVector::ClearSlots(JSFunction* host_function) {
Isolate* isolate = GetIsolate();
@@ -646,9 +681,10 @@ void FeedbackNexus::ConfigureMonomorphic(Handle<Name> name,
}
}
-void FeedbackNexus::ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
+void FeedbackNexus::ConfigurePolymorphic(Handle<Name> name,
+ MapHandles const& maps,
List<Handle<Object>>* handlers) {
- int receiver_count = maps->length();
+ int receiver_count = static_cast<int>(maps.size());
DCHECK(receiver_count > 1);
Handle<FixedArray> array;
if (name.is_null()) {
@@ -661,14 +697,14 @@ void FeedbackNexus::ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
}
for (int current = 0; current < receiver_count; ++current) {
- Handle<Map> map = maps->at(current);
+ Handle<Map> map = maps[current];
Handle<WeakCell> cell = Map::WeakCellForMap(map);
array->set(current * 2, *cell);
array->set(current * 2 + 1, *handlers->at(current));
}
}
-int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
+int FeedbackNexus::ExtractMaps(MapHandles* maps) const {
Isolate* isolate = GetIsolate();
Object* feedback = GetFeedback();
bool is_named_feedback = IsPropertyNameFeedback(feedback);
@@ -684,7 +720,7 @@ int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
WeakCell* cell = WeakCell::cast(array->get(i));
if (!cell->cleared()) {
Map* map = Map::cast(cell->value());
- maps->Add(handle(map, isolate));
+ maps->push_back(handle(map, isolate));
found++;
}
}
@@ -693,7 +729,7 @@ int FeedbackNexus::ExtractMaps(MapHandleList* maps) const {
WeakCell* cell = WeakCell::cast(feedback);
if (!cell->cleared()) {
Map* map = Map::cast(cell->value());
- maps->Add(handle(map, isolate));
+ maps->push_back(handle(map, isolate));
return 1;
}
}
@@ -791,13 +827,13 @@ Name* KeyedStoreICNexus::FindFirstName() const {
KeyedAccessStoreMode KeyedStoreICNexus::GetKeyedAccessStoreMode() const {
KeyedAccessStoreMode mode = STANDARD_STORE;
- MapHandleList maps;
+ MapHandles maps;
List<Handle<Object>> handlers;
if (GetKeyType() == PROPERTY) return mode;
ExtractMaps(&maps);
- FindHandlers(&handlers, maps.length());
+ FindHandlers(&handlers, static_cast<int>(maps.size()));
for (int i = 0; i < handlers.length(); i++) {
// The first handler that isn't the slow handler will have the bits we need.
Handle<Object> maybe_code_handler = handlers.at(i);
@@ -911,7 +947,7 @@ InlineCacheState CollectTypeProfileNexus::StateFromFeedback() const {
}
void CollectTypeProfileNexus::Collect(Handle<String> type, int position) {
- DCHECK_GT(position, 0);
+ DCHECK_GE(position, 0);
Isolate* isolate = GetIsolate();
Object* const feedback = GetFeedback();
@@ -922,8 +958,7 @@ void CollectTypeProfileNexus::Collect(Handle<String> type, int position) {
if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
types = UnseededNumberDictionary::NewEmpty(isolate);
} else {
- types = Handle<UnseededNumberDictionary>(
- UnseededNumberDictionary::cast(feedback), isolate);
+ types = handle(UnseededNumberDictionary::cast(feedback));
}
Handle<ArrayList> position_specific_types;
@@ -931,8 +966,7 @@ void CollectTypeProfileNexus::Collect(Handle<String> type, int position) {
if (types->Has(position)) {
int entry = types->FindEntry(position);
DCHECK(types->ValueAt(entry)->IsArrayList());
- position_specific_types =
- Handle<ArrayList>(ArrayList::cast(types->ValueAt(entry)));
+ position_specific_types = handle(ArrayList::cast(types->ValueAt(entry)));
} else {
position_specific_types = ArrayList::New(isolate, 1);
}
@@ -957,8 +991,8 @@ Handle<JSObject> ConvertToJSObject(Isolate* isolate,
if (key->IsSmi()) {
int value_index = index + UnseededNumberDictionary::kEntryValueIndex;
- Handle<ArrayList> position_specific_types = Handle<ArrayList>(
- ArrayList::cast(feedback->get(value_index)), isolate);
+ Handle<ArrayList> position_specific_types(
+ ArrayList::cast(feedback->get(value_index)));
int position = Smi::cast(key)->value();
JSObject::AddDataElement(type_profile, position,
@@ -978,12 +1012,11 @@ JSObject* CollectTypeProfileNexus::GetTypeProfile() const {
Object* const feedback = GetFeedback();
if (feedback == *FeedbackVector::UninitializedSentinel(isolate)) {
- return *isolate->factory()->NewJSMap();
+ return *isolate->factory()->NewJSObject(isolate->object_function());
}
- return *ConvertToJSObject(
- isolate, Handle<UnseededNumberDictionary>(
- UnseededNumberDictionary::cast(feedback), isolate));
+ return *ConvertToJSObject(isolate,
+ handle(UnseededNumberDictionary::cast(feedback)));
}
} // namespace internal
diff --git a/deps/v8/src/feedback-vector.h b/deps/v8/src/feedback-vector.h
index 2319401177..84ec460de1 100644
--- a/deps/v8/src/feedback-vector.h
+++ b/deps/v8/src/feedback-vector.h
@@ -9,7 +9,7 @@
#include "src/base/logging.h"
#include "src/elements-kind.h"
-#include "src/objects.h"
+#include "src/objects/map.h"
#include "src/type-hints.h"
#include "src/zone/zone-containers.h"
@@ -228,7 +228,7 @@ class FeedbackVectorSpec : public FeedbackVectorSpecBase<FeedbackVectorSpec> {
// If used, the TypeProfileSlot is always added as the first slot and its
// index is constant. If other slots are added before the TypeProfileSlot,
// this number changes.
- static const int kTypeProfileSlotIndex = 2;
+ static const int kTypeProfileSlotIndex = 3;
private:
friend class FeedbackVectorSpecBase<FeedbackVectorSpec>;
@@ -308,7 +308,8 @@ class FeedbackVector : public FixedArray {
static const int kSharedFunctionInfoIndex = 0;
static const int kInvocationCountIndex = 1;
- static const int kReservedIndexCount = 2;
+ static const int kOptimizedCodeIndex = 2;
+ static const int kReservedIndexCount = 3;
inline void ComputeCounts(int* with_type_info, int* generic,
int* vector_ic_count, bool code_is_interpreted);
@@ -323,6 +324,14 @@ class FeedbackVector : public FixedArray {
inline int invocation_count() const;
inline void clear_invocation_count();
+ inline Code* optimized_code() const;
+ inline bool has_optimized_code() const;
+ void ClearOptimizedCode();
+ void EvictOptimizedCodeMarkedForDeoptimization(SharedFunctionInfo* shared,
+ const char* reason);
+ static void SetOptimizedCode(Handle<FeedbackVector> vector,
+ Handle<Code> code);
+
// Conversion from a slot to an integer index to the underlying array.
static int GetIndex(FeedbackSlot slot) {
return kReservedIndexCount + slot.ToInt();
@@ -477,17 +486,14 @@ class FeedbackNexus {
InlineCacheState ic_state() const { return StateFromFeedback(); }
bool IsUninitialized() const { return StateFromFeedback() == UNINITIALIZED; }
Map* FindFirstMap() const {
- MapHandleList maps;
+ MapHandles maps;
ExtractMaps(&maps);
- if (maps.length() > 0) return *maps.at(0);
+ if (maps.size() > 0) return *maps.at(0);
return NULL;
}
- // TODO(mvstanton): remove FindAllMaps, it didn't survive a code review.
- void FindAllMaps(MapHandleList* maps) const { ExtractMaps(maps); }
-
virtual InlineCacheState StateFromFeedback() const = 0;
- virtual int ExtractMaps(MapHandleList* maps) const;
+ virtual int ExtractMaps(MapHandles* maps) const;
virtual MaybeHandle<Object> FindHandlerForMap(Handle<Map> map) const;
virtual bool FindHandlers(List<Handle<Object>>* code_list,
int length = -1) const;
@@ -511,7 +517,7 @@ class FeedbackNexus {
void ConfigureMonomorphic(Handle<Name> name, Handle<Map> receiver_map,
Handle<Object> handler);
- void ConfigurePolymorphic(Handle<Name> name, MapHandleList* maps,
+ void ConfigurePolymorphic(Handle<Name> name, MapHandles const& maps,
List<Handle<Object>>* handlers);
protected:
@@ -548,7 +554,7 @@ class CallICNexus final : public FeedbackNexus {
InlineCacheState StateFromFeedback() const final;
- int ExtractMaps(MapHandleList* maps) const final {
+ int ExtractMaps(MapHandles* maps) const final {
// CallICs don't record map feedback.
return 0;
}
@@ -594,7 +600,7 @@ class LoadGlobalICNexus : public FeedbackNexus {
DCHECK(vector->IsLoadGlobalIC(slot));
}
- int ExtractMaps(MapHandleList* maps) const final {
+ int ExtractMaps(MapHandles* maps) const final {
// LoadGlobalICs don't record map feedback.
return 0;
}
@@ -687,7 +693,7 @@ class BinaryOpICNexus final : public FeedbackNexus {
InlineCacheState StateFromFeedback() const final;
BinaryOperationHint GetBinaryOperationFeedback() const;
- int ExtractMaps(MapHandleList* maps) const final {
+ int ExtractMaps(MapHandles* maps) const final {
// BinaryOpICs don't record map feedback.
return 0;
}
@@ -714,7 +720,7 @@ class CompareICNexus final : public FeedbackNexus {
InlineCacheState StateFromFeedback() const final;
CompareOperationHint GetCompareOperationFeedback() const;
- int ExtractMaps(MapHandleList* maps) const final {
+ int ExtractMaps(MapHandles* maps) const final {
// CompareICs don't record map feedback.
return 0;
}
diff --git a/deps/v8/src/field-type.h b/deps/v8/src/field-type.h
index 2f8250a161..5eb19dfe0c 100644
--- a/deps/v8/src/field-type.h
+++ b/deps/v8/src/field-type.h
@@ -7,6 +7,7 @@
#include "src/ast/ast-types.h"
#include "src/objects.h"
+#include "src/objects/map.h"
#include "src/ostreams.h"
namespace v8 {
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index a52cebb64a..85ee27ee6e 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -193,39 +193,43 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
DEFINE_IMPLICATION(es_staging, harmony)
// Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS(V) \
- V(harmony_array_prototype_values, "harmony Array.prototype.values") \
- V(harmony_function_sent, "harmony function.sent") \
- V(harmony_tailcalls, "harmony tail calls") \
- V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
- V(harmony_do_expressions, "harmony do-expressions") \
- V(harmony_class_fields, "harmony public fields in class literals") \
- V(harmony_async_iteration, "harmony async iteration") \
- V(harmony_dynamic_import, "harmony dynamic import") \
+#define HARMONY_INPROGRESS(V) \
+ V(harmony_array_prototype_values, "harmony Array.prototype.values") \
+ V(harmony_function_sent, "harmony function.sent") \
+ V(harmony_tailcalls, "harmony tail calls") \
+ V(harmony_sharedarraybuffer, "harmony sharedarraybuffer") \
+ V(harmony_do_expressions, "harmony do-expressions") \
+ V(harmony_class_fields, "harmony public fields in class literals") \
+ V(harmony_async_iteration, "harmony async iteration") \
+ V(harmony_dynamic_import, "harmony dynamic import") \
V(harmony_promise_finally, "harmony Promise.prototype.finally")
// Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) \
- V(harmony_function_tostring, "harmony Function.prototype.toString") \
- V(harmony_regexp_dotall, "harmony regexp dotall flag") \
- V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
- V(harmony_regexp_named_captures, "harmony regexp named captures") \
- V(harmony_regexp_property, "harmony unicode regexp property classes") \
- V(harmony_restrictive_generators, \
- "harmony restrictions on generator declarations") \
- V(harmony_object_rest_spread, "harmony object rest spread properties") \
- V(harmony_template_escapes, \
- "harmony invalid escapes in tagged template literals")
+#define HARMONY_STAGED(V) \
+ V(harmony_function_tostring, "harmony Function.prototype.toString") \
+ V(harmony_regexp_dotall, "harmony regexp dotall flag") \
+ V(harmony_regexp_lookbehind, "harmony regexp lookbehind") \
+ V(harmony_regexp_named_captures, "harmony regexp named captures") \
+ V(harmony_regexp_property, "harmony unicode regexp property classes") \
+ V(harmony_strict_legacy_accessor_builtins, \
+ "treat __defineGetter__ and related functions as strict") \
+ V(harmony_template_escapes, \
+ "harmony invalid escapes in tagged template literals") \
+ V(harmony_restrict_constructor_return, \
+ "harmony disallow non undefined primitive return value from class " \
+ "constructor")
// Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING_BASE(V) \
- V(harmony_trailing_commas, \
- "harmony trailing commas in function parameter lists")
-
-#ifdef V8_I18N_SUPPORT
+#define HARMONY_SHIPPING_BASE(V) \
+ V(harmony_restrictive_generators, \
+ "harmony restrictions on generator declarations") \
+ V(harmony_trailing_commas, \
+ "harmony trailing commas in function parameter lists") \
+ V(harmony_object_rest_spread, "harmony object rest spread properties")
+
+#ifdef V8_INTL_SUPPORT
#define HARMONY_SHIPPING(V) \
HARMONY_SHIPPING_BASE(V) \
- V(datetime_format_to_parts, "Intl.DateTimeFormat.formatToParts") \
V(icu_case_mapping, "case mapping with ICU rather than Unibrow")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
@@ -254,7 +258,7 @@ HARMONY_STAGED(FLAG_STAGED_FEATURES)
HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
#undef FLAG_SHIPPING_FEATURES
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
DEFINE_BOOL(icu_timezone_data, false,
"get information about timezones from ICU")
#endif
@@ -300,6 +304,9 @@ DEFINE_IMPLICATION(track_field_types, track_heap_object_fields)
DEFINE_BOOL(type_profile, false, "collect type information")
DEFINE_BOOL(feedback_normalization, false,
"feed back normalization to constructors")
+// TODO(jkummerow): This currently adds too much load on the stub cache.
+DEFINE_BOOL_READONLY(internalize_on_the_fly, false,
+ "internalize string keys for generic keyed ICs on the fly")
// Flags for optimization types.
DEFINE_BOOL(optimize_for_size, false,
@@ -324,8 +331,10 @@ DEFINE_BOOL(print_bytecode, false,
"print bytecode generated by ignition interpreter")
DEFINE_STRING(print_bytecode_filter, "*",
"filter for selecting which functions to print bytecode")
+#ifdef V8_TRACE_IGNITION
DEFINE_BOOL(trace_ignition, false,
"trace the bytecodes executed by the ignition interpreter")
+#endif
DEFINE_BOOL(trace_ignition_codegen, false,
"trace the codegen of ignition interpreter bytecode handlers")
DEFINE_BOOL(trace_ignition_dispatches, false,
@@ -336,7 +345,6 @@ DEFINE_STRING(trace_ignition_dispatches_output_file, nullptr,
"written (by default, the table is not written to a file)")
// Flags for Crankshaft.
-DEFINE_BOOL(crankshaft, true, "use crankshaft")
DEFINE_STRING(hydrogen_filter, "*", "optimization filter")
DEFINE_BOOL(use_gvn, true, "use hydrogen global value numbering")
DEFINE_INT(gvn_iterations, 3, "maximum number of GVN fix-point iterations")
@@ -352,12 +360,16 @@ DEFINE_INT(max_inlined_source_size, 600,
"maximum source size in bytes considered for a single inlining")
DEFINE_INT(max_inlined_nodes, 200,
"maximum number of AST nodes considered for a single inlining")
+DEFINE_INT(max_inlined_nodes_absolute, 1600,
+ "maximum absolute number of AST nodes considered for inlining "
+ "(incl. small functions)")
DEFINE_INT(max_inlined_nodes_cumulative, 400,
"maximum cumulative number of AST nodes considered for inlining")
+DEFINE_INT(max_inlined_nodes_small, 10,
+ "maximum number of AST nodes considered for small function inlining")
+DEFINE_FLOAT(min_inlining_frequency, 0.15, "minimum frequency for inlining")
DEFINE_BOOL(loop_invariant_code_motion, true, "loop invariant code motion")
DEFINE_BOOL(fast_math, true, "faster (but maybe less accurate) math functions")
-DEFINE_BOOL(collect_megamorphic_maps_from_stub_cache, false,
- "crankshaft harvests type feedback from stub cache")
DEFINE_BOOL(hydrogen_stats, false, "print statistics for hydrogen")
DEFINE_BOOL(trace_check_elimination, false, "trace check elimination phase")
DEFINE_BOOL(trace_environment_liveness, false,
@@ -517,7 +529,7 @@ DEFINE_BOOL(minimal, false,
"simplifies execution model to make porting "
"easier (e.g. always use Ignition, never use Crankshaft")
DEFINE_IMPLICATION(minimal, ignition)
-DEFINE_NEG_IMPLICATION(minimal, crankshaft)
+DEFINE_NEG_IMPLICATION(minimal, opt)
DEFINE_NEG_IMPLICATION(minimal, use_ic)
// Flags for native WebAssembly.
@@ -536,7 +548,6 @@ DEFINE_UINT(wasm_max_mem_pages, v8::internal::wasm::kV8MaxWasmMemoryPages,
"maximum memory size of a wasm instance")
DEFINE_UINT(wasm_max_table_size, v8::internal::wasm::kV8MaxWasmTableSize,
"maximum table size of a wasm instance")
-DEFINE_BOOL(trace_wasm_encoder, false, "trace encoding of wasm code")
DEFINE_BOOL(trace_wasm_decoder, false, "trace decoding of wasm code")
DEFINE_BOOL(trace_wasm_decode_time, false, "trace decoding time of wasm code")
DEFINE_BOOL(trace_wasm_compiler, false, "trace compiling of wasm code")
@@ -549,8 +560,6 @@ DEFINE_BOOL(wasm_break_on_decoder_error, false,
"debug break when wasm decoder encounters an error")
DEFINE_BOOL(validate_asm, false, "validate asm.js modules before compiling")
-DEFINE_BOOL(fast_validate_asm, false,
- "validate asm.js modules before compiling")
DEFINE_BOOL(suppress_asm_messages, false,
"don't emit asm.js related messages (for golden file testing)")
DEFINE_BOOL(trace_asm_time, false, "log asm.js timing info to the console")
@@ -597,6 +606,9 @@ DEFINE_BOOL(asm_wasm_lazy_compilation, false,
DEFINE_IMPLICATION(validate_asm, asm_wasm_lazy_compilation)
DEFINE_BOOL(wasm_lazy_compilation, false,
"enable lazy compilation for all wasm modules")
+// wasm-interpret-all resets {asm-,}wasm-lazy-compilation.
+DEFINE_NEG_IMPLICATION(wasm_interpret_all, asm_wasm_lazy_compilation)
+DEFINE_NEG_IMPLICATION(wasm_interpret_all, wasm_lazy_compilation)
// Profiler flags.
DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
@@ -625,7 +637,6 @@ DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
"of their absolute value.")
DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)")
DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)")
-DEFINE_INT(max_executable_size, 0, "max size of executable memory (in Mbytes)")
DEFINE_BOOL(gc_global, false, "always perform global GCs")
DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
DEFINE_INT(retain_maps_for_n_gc, 2,
@@ -651,7 +662,7 @@ DEFINE_BOOL(trace_fragmentation_verbose, false,
DEFINE_BOOL(trace_evacuation, false, "report evacuation statistics")
DEFINE_BOOL(trace_mutator_utilization, false,
"print mutator utilization, allocation speed, gc speed")
-DEFINE_BOOL(flush_code, true, "flush code that we expect not to use again")
+DEFINE_BOOL(flush_code, false, "flush code that we expect not to use again")
DEFINE_BOOL(trace_code_flushing, false, "trace code flushing progress")
DEFINE_BOOL(age_code, true,
"track un-executed functions to age code and flush only "
@@ -659,15 +670,24 @@ DEFINE_BOOL(age_code, true,
DEFINE_BOOL(incremental_marking, true, "use incremental marking")
DEFINE_BOOL(incremental_marking_wrappers, true,
"use incremental marking for marking wrappers")
-DEFINE_BOOL(concurrent_marking, false, "use concurrent marking")
+#ifdef V8_CONCURRENT_MARKING
+#define V8_CONCURRENT_MARKING_BOOL true
+#else
+#define V8_CONCURRENT_MARKING_BOOL false
+#endif
+DEFINE_BOOL(concurrent_marking, V8_CONCURRENT_MARKING_BOOL,
+ "use concurrent marking")
DEFINE_BOOL(trace_concurrent_marking, false, "trace concurrent marking")
+DEFINE_BOOL(minor_mc_parallel_marking, true,
+ "use parallel marking for the young generation")
+DEFINE_BOOL(trace_minor_mc_parallel_marking, false,
+ "trace parallel marking for the young generation")
DEFINE_INT(min_progress_during_incremental_marking_finalization, 32,
"keep finalizing incremental marking as long as we discover at "
"least this many unmarked objects")
DEFINE_INT(max_incremental_marking_finalization_rounds, 3,
"at most try this many times to finalize incremental marking")
DEFINE_BOOL(minor_mc, false, "perform young generation mark compact GCs")
-DEFINE_NEG_IMPLICATION(minor_mc, page_promotion)
DEFINE_NEG_IMPLICATION(minor_mc, flush_code)
DEFINE_BOOL(black_allocation, true, "use black allocation")
DEFINE_BOOL(concurrent_store_buffer, true,
@@ -694,6 +714,8 @@ DEFINE_BOOL(trace_detached_contexts, false,
DEFINE_IMPLICATION(trace_detached_contexts, track_detached_contexts)
#ifdef VERIFY_HEAP
DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
+DEFINE_BOOL(verify_heap_skip_remembered_set, false,
+ "disable remembered set verification")
#endif
DEFINE_BOOL(move_object_start, true, "enable moving of object starts")
DEFINE_BOOL(memory_reducer, true, "use memory reducer")
@@ -709,8 +731,6 @@ DEFINE_BOOL(cleanup_code_caches_at_gc, true,
DEFINE_BOOL(use_marking_progress_bar, true,
"Use a progress bar to scan large objects in increments when "
"incremental marking is active.")
-DEFINE_BOOL(zap_code_space, DEBUG_BOOL,
- "Zap free memory in code space with 0xCC while sweeping.")
DEFINE_BOOL(force_marking_deque_overflows, false,
"force overflows of marking deque by reducing it's size "
"to 64 words")
@@ -782,8 +802,6 @@ DEFINE_BOOL(builtins_in_stack_traces, false,
"show built-in functions in stack traces")
// builtins.cc
-DEFINE_BOOL(experimental_fast_array_builtins, false,
- "use experimental array builtins")
DEFINE_BOOL(allow_unsafe_function_constructor, false,
"allow invoking the function constructor without security checks")
@@ -890,10 +908,6 @@ DEFINE_BOOL(clear_exceptions_on_js_entry, false,
DEFINE_INT(histogram_interval, 600000,
"time interval in ms for aggregating memory histograms")
-// global-handles.cc
-DEFINE_BOOL(trace_object_groups, false,
- "print object groups detected during each garbage collection")
-
// heap-snapshot-generator.cc
DEFINE_BOOL(heap_profiler_trace_objects, false,
"Dump heap object allocations/movements/size_updates")
@@ -909,7 +923,8 @@ DEFINE_BOOL(use_idle_notification, true,
"Use idle notification to reduce memory footprint.")
// ic.cc
DEFINE_BOOL(use_ic, true, "use inline caching")
-DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
+DEFINE_BOOL(trace_ic, false,
+ "trace inline cache state transitions for tools/ic-processor")
DEFINE_IMPLICATION(trace_ic, log_code)
DEFINE_INT(ic_stats, 0, "inline cache state transitions statistics")
DEFINE_VALUE_IMPLICATION(trace_ic, ic_stats, 1)
@@ -928,7 +943,7 @@ DEFINE_BOOL(trace_prototype_users, false,
"Trace updates to prototype user tracking")
DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
DEFINE_BOOL(trace_for_in_enumerate, false, "Trace for-in enumerate slow-paths")
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
DEFINE_BOOL(trace_maps, false, "trace map creation")
#endif
@@ -944,9 +959,9 @@ DEFINE_BOOL(lazy_inner_functions, true, "enable lazy parsing inner functions")
DEFINE_BOOL(aggressive_lazy_inner_functions, false,
"even lazier inner function parsing")
DEFINE_IMPLICATION(aggressive_lazy_inner_functions, lazy_inner_functions)
-DEFINE_BOOL(preparser_scope_analysis, false,
+DEFINE_BOOL(experimental_preparser_scope_analysis, false,
"perform scope analysis for preparsed inner functions")
-DEFINE_IMPLICATION(preparser_scope_analysis, lazy_inner_functions)
+DEFINE_IMPLICATION(experimental_preparser_scope_analysis, lazy_inner_functions)
// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
@@ -987,8 +1002,6 @@ DEFINE_BOOL(abort_on_stack_overflow, false,
DEFINE_BOOL(randomize_hashes, true,
"randomize hashes to avoid predictable hash collisions "
"(with snapshots this option cannot override the baked-in seed)")
-DEFINE_BOOL(rehash_snapshot, true,
- "rehash strings from the snapshot to override the baked-in seed")
DEFINE_INT(hash_seed, 0,
"Fixed seed to use to hash property keys (0 means random)"
"(with snapshots this option cannot override the baked-in seed)")
@@ -1040,6 +1053,7 @@ DEFINE_BOOL(help, false, "Print usage message, including flags, on console")
DEFINE_BOOL(dump_counters, false, "Dump counters on exit")
DEFINE_BOOL(dump_counters_nvp, false,
"Dump counters as name-value pairs on exit")
+DEFINE_BOOL(use_external_strings, false, "Use external strings for source code")
DEFINE_STRING(map_counters, "", "Map counters to a file")
DEFINE_ARGS(js_arguments,
@@ -1295,6 +1309,7 @@ DEFINE_BOOL(single_threaded, false, "disable the use of background tasks")
DEFINE_NEG_IMPLICATION(single_threaded, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(single_threaded, concurrent_marking)
DEFINE_NEG_IMPLICATION(single_threaded, concurrent_sweeping)
+DEFINE_NEG_IMPLICATION(single_threaded, minor_mc_parallel_marking)
DEFINE_NEG_IMPLICATION(single_threaded, parallel_compaction)
DEFINE_NEG_IMPLICATION(single_threaded, parallel_pointer_update)
DEFINE_NEG_IMPLICATION(single_threaded, concurrent_store_buffer)
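
The new line extends the set of features that --single_threaded disables. A minimal sketch of what a DEFINE_NEG_IMPLICATION amounts to once flags are processed, assuming only its documented "when A is set, force B off" behavior; the FLAG_* globals below are invented stand-ins so the sketch compiles:

#include <cstdio>

static bool FLAG_single_threaded = true;
static bool FLAG_minor_mc_parallel_marking = true;

// DEFINE_NEG_IMPLICATION(single_threaded, minor_mc_parallel_marking):
// setting the first flag forces the second one off.
static void EnforceImplications() {
  if (FLAG_single_threaded) FLAG_minor_mc_parallel_marking = false;
}

int main() {
  EnforceImplications();
  std::printf("minor_mc_parallel_marking: %d\n", FLAG_minor_mc_parallel_marking);
}
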
@@ -1321,7 +1336,7 @@ DEFINE_INT(dump_allocations_digest_at_alloc, -1,
// assembler.h
DEFINE_BOOL(enable_embedded_constant_pool, V8_EMBEDDED_CONSTANT_POOL,
- "enable use of embedded constant pools (ARM/PPC only)")
+ "enable use of embedded constant pools (PPC only)")
DEFINE_BOOL(unbox_double_fields, V8_DOUBLE_FIELDS_UNBOXING,
"enable in-object double fields unboxing (64-bit only)")
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index 6998d49e17..ed901cf6e8 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -575,18 +575,19 @@ void FlagList::PrintHelp() {
OFStream os(stdout);
os << "Usage:\n"
- << " shell [options] -e string\n"
- << " execute string in V8\n"
- << " shell [options] file1 file2 ... filek\n"
- << " run JavaScript scripts in file1, file2, ..., filek\n"
- << " shell [options]\n"
- << " shell [options] --shell [file1 file2 ... filek]\n"
- << " run an interactive JavaScript shell\n"
- << " d8 [options] file1 file2 ... filek\n"
- << " d8 [options]\n"
- << " d8 [options] --shell [file1 file2 ... filek]\n"
- << " run the new debugging shell\n\n"
- << "Options:\n";
+ " shell [options] -e string\n"
+ " execute string in V8\n"
+ " shell [options] file1 file2 ... filek\n"
+ " run JavaScript scripts in file1, file2, ..., filek\n"
+ " shell [options]\n"
+ " shell [options] --shell [file1 file2 ... filek]\n"
+ " run an interactive JavaScript shell\n"
+ " d8 [options] file1 file2 ... filek\n"
+ " d8 [options]\n"
+ " d8 [options] --shell [file1 file2 ... filek]\n"
+ " run the new debugging shell\n\n"
+ "Options:\n";
+
for (size_t i = 0; i < num_flags; ++i) {
Flag* f = &flags[i];
os << " --" << f->name() << " (" << f->comment() << ")\n"
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index f4f6690421..90610fafb6 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -15,6 +15,7 @@
#include "src/register-configuration.h"
#include "src/safepoint-table.h"
#include "src/string-stream.h"
+#include "src/visitors.h"
#include "src/vm-state-inl.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
@@ -405,14 +406,13 @@ Code* StackFrame::GetSafepointData(Isolate* isolate,
static bool GcSafeCodeContains(HeapObject* object, Address addr);
#endif
-
-void StackFrame::IteratePc(ObjectVisitor* v, Address* pc_address,
+void StackFrame::IteratePc(RootVisitor* v, Address* pc_address,
Address* constant_pool_address, Code* holder) {
Address pc = *pc_address;
DCHECK(GcSafeCodeContains(holder, pc));
unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
Object* code = holder;
- v->VisitPointer(&code);
+ v->VisitRootPointer(Root::kTop, &code);
if (code == holder) return;
holder = reinterpret_cast<Code*>(code);
pc = holder->instruction_start() + pc_offset;
@@ -601,12 +601,11 @@ void ExitFrame::SetCallerFp(Address caller_fp) {
Memory::Address_at(fp() + ExitFrameConstants::kCallerFPOffset) = caller_fp;
}
-
-void ExitFrame::Iterate(ObjectVisitor* v) const {
+void ExitFrame::Iterate(RootVisitor* v) const {
// The arguments are traversed as part of the expression stack of
// the calling frame.
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
- v->VisitPointer(&code_slot());
+ v->VisitRootPointer(Root::kTop, &code_slot());
}
@@ -782,7 +781,7 @@ void StandardFrame::Summarize(List<FrameSummary>* functions,
UNREACHABLE();
}
-void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
+void StandardFrame::IterateCompiledFrame(RootVisitor* v) const {
// Make sure that we're not doing "safe" stack frame iteration. We cannot
// possibly find pointers in optimized frames in that state.
DCHECK(can_access_heap_objects());
@@ -842,8 +841,8 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
// Visit the parameters that may be on top of the saved registers.
if (safepoint_entry.argument_count() > 0) {
- v->VisitPointers(parameters_base,
- parameters_base + safepoint_entry.argument_count());
+ v->VisitRootPointers(Root::kTop, parameters_base,
+ parameters_base + safepoint_entry.argument_count());
parameters_base += safepoint_entry.argument_count();
}
@@ -861,7 +860,7 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
for (int i = kNumSafepointRegisters - 1; i >= 0; i--) {
if (safepoint_entry.HasRegisterAt(i)) {
int reg_stack_index = MacroAssembler::SafepointRegisterStackIndex(i);
- v->VisitPointer(parameters_base + reg_stack_index);
+ v->VisitRootPointer(Root::kTop, parameters_base + reg_stack_index);
}
}
// Skip the words containing the register values.
@@ -874,7 +873,7 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
// Visit the rest of the parameters if they are tagged.
if (code->has_tagged_params()) {
- v->VisitPointers(parameters_base, parameters_limit);
+ v->VisitRootPointers(Root::kTop, parameters_base, parameters_limit);
}
// Visit pointer spill slots and locals.
@@ -882,7 +881,7 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
int byte_index = index >> kBitsPerByteLog2;
int bit_index = index & (kBitsPerByte - 1);
if ((safepoint_bits[byte_index] & (1U << bit_index)) != 0) {
- v->VisitPointer(parameters_limit + index);
+ v->VisitRootPointer(Root::kTop, parameters_limit + index);
}
}
@@ -892,15 +891,11 @@ void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
if (!is_wasm() && !is_wasm_to_js()) {
// If this frame has JavaScript ABI, visit the context (in stub and JS
// frames) and the function (in JS frames).
- v->VisitPointers(frame_header_base, frame_header_limit);
+ v->VisitRootPointers(Root::kTop, frame_header_base, frame_header_limit);
}
}
-
-void StubFrame::Iterate(ObjectVisitor* v) const {
- IterateCompiledFrame(v);
-}
-
+void StubFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
Code* StubFrame::unchecked_code() const {
return isolate()->FindCodeObject(pc());
@@ -926,10 +921,7 @@ int StubFrame::LookupExceptionHandlerInTable(int* stack_slots) {
return table->LookupReturn(pc_offset);
}
-void OptimizedFrame::Iterate(ObjectVisitor* v) const {
- IterateCompiledFrame(v);
-}
-
+void OptimizedFrame::Iterate(RootVisitor* v) const { IterateCompiledFrame(v); }
void JavaScriptFrame::SetParameterValue(int index, Object* value) const {
Memory::Object_at(GetParameterSlot(index)) = value;
@@ -1727,7 +1719,7 @@ Code* WasmCompiledFrame::unchecked_code() const {
return isolate()->FindCodeObject(pc());
}
-void WasmCompiledFrame::Iterate(ObjectVisitor* v) const {
+void WasmCompiledFrame::Iterate(RootVisitor* v) const {
IterateCompiledFrame(v);
}
@@ -1787,7 +1779,7 @@ int WasmCompiledFrame::LookupExceptionHandlerInTable(int* stack_slots) {
return table->LookupReturn(pc_offset);
}
-void WasmInterpreterEntryFrame::Iterate(ObjectVisitor* v) const {
+void WasmInterpreterEntryFrame::Iterate(RootVisitor* v) const {
IterateCompiledFrame(v);
}
@@ -1833,6 +1825,10 @@ int WasmInterpreterEntryFrame::position() const {
return FrameSummary::GetBottom(this).AsWasmInterpreted().SourcePosition();
}
+Object* WasmInterpreterEntryFrame::context() const {
+ return wasm_instance()->compiled_module()->ptr_to_native_context();
+}
+
Address WasmInterpreterEntryFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPOffset;
}
@@ -2029,26 +2025,23 @@ void ArgumentsAdaptorFrame::Print(StringStream* accumulator,
accumulator->Add("}\n\n");
}
-
-void EntryFrame::Iterate(ObjectVisitor* v) const {
+void EntryFrame::Iterate(RootVisitor* v) const {
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
-
-void StandardFrame::IterateExpressions(ObjectVisitor* v) const {
+void StandardFrame::IterateExpressions(RootVisitor* v) const {
const int offset = StandardFrameConstants::kLastObjectOffset;
Object** base = &Memory::Object_at(sp());
Object** limit = &Memory::Object_at(fp() + offset) + 1;
- v->VisitPointers(base, limit);
+ v->VisitRootPointers(Root::kTop, base, limit);
}
-
-void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
+void JavaScriptFrame::Iterate(RootVisitor* v) const {
IterateExpressions(v);
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
-void InternalFrame::Iterate(ObjectVisitor* v) const {
+void InternalFrame::Iterate(RootVisitor* v) const {
Code* code = LookupCode();
IteratePc(v, pc_address(), constant_pool_address(), code);
// Internal frames typically do not receive any arguments, hence their stack
@@ -2060,16 +2053,15 @@ void InternalFrame::Iterate(ObjectVisitor* v) const {
if (code->has_tagged_params()) IterateExpressions(v);
}
-
-void StubFailureTrampolineFrame::Iterate(ObjectVisitor* v) const {
+void StubFailureTrampolineFrame::Iterate(RootVisitor* v) const {
Object** base = &Memory::Object_at(sp());
Object** limit = &Memory::Object_at(
fp() + StubFailureTrampolineFrameConstants::kFixedHeaderBottomOffset);
- v->VisitPointers(base, limit);
+ v->VisitRootPointers(Root::kTop, base, limit);
base = &Memory::Object_at(fp() + StandardFrameConstants::kFunctionOffset);
const int offset = StandardFrameConstants::kLastObjectOffset;
limit = &Memory::Object_at(fp() + offset) + 1;
- v->VisitPointers(base, limit);
+ v->VisitRootPointers(Root::kTop, base, limit);
IteratePc(v, pc_address(), constant_pool_address(), LookupCode());
}
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index 05a2d2b01b..a5355a4e8c 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -6,12 +6,18 @@
#define V8_FRAMES_H_
#include "src/allocation.h"
+#include "src/flags.h"
#include "src/handles.h"
+#include "src/objects.h"
#include "src/safepoint-table.h"
namespace v8 {
namespace internal {
+class AbstractCode;
+class ObjectVisitor;
+class StringStream;
+
#if V8_TARGET_ARCH_ARM64
typedef uint64_t RegList;
#else
@@ -30,6 +36,7 @@ int JSCallerSavedCode(int n);
// Forward declarations.
class ExternalCallbackScope;
class Isolate;
+class RootVisitor;
class StackFrameIteratorBase;
class ThreadLocalTop;
class WasmInstanceObject;
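
The added forward declarations let frames.h avoid including visitors.h and friends: every use of RootVisitor in this header is by pointer, and C++ needs the complete type only where the object is dereferenced or stored by value. A minimal stand-alone illustration (all names invented):

class RootVisitor;  // forward declaration: a class exists, nothing more known

class Frame {
 public:
  void Iterate(RootVisitor* v) const;  // fine: pointer to an incomplete type
  // RootVisitor visitor_;             // error: member would need the full type
};
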
@@ -346,8 +353,10 @@ class ConstructFrameConstants : public TypedFrameConstants {
// FP-relative.
static const int kContextOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static const int kLengthOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
- static const int kImplicitReceiverOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
- DEFINE_TYPED_FRAME_SIZES(3);
+ static const int kConstructorOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(2);
+ static const int kNewTargetOrImplicitReceiverOffset =
+ TYPED_FRAME_PUSHED_VALUE_OFFSET(3);
+ DEFINE_TYPED_FRAME_SIZES(4);
};
class StubFailureTrampolineFrameConstants : public InternalFrameConstants {
@@ -589,8 +598,8 @@ class StackFrame BASE_EMBEDDED {
SafepointEntry* safepoint_entry,
unsigned* stack_slots);
- virtual void Iterate(ObjectVisitor* v) const = 0;
- static void IteratePc(ObjectVisitor* v, Address* pc_address,
+ virtual void Iterate(RootVisitor* v) const = 0;
+ static void IteratePc(RootVisitor* v, Address* pc_address,
Address* constant_pool_address, Code* holder);
// Sets a callback function for return-address rewriting profilers
@@ -661,7 +670,7 @@ class EntryFrame: public StackFrame {
Code* unchecked_code() const override;
// Garbage collection support.
- void Iterate(ObjectVisitor* v) const override;
+ void Iterate(RootVisitor* v) const override;
static EntryFrame* cast(StackFrame* frame) {
DCHECK(frame->is_entry());
@@ -714,7 +723,7 @@ class ExitFrame: public StackFrame {
Object*& code_slot() const;
// Garbage collection support.
- void Iterate(ObjectVisitor* v) const override;
+ void Iterate(RootVisitor* v) const override;
void SetCallerFp(Address caller_fp) override;
@@ -993,7 +1002,7 @@ class StandardFrame : public StackFrame {
// Iterate over expression stack including stack handlers, locals,
// and parts of the fixed part including context and code fields.
- void IterateExpressions(ObjectVisitor* v) const;
+ void IterateExpressions(RootVisitor* v) const;
// Returns the address of the n'th expression stack element.
virtual Address GetExpressionAddress(int n) const;
@@ -1007,7 +1016,7 @@ class StandardFrame : public StackFrame {
static inline bool IsConstructFrame(Address fp);
// Used by OptimizedFrames and StubFrames.
- void IterateCompiledFrame(ObjectVisitor* v) const;
+ void IterateCompiledFrame(RootVisitor* v) const;
private:
friend class StackFrame;
@@ -1057,7 +1066,7 @@ class JavaScriptFrame : public StandardFrame {
int GetArgumentsLength() const;
// Garbage collection support.
- void Iterate(ObjectVisitor* v) const override;
+ void Iterate(RootVisitor* v) const override;
// Printing support.
void Print(StringStream* accumulator, PrintMode mode,
@@ -1109,7 +1118,7 @@ class JavaScriptFrame : public StandardFrame {
// Garbage collection support. Iterates over incoming arguments,
// receiver, and any callee-saved registers.
- void IterateArguments(ObjectVisitor* v) const;
+ void IterateArguments(RootVisitor* v) const;
virtual void PrintFrameKind(StringStream* accumulator) const {}
@@ -1125,7 +1134,7 @@ class StubFrame : public StandardFrame {
Type type() const override { return STUB; }
// GC support.
- void Iterate(ObjectVisitor* v) const override;
+ void Iterate(RootVisitor* v) const override;
// Determine the code for the frame.
Code* unchecked_code() const override;
@@ -1152,7 +1161,7 @@ class OptimizedFrame : public JavaScriptFrame {
Type type() const override { return OPTIMIZED; }
// GC support.
- void Iterate(ObjectVisitor* v) const override;
+ void Iterate(RootVisitor* v) const override;
// Return a list with {SharedFunctionInfo} objects of this frame.
// The functions are ordered bottom-to-top (i.e. functions.last()
@@ -1285,7 +1294,7 @@ class WasmCompiledFrame final : public StandardFrame {
Type type() const override { return WASM_COMPILED; }
// GC support.
- void Iterate(ObjectVisitor* v) const override;
+ void Iterate(RootVisitor* v) const override;
// Printing support.
void Print(StringStream* accumulator, PrintMode mode,
@@ -1327,7 +1336,7 @@ class WasmInterpreterEntryFrame final : public StandardFrame {
Type type() const override { return WASM_INTERPRETER_ENTRY; }
// GC support.
- void Iterate(ObjectVisitor* v) const override;
+ void Iterate(RootVisitor* v) const override;
// Printing support.
void Print(StringStream* accumulator, PrintMode mode,
@@ -1344,6 +1353,7 @@ class WasmInterpreterEntryFrame final : public StandardFrame {
WasmInstanceObject* wasm_instance() const;
Script* script() const override;
int position() const override;
+ Object* context() const override;
static WasmInterpreterEntryFrame* cast(StackFrame* frame) {
DCHECK(frame->is_wasm_interpreter_entry());
@@ -1386,7 +1396,7 @@ class InternalFrame: public StandardFrame {
Type type() const override { return INTERNAL; }
// Garbage collection support.
- void Iterate(ObjectVisitor* v) const override;
+ void Iterate(RootVisitor* v) const override;
// Determine the code for the frame.
Code* unchecked_code() const override;
@@ -1414,7 +1424,7 @@ class StubFailureTrampolineFrame: public StandardFrame {
// This method could be called during marking phase of GC.
Code* unchecked_code() const override;
- void Iterate(ObjectVisitor* v) const override;
+ void Iterate(RootVisitor* v) const override;
// Architecture-specific register description.
static Register fp_register();
@@ -1527,7 +1537,6 @@ class JavaScriptFrameIterator BASE_EMBEDDED {
// NOTE: The stack trace frame iterator is an iterator that only traverses proper
// JavaScript frames that have proper JavaScript functions and WASM frames.
-// This excludes the problematic functions in runtime.js.
class StackTraceFrameIterator BASE_EMBEDDED {
public:
explicit StackTraceFrameIterator(Isolate* isolate);
diff --git a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
index 8a9edc7b83..30913d50c7 100644
--- a/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
+++ b/deps/v8/src/full-codegen/arm/full-codegen-arm.cc
@@ -1029,7 +1029,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ b(eq, &no_descriptors);
__ LoadInstanceDescriptors(r0, r2);
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset));
+ __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeOffset));
__ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
@@ -1228,8 +1228,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(r3, r2, r1, r0);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- Callable callable = CodeFactory::FastCloneShallowObject(
- isolate(), expr->properties_count());
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate());
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1767,12 +1766,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Assignment to var or initializing assignment to let/const in harmony
// mode.
MemOperand location = VarOperand(var, r1);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ ldr(r2, location);
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization);
- }
EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2241,9 +2234,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ PushOperand(Smi::FromInt(language_mode()));
+ CallRuntimeWithOperands(Runtime::kDeleteProperty);
context()->Plug(r0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -2255,7 +2247,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ LoadGlobalObject(r2);
__ mov(r1, Operand(var->name()));
__ Push(r2, r1);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
+ __ Push(Smi::FromInt(SLOPPY));
+ __ CallRuntime(Runtime::kDeleteProperty);
context()->Plug(r0);
} else {
DCHECK(!var->IsLookupSlot());
@@ -2760,43 +2753,7 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
static Address GetInterruptImmediateLoadAddress(Address pc) {
Address load_address = pc - 2 * Assembler::kInstrSize;
- if (!FLAG_enable_embedded_constant_pool) {
- DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
- } else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) {
- // This is an extended constant pool lookup.
- if (CpuFeatures::IsSupported(ARMv7)) {
- load_address -= 2 * Assembler::kInstrSize;
- DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
- DCHECK(Assembler::IsMovT(
- Memory::int32_at(load_address + Assembler::kInstrSize)));
- } else {
- load_address -= 4 * Assembler::kInstrSize;
- DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + Assembler::kInstrSize)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + 3 * Assembler::kInstrSize)));
- }
- } else if (CpuFeatures::IsSupported(ARMv7) &&
- Assembler::IsMovT(Memory::int32_at(load_address))) {
- // This is a movw / movt immediate load.
- load_address -= Assembler::kInstrSize;
- DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
- } else if (!CpuFeatures::IsSupported(ARMv7) &&
- Assembler::IsOrrImmed(Memory::int32_at(load_address))) {
- // This is a mov / orr immediate load.
- load_address -= 3 * Assembler::kInstrSize;
- DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + Assembler::kInstrSize)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
- } else {
- // This is a small constant pool lookup.
- DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
- }
+ DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
return load_address;
}
diff --git a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
index 717e9d4470..bbb7450fe4 100644
--- a/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
+++ b/deps/v8/src/full-codegen/arm64/full-codegen-arm64.cc
@@ -1017,7 +1017,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Cbz(x1, &no_descriptors);
__ LoadInstanceDescriptors(x0, x2);
- __ Ldr(x2, FieldMemOperand(x2, DescriptorArray::kEnumCacheOffset));
+ __ Ldr(x2, FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeOffset));
__ Ldr(x2,
FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -1212,8 +1212,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(x3, x2, x1, x0);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- Callable callable = CodeFactory::FastCloneShallowObject(
- isolate(), expr->properties_count());
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate());
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1710,11 +1709,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Assignment to var or initializing assignment to let/const in harmony
// mode.
MemOperand location = VarOperand(var, x1);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- __ Ldr(x10, location);
- __ CompareRoot(x10, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization);
- }
EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2206,9 +2200,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ PushOperand(Smi::FromInt(language_mode()));
+ CallRuntimeWithOperands(Runtime::kDeleteProperty);
context()->Plug(x0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -2220,7 +2213,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ LoadGlobalObject(x12);
__ Mov(x11, Operand(var->name()));
__ Push(x12, x11);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
+ __ Push(Smi::FromInt(SLOPPY));
+ __ CallRuntime(Runtime::kDeleteProperty);
context()->Plug(x0);
} else {
DCHECK(!var->IsLookupSlot());
diff --git a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
index e4d4284d9c..0af067c81d 100644
--- a/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/full-codegen/ia32/full-codegen-ia32.cc
@@ -961,7 +961,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &no_descriptors);
__ LoadInstanceDescriptors(eax, ecx);
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheOffset));
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeOffset));
__ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
@@ -1154,8 +1154,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(ebx, Immediate(SmiFromSlot(expr->literal_slot())));
__ mov(ecx, Immediate(constant_properties));
__ mov(edx, Immediate(Smi::FromInt(flags)));
- Callable callable = CodeFactory::FastCloneShallowObject(
- isolate(), expr->properties_count());
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate());
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1686,12 +1685,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Assignment to var or initializing assignment to let/const in harmony
// mode.
MemOperand location = VarOperand(var, ecx);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ Check(equal, kLetBindingReInitialization);
- }
EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2157,9 +2150,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ PushOperand(Smi::FromInt(language_mode()));
+ CallRuntimeWithOperands(Runtime::kDeleteProperty);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -2171,7 +2163,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ mov(eax, NativeContextOperand());
__ push(ContextOperand(eax, Context::EXTENSION_INDEX));
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
+ __ Push(Smi::FromInt(SLOPPY));
+ __ CallRuntime(Runtime::kDeleteProperty);
context()->Plug(eax);
} else {
DCHECK(!var->IsLookupSlot());
diff --git a/deps/v8/src/full-codegen/mips/OWNERS b/deps/v8/src/full-codegen/mips/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/full-codegen/mips/OWNERS
+++ b/deps/v8/src/full-codegen/mips/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
index 4725d3c3d4..e051b0b158 100644
--- a/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
+++ b/deps/v8/src/full-codegen/mips/full-codegen-mips.cc
@@ -1023,7 +1023,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Branch(&no_descriptors, eq, a1, Operand(Smi::kZero));
__ LoadInstanceDescriptors(v0, a2);
- __ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset));
+ __ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeOffset));
__ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
@@ -1222,8 +1222,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- Callable callable = CodeFactory::FastCloneShallowObject(
- isolate(), expr->properties_count());
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate());
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1776,12 +1775,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Assignment to var or initializing assignment to let/const in harmony
// mode.
MemOperand location = VarOperand(var, a1);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ lw(a2, location);
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization, a2, Operand(t0));
- }
EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2259,9 +2252,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ PushOperand(Smi::FromInt(language_mode()));
+ CallRuntimeWithOperands(Runtime::kDeleteProperty);
context()->Plug(v0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -2273,7 +2265,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ LoadGlobalObject(a2);
__ li(a1, Operand(var->name()));
__ Push(a2, a1);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
+ __ Push(Smi::FromInt(SLOPPY));
+ __ CallRuntime(Runtime::kDeleteProperty);
context()->Plug(v0);
} else {
DCHECK(!var->IsLookupSlot());
diff --git a/deps/v8/src/full-codegen/mips64/OWNERS b/deps/v8/src/full-codegen/mips64/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/full-codegen/mips64/OWNERS
+++ b/deps/v8/src/full-codegen/mips64/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
index 150271e216..718e174b26 100644
--- a/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
+++ b/deps/v8/src/full-codegen/mips64/full-codegen-mips64.cc
@@ -123,7 +123,7 @@ void FullCodeGenerator::Generate() {
if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ ld(a2, MemOperand(sp, receiver_offset));
+ __ Ld(a2, MemOperand(sp, receiver_offset));
__ AssertNotSmi(a2);
__ GetObjectType(a2, a2, a2);
__ Check(ge, kSloppyFunctionExpectsJSReceiverReceiver, a2,
@@ -140,13 +140,13 @@ void FullCodeGenerator::Generate() {
// Increment invocation count for the function.
{
Comment cmnt(masm_, "[ Increment invocation count");
- __ ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
- __ ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
- __ ld(a4, FieldMemOperand(
+ __ Ld(a0, FieldMemOperand(a1, JSFunction::kFeedbackVectorOffset));
+ __ Ld(a0, FieldMemOperand(a0, Cell::kValueOffset));
+ __ Ld(a4, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
- __ sd(a4, FieldMemOperand(
+ __ Sd(a4, FieldMemOperand(
a0, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
}
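
The lowercase mips64 ld/sd emit a single instruction whose signed 16-bit offset field cannot address every slot, while the capitalized MacroAssembler::Ld/Sd used throughout this file fall back to a scratch-register sequence for out-of-range offsets. A hedged sketch of that idea (the real implementation in macro-assembler-mips64.cc differs in detail):

// Illustrative shape only; is_int16, li, daddu and the raw ld are real
// MIPS64 assembler pieces, but this exact body is a simplification.
void MacroAssembler::Ld(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset())) {
    ld(rd, rs);  // offset fits the instruction's 16-bit immediate
  } else {
    li(at, Operand(rs.offset()));  // materialize the large offset in 'at'
    daddu(at, at, rs.rm());        // scratch = base + offset
    ld(rd, MemOperand(at, 0));
  }
}
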
@@ -173,7 +173,7 @@ void FullCodeGenerator::Generate() {
// Do pushes.
__ Dsubu(sp, sp, Operand(kMaxPushes * kPointerSize));
for (int i = 0; i < kMaxPushes; i++) {
- __ sd(t1, MemOperand(sp, i * kPointerSize));
+ __ Sd(t1, MemOperand(sp, i * kPointerSize));
}
// Continue loop if not done.
__ Dsubu(a2, a2, Operand(1));
@@ -183,7 +183,7 @@ void FullCodeGenerator::Generate() {
// Emit the remaining pushes.
__ Dsubu(sp, sp, Operand(remaining * kPointerSize));
for (int i = 0; i < remaining; i++) {
- __ sd(t1, MemOperand(sp, i * kPointerSize));
+ __ Sd(t1, MemOperand(sp, i * kPointerSize));
}
}
}
@@ -229,7 +229,7 @@ void FullCodeGenerator::Generate() {
// Context is returned in v0. It replaces the context passed to us.
// It's saved in the stack and kept live in cp.
__ mov(cp, v0);
- __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
@@ -240,10 +240,10 @@ void FullCodeGenerator::Generate() {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ ld(a0, MemOperand(fp, parameter_offset));
+ __ Ld(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
MemOperand target = ContextMemOperand(cp, var->index());
- __ sd(a0, target);
+ __ Sd(a0, target);
// Update the write barrier.
if (need_write_barrier) {
@@ -276,7 +276,7 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate arguments object");
if (!function_in_register_a1) {
// Load this again, if it's used by the local context below.
- __ ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ld(a1, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
Callable callable = CodeFactory::FastNewStrictArguments(isolate());
@@ -351,9 +351,9 @@ void FullCodeGenerator::ClearAccumulator() {
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
__ li(a2, Operand(profiling_counter_));
- __ ld(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ __ Ld(a3, FieldMemOperand(a2, Cell::kValueOffset));
__ Dsubu(a3, a3, Operand(Smi::FromInt(delta)));
- __ sd(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ __ Sd(a3, FieldMemOperand(a2, Cell::kValueOffset));
}
@@ -365,7 +365,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
__ li(a2, Operand(profiling_counter_));
__ li(a3, Operand(Smi::FromInt(reset_value)));
- __ sd(a3, FieldMemOperand(a2, Cell::kValueOffset));
+ __ Sd(a3, FieldMemOperand(a2, Cell::kValueOffset));
}
@@ -456,7 +456,7 @@ void FullCodeGenerator::EmitReturnSequence() {
}
void FullCodeGenerator::RestoreContext() {
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
@@ -552,7 +552,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
if (count > 1) codegen()->DropOperands(count - 1);
- __ sd(reg, MemOperand(sp, 0));
+ __ Sd(reg, MemOperand(sp, 0));
}
@@ -686,7 +686,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
void FullCodeGenerator::GetVar(Register dest, Variable* var) {
// Use destination as scratch.
MemOperand location = VarOperand(var, dest);
- __ ld(dest, location);
+ __ Ld(dest, location);
}
@@ -699,7 +699,7 @@ void FullCodeGenerator::SetVar(Variable* var,
DCHECK(!scratch0.is(scratch1));
DCHECK(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
- __ sd(src, location);
+ __ Sd(src, location);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
__ RecordWriteContextSlot(scratch0,
@@ -738,7 +738,7 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
- __ ld(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ Ld(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
__ LoadRoot(a4, Heap::kWithContextMapRootIndex);
__ Check(ne, kDeclarationInWithContext,
a1, Operand(a4));
@@ -769,7 +769,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
if (variable->binding_needs_init()) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
- __ sd(a4, StackOperand(variable));
+ __ Sd(a4, StackOperand(variable));
}
break;
@@ -778,7 +778,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ sd(at, ContextMemOperand(cp, variable->index()));
+ __ Sd(at, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), BailoutState::NO_REGISTERS);
}
@@ -819,7 +819,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
- __ sd(result_register(), StackOperand(variable));
+ __ Sd(result_register(), StackOperand(variable));
break;
}
@@ -827,7 +827,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ sd(result_register(), ContextMemOperand(cp, variable->index()));
+ __ Sd(result_register(), ContextMemOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
__ RecordWriteContextSlot(cp,
@@ -893,7 +893,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ mov(a0, result_register()); // CompareStub requires args in a0, a1.
// Perform the comparison as if via '==='.
- __ ld(a1, MemOperand(sp, 0)); // Switch value.
+ __ Ld(a1, MemOperand(sp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
@@ -999,7 +999,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
Label use_cache;
- __ ld(v0, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ Ld(v0, FieldMemOperand(a0, HeapObject::kMapOffset));
__ Branch(&use_cache);
// Get the set of properties to enumerate.
@@ -1012,7 +1012,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array;
- __ ld(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ Ld(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kMetaMapRootIndex);
__ Branch(&fixed_array, ne, a2, Operand(at));
@@ -1024,8 +1024,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Branch(&no_descriptors, eq, a1, Operand(Smi::kZero));
__ LoadInstanceDescriptors(v0, a2);
- __ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset));
- __ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ Ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeOffset));
+ __ Ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
__ li(a0, Operand(Smi::kZero));
@@ -1042,7 +1042,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ li(a1, Operand(Smi::FromInt(1))); // Smi(1) indicates slow check
__ Push(a1, v0); // Smi and array
- __ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ Ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
__ Push(a1); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), BailoutState::NO_REGISTERS);
__ li(a0, Operand(Smi::kZero));
@@ -1053,33 +1053,33 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
SetExpressionAsStatementPosition(stmt->each());
// Load the current count to a0, load the length to a1.
- __ ld(a0, MemOperand(sp, 0 * kPointerSize));
- __ ld(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Ld(a0, MemOperand(sp, 0 * kPointerSize));
+ __ Ld(a1, MemOperand(sp, 1 * kPointerSize));
__ Branch(loop_statement.break_label(), hs, a0, Operand(a1));
// Get the current entry of the array into register a3.
- __ ld(a2, MemOperand(sp, 2 * kPointerSize));
+ __ Ld(a2, MemOperand(sp, 2 * kPointerSize));
__ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ SmiScale(a4, a0, kPointerSizeLog2);
__ daddu(a4, a2, a4); // Array base + scaled (smi) index.
- __ ld(result_register(), MemOperand(a4)); // Current entry.
+ __ Ld(result_register(), MemOperand(a4)); // Current entry.
// Get the expected map from the stack or a smi in the
// permanent slow case into register a2.
- __ ld(a2, MemOperand(sp, 3 * kPointerSize));
+ __ Ld(a2, MemOperand(sp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
Label update_each;
- __ ld(a1, MemOperand(sp, 4 * kPointerSize));
- __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Ld(a1, MemOperand(sp, 4 * kPointerSize));
+ __ Ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&update_each, eq, a4, Operand(a2));
// We need to filter the key, record slow-path here.
int const vector_index = SmiFromSlot(slot)->value();
__ EmitLoadFeedbackVector(a3);
__ li(a2, Operand(FeedbackVector::MegamorphicSentinel(isolate())));
- __ sd(a2, FieldMemOperand(a3, FixedArray::OffsetOfElementAt(vector_index)));
+ __ Sd(a2, FieldMemOperand(a3, FixedArray::OffsetOfElementAt(vector_index)));
__ mov(a0, result_register());
// a0 contains the key. The receiver in a1 is the second argument to the
@@ -1130,8 +1130,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
- __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
- __ ld(StoreDescriptor::ValueRegister(),
+ __ Ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ __ Ld(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1141,7 +1141,7 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
FeedbackSlot slot) {
DCHECK(NeedsHomeObject(initializer));
__ Move(StoreDescriptor::ReceiverRegister(), v0);
- __ ld(StoreDescriptor::ValueRegister(),
+ __ Ld(StoreDescriptor::ValueRegister(),
MemOperand(sp, offset * kPointerSize));
CallStoreIC(slot, isolate()->factory()->home_object_symbol());
}
@@ -1216,7 +1216,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Handle<BoilerplateDescription> constant_properties =
expr->GetOrBuildConstantProperties(isolate());
- __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ li(a2, Operand(SmiFromSlot(expr->literal_slot())));
__ li(a1, Operand(constant_properties));
__ li(a0, Operand(Smi::FromInt(expr->ComputeFlags())));
@@ -1224,8 +1224,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(a3, a2, a1, a0);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- Callable callable = CodeFactory::FastCloneShallowObject(
- isolate(), expr->properties_count());
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate());
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1263,7 +1262,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(StoreDescriptor::ValueRegister(), result_register());
DCHECK(StoreDescriptor::ValueRegister().is(a0));
- __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ __ Ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(property->GetSlot(0), key->value(), kStoreOwn);
PrepareForBailoutForId(key->id(), BailoutState::NO_REGISTERS);
@@ -1276,7 +1275,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
}
// Duplicate receiver on stack.
- __ ld(a0, MemOperand(sp));
+ __ Ld(a0, MemOperand(sp));
PushOperand(a0);
VisitForStackValue(key);
VisitForStackValue(value);
@@ -1293,7 +1292,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
- __ ld(a0, MemOperand(sp));
+ __ Ld(a0, MemOperand(sp));
PushOperand(a0);
VisitForStackValue(value);
DCHECK(property->emit_store());
@@ -1323,7 +1322,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end();
++it) {
- __ ld(a0, MemOperand(sp)); // Duplicate receiver.
+ __ Ld(a0, MemOperand(sp)); // Duplicate receiver.
PushOperand(a0);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
@@ -1349,7 +1348,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
expr->GetOrBuildConstantElements(isolate());
__ mov(a0, result_register());
- __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ li(a2, Operand(SmiFromSlot(expr->literal_slot())));
__ li(a1, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
@@ -1386,7 +1385,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
VisitForAccumulatorValue(subexpr);
__ li(StoreDescriptor::NameRegister(), Operand(Smi::FromInt(array_index)));
- __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ mov(StoreDescriptor::ValueRegister(), result_register());
CallKeyedStoreIC(expr->LiteralFeedbackSlot());
@@ -1419,7 +1418,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj());
- __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
} else {
VisitForStackValue(property->obj());
}
@@ -1429,9 +1428,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ ld(LoadDescriptor::ReceiverRegister(),
+ __ Ld(LoadDescriptor::ReceiverRegister(),
MemOperand(sp, 1 * kPointerSize));
- __ ld(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+ __ Ld(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1567,11 +1566,11 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ LoadRoot(a3,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
__ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ sd(a4, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sd(a2, FieldMemOperand(v0, JSIteratorResult::kValueOffset));
- __ sd(a3, FieldMemOperand(v0, JSIteratorResult::kDoneOffset));
+ __ Sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ Sd(a4, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ Sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ Sd(a2, FieldMemOperand(v0, JSIteratorResult::kValueOffset));
+ __ Sd(a3, FieldMemOperand(v0, JSIteratorResult::kDoneOffset));
STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
}
@@ -1717,7 +1716,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, FeedbackSlot slot) {
void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
Variable* var, MemOperand location) {
- __ sd(result_register(), location);
+ __ Sd(result_register(), location);
if (var->IsContextSlot()) {
// RecordWrite may destroy all its register arguments.
__ Move(a3, result_register());
@@ -1743,7 +1742,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Perform an initialization check for lexically declared variables.
if (hole_check_mode == HoleCheckMode::kRequired) {
Label assign;
- __ ld(a3, location);
+ __ Ld(a3, location);
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
__ Branch(&assign, ne, a3, Operand(a4));
__ li(a3, Operand(var->name()));
@@ -1761,7 +1760,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
MemOperand location = VarOperand(var, a1);
- __ ld(a3, location);
+ __ Ld(a3, location);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(&uninitialized_this, eq, a3, Operand(at));
__ li(a0, Operand(var->name()));
@@ -1777,12 +1776,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Assignment to var or initializing assignment to let/const in harmony
// mode.
MemOperand location = VarOperand(var, a1);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ ld(a2, location);
- __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization, a2, Operand(a4));
- }
EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -1841,14 +1834,14 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
DCHECK(!callee->AsProperty()->IsSuperAccess());
- __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(),
BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
- __ ld(at, MemOperand(sp, 0));
+ __ Ld(at, MemOperand(sp, 0));
PushOperand(at);
- __ sd(v0, MemOperand(sp, kPointerSize));
+ __ Sd(v0, MemOperand(sp, kPointerSize));
convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
@@ -1866,16 +1859,16 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadDescriptor::NameRegister(), v0);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(),
BailoutState::TOS_REGISTER);
// Push the target function under the receiver.
- __ ld(at, MemOperand(sp, 0));
+ __ Ld(at, MemOperand(sp, 0));
PushOperand(at);
- __ sd(v0, MemOperand(sp, kPointerSize));
+ __ Sd(v0, MemOperand(sp, kPointerSize));
EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
@@ -1904,7 +1897,7 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
CodeFactory::CallICTrampoline(isolate(), mode, expr->tail_call_mode())
.code();
__ li(a3, Operand(IntFromSlot(expr->CallFeedbackICSlot())));
- __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ Ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
CallIC(code);
OperandStackDepthDecrement(arg_count + 1);
@@ -1939,7 +1932,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// Load function and argument count into a1 and a0.
__ li(a0, Operand(arg_count));
- __ ld(a1, MemOperand(sp, arg_count * kPointerSize));
+ __ Ld(a1, MemOperand(sp, arg_count * kPointerSize));
// Record call targets in unoptimized code.
__ EmitLoadFeedbackVector(a2);
@@ -2089,8 +2082,8 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// v0 now contains the constructor function. Grab the
// instance class name from there.
- __ ld(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
- __ ld(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ Ld(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset));
__ Branch(&done);
// Functions have class 'Function'.
@@ -2165,7 +2158,7 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
PrepareForBailoutForId(expr->CallId(), BailoutState::NO_REGISTERS);
// Move target to a1.
int const argc = args->length() - 2;
- __ ld(a1, MemOperand(sp, (argc + 1) * kPointerSize));
+ __ Ld(a1, MemOperand(sp, (argc + 1) * kPointerSize));
// Call the target.
__ li(a0, Operand(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
@@ -2180,8 +2173,8 @@ void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
DCHECK_EQ(1, args->length());
VisitForAccumulatorValue(args->at(0));
__ AssertFunction(v0);
- __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ ld(v0, FieldMemOperand(v0, Map::kPrototypeOffset));
+ __ Ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ Ld(v0, FieldMemOperand(v0, Map::kPrototypeOffset));
context()->Plug(v0);
}
@@ -2190,7 +2183,7 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
ExternalReference debug_is_active =
ExternalReference::debug_is_active_address(isolate());
__ li(at, Operand(debug_is_active));
- __ lbu(v0, MemOperand(at));
+ __ Lbu(v0, MemOperand(at));
__ SmiTag(v0);
context()->Plug(v0);
}
@@ -2209,11 +2202,11 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
__ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, a1);
__ Pop(a2, a3);
__ LoadRoot(a4, Heap::kEmptyFixedArrayRootIndex);
- __ sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ sd(a4, FieldMemOperand(v0, JSObject::kPropertiesOffset));
- __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
- __ sd(a2, FieldMemOperand(v0, JSIteratorResult::kValueOffset));
- __ sd(a3, FieldMemOperand(v0, JSIteratorResult::kDoneOffset));
+ __ Sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ Sd(a4, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+ __ Sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
+ __ Sd(a2, FieldMemOperand(v0, JSIteratorResult::kValueOffset));
+ __ Sd(a3, FieldMemOperand(v0, JSIteratorResult::kDoneOffset));
STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
__ jmp(&done);
@@ -2241,7 +2234,7 @@ void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
int arg_count = args->length();
SetCallPosition(expr);
- __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ Ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ li(a0, Operand(arg_count));
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
@@ -2260,9 +2253,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ PushOperand(Smi::FromInt(language_mode()));
+ CallRuntimeWithOperands(Runtime::kDeleteProperty);
context()->Plug(v0);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -2274,7 +2266,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ LoadGlobalObject(a2);
__ li(a1, Operand(var->name()));
__ Push(a2, a1);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
+ __ Push(Smi::FromInt(SLOPPY));
+ __ CallRuntime(Runtime::kDeleteProperty);
context()->Plug(v0);
} else {
DCHECK(!var->IsLookupSlot());
@@ -2382,7 +2375,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
- __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(prop);
break;
}
@@ -2390,9 +2383,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- __ ld(LoadDescriptor::ReceiverRegister(),
+ __ Ld(LoadDescriptor::ReceiverRegister(),
MemOperand(sp, 1 * kPointerSize));
- __ ld(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+ __ Ld(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
EmitKeyedPropertyLoad(prop);
break;
}
@@ -2433,10 +2426,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
__ push(v0);
break;
case NAMED_PROPERTY:
- __ sd(v0, MemOperand(sp, kPointerSize));
+ __ Sd(v0, MemOperand(sp, kPointerSize));
break;
case KEYED_PROPERTY:
- __ sd(v0, MemOperand(sp, 2 * kPointerSize));
+ __ Sd(v0, MemOperand(sp, 2 * kPointerSize));
break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
@@ -2471,10 +2464,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
PushOperand(v0);
break;
case NAMED_PROPERTY:
- __ sd(v0, MemOperand(sp, kPointerSize));
+ __ Sd(v0, MemOperand(sp, kPointerSize));
break;
case KEYED_PROPERTY:
- __ sd(v0, MemOperand(sp, 2 * kPointerSize));
+ __ Sd(v0, MemOperand(sp, 2 * kPointerSize));
break;
case NAMED_SUPER_PROPERTY:
case KEYED_SUPER_PROPERTY:
@@ -2576,7 +2569,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Factory* factory = isolate()->factory();
if (String::Equals(check, factory->number_string())) {
__ JumpIfSmi(v0, if_true);
- __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ Ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
Split(eq, v0, Operand(at), if_true, if_false, fall_through);
} else if (String::Equals(check, factory->string_string())) {
@@ -2598,14 +2591,14 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ Branch(if_false, eq, v0, Operand(at));
__ JumpIfSmi(v0, if_false);
// Check for undetectable objects => true.
- __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ Ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ Lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
} else if (String::Equals(check, factory->function_string())) {
__ JumpIfSmi(v0, if_false);
- __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ Ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ Lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1,
Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(eq, a1, Operand(1 << Map::kIsCallable), if_true, if_false,
@@ -2618,7 +2611,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
__ GetObjectType(v0, v0, a1);
__ Branch(if_false, lt, a1, Operand(FIRST_JS_RECEIVER_TYPE));
// Check for callable or undetectable objects => false.
- __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ Lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1,
Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
@@ -2721,8 +2714,8 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
} else {
__ JumpIfSmi(v0, if_false);
- __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+ __ Ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ Lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
__ And(a1, a1, Operand(1 << Map::kIsUndetectable));
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
}
@@ -2742,20 +2735,20 @@ Register FullCodeGenerator::context_register() {
void FullCodeGenerator::LoadFromFrameField(int frame_offset, Register value) {
// DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
DCHECK(IsAligned(frame_offset, kPointerSize));
- // __ sw(value, MemOperand(fp, frame_offset));
- __ ld(value, MemOperand(fp, frame_offset));
+ // __ Sw(value, MemOperand(fp, frame_offset));
+ __ Ld(value, MemOperand(fp, frame_offset));
}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
// DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
DCHECK(IsAligned(frame_offset, kPointerSize));
- // __ sw(value, MemOperand(fp, frame_offset));
- __ sd(value, MemOperand(fp, frame_offset));
+ // __ Sw(value, MemOperand(fp, frame_offset));
+ __ Sd(value, MemOperand(fp, frame_offset));
}
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ld(dst, ContextMemOperand(cp, context_index));
+ __ Ld(dst, ContextMemOperand(cp, context_index));
}
@@ -2771,10 +2764,10 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ ld(at, ContextMemOperand(cp, Context::CLOSURE_INDEX));
+ __ Ld(at, ContextMemOperand(cp, Context::CLOSURE_INDEX));
} else {
DCHECK(closure_scope->is_function_scope());
- __ ld(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ld(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
PushOperand(at);
}
diff --git a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
index 38e90c5071..2ab08235cf 100644
--- a/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/deps/v8/src/full-codegen/ppc/full-codegen-ppc.cc
@@ -987,7 +987,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ beq(&no_descriptors);
__ LoadInstanceDescriptors(r3, r5);
- __ LoadP(r5, FieldMemOperand(r5, DescriptorArray::kEnumCacheOffset));
+ __ LoadP(r5, FieldMemOperand(r5, DescriptorArray::kEnumCacheBridgeOffset));
__ LoadP(r5,
FieldMemOperand(r5, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -1193,8 +1193,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(r6, r5, r4, r3);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- Callable callable = CodeFactory::FastCloneShallowObject(
- isolate(), expr->properties_count());
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate());
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1774,12 +1773,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Assignment to var or initializing assignment to let/const in harmony
// mode.
MemOperand location = VarOperand(var, r4);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ LoadP(r5, location);
- __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization);
- }
EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2248,9 +2241,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ PushOperand(Smi::FromInt(language_mode()));
+ CallRuntimeWithOperands(Runtime::kDeleteProperty);
context()->Plug(r3);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -2262,7 +2254,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ LoadGlobalObject(r5);
__ mov(r4, Operand(var->name()));
__ Push(r5, r4);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
+ __ Push(Smi::FromInt(SLOPPY));
+ __ CallRuntime(Runtime::kDeleteProperty);
context()->Plug(r3);
} else {
DCHECK(!var->IsLookupSlot());
diff --git a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
index 60cbcb3cfb..250406ad53 100644
--- a/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
+++ b/deps/v8/src/full-codegen/s390/full-codegen-s390.cc
@@ -954,7 +954,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ beq(&no_descriptors, Label::kNear);
__ LoadInstanceDescriptors(r2, r4);
- __ LoadP(r4, FieldMemOperand(r4, DescriptorArray::kEnumCacheOffset));
+ __ LoadP(r4, FieldMemOperand(r4, DescriptorArray::kEnumCacheBridgeOffset));
__ LoadP(r4,
FieldMemOperand(r4, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -1158,8 +1158,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Push(r5, r4, r3, r2);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
- Callable callable = CodeFactory::FastCloneShallowObject(
- isolate(), expr->properties_count());
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate());
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1747,12 +1746,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Assignment to var or initializing assignment to let/const in harmony
// mode.
MemOperand location = VarOperand(var, r3);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ LoadP(r4, location);
- __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
- __ Check(eq, kLetBindingReInitialization);
- }
EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2207,9 +2200,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ PushOperand(Smi::FromInt(language_mode()));
+ CallRuntimeWithOperands(Runtime::kDeleteProperty);
context()->Plug(r2);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -2221,7 +2213,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ LoadGlobalObject(r4);
__ mov(r3, Operand(var->name()));
__ Push(r4, r3);
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
+ __ Push(Smi::FromInt(SLOPPY));
+ __ CallRuntime(Runtime::kDeleteProperty);
context()->Plug(r2);
} else {
DCHECK(!var->IsLookupSlot());
diff --git a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
index 5e9d52af38..b15874c84b 100644
--- a/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
+++ b/deps/v8/src/full-codegen/x64/full-codegen-x64.cc
@@ -987,7 +987,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &no_descriptors);
__ LoadInstanceDescriptors(rax, rcx);
- __ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheOffset));
+ __ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeOffset));
__ movp(rcx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
@@ -1185,8 +1185,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Move(rbx, SmiFromSlot(expr->literal_slot()));
__ Move(rcx, constant_properties);
__ Move(rdx, Smi::FromInt(flags));
- Callable callable = CodeFactory::FastCloneShallowObject(
- isolate(), expr->properties_count());
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate());
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1678,12 +1677,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Assignment to var or initializing assignment to let/const in harmony
// mode.
MemOperand location = VarOperand(var, rcx);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ movp(rdx, location);
- __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
- __ Check(equal, kLetBindingReInitialization);
- }
EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2146,9 +2139,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ PushOperand(Smi::FromInt(language_mode()));
+ CallRuntimeWithOperands(Runtime::kDeleteProperty);
context()->Plug(rax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -2160,7 +2152,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ movp(rax, NativeContextOperand());
__ Push(ContextOperand(rax, Context::EXTENSION_INDEX));
__ Push(var->name());
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
+ __ Push(Smi::FromInt(SLOPPY));
+ __ CallRuntime(Runtime::kDeleteProperty);
context()->Plug(rax);
} else {
DCHECK(!var->IsLookupSlot());
diff --git a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
index f0239be1a9..0499100746 100644
--- a/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
+++ b/deps/v8/src/full-codegen/x87/full-codegen-x87.cc
@@ -951,7 +951,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(equal, &no_descriptors);
__ LoadInstanceDescriptors(eax, ecx);
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheOffset));
+ __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeOffset));
__ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
@@ -1144,8 +1144,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_properties));
__ mov(edx, Immediate(Smi::FromInt(flags)));
- Callable callable = CodeFactory::FastCloneShallowObject(
- isolate(), expr->properties_count());
+ Callable callable = CodeFactory::FastCloneShallowObject(isolate());
__ Call(callable.code(), RelocInfo::CODE_TARGET);
RestoreContext();
}
@@ -1676,12 +1675,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Assignment to var or initializing assignment to let/const in harmony
// mode.
MemOperand location = VarOperand(var, ecx);
- if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
- // Check for an uninitialized let binding.
- __ mov(edx, location);
- __ cmp(edx, isolate()->factory()->the_hole_value());
- __ Check(equal, kLetBindingReInitialization);
- }
EmitStoreToStackLocalOrContextSlot(var, location);
}
}
@@ -2147,9 +2140,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- CallRuntimeWithOperands(is_strict(language_mode())
- ? Runtime::kDeleteProperty_Strict
- : Runtime::kDeleteProperty_Sloppy);
+ PushOperand(Smi::FromInt(language_mode()));
+ CallRuntimeWithOperands(Runtime::kDeleteProperty);
context()->Plug(eax);
} else if (proxy != NULL) {
Variable* var = proxy->var();
@@ -2161,7 +2153,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ mov(eax, NativeContextOperand());
__ push(ContextOperand(eax, Context::EXTENSION_INDEX));
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kDeleteProperty_Sloppy);
+ __ Push(Smi::FromInt(SLOPPY));
+ __ CallRuntime(Runtime::kDeleteProperty);
context()->Plug(eax);
} else {
DCHECK(!var->IsLookupSlot());
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 67cff7b0f5..f603af8018 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -8,6 +8,7 @@
#include "src/cancelable-task.h"
#include "src/objects-inl.h"
#include "src/v8.h"
+#include "src/visitors.h"
#include "src/vm-state-inl.h"
namespace v8 {
@@ -608,7 +609,7 @@ bool GlobalHandles::IsWeak(Object** location) {
}
DISABLE_CFI_PERF
-void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
+void GlobalHandles::IterateWeakRoots(RootVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
Node* node = it.node();
if (node->IsWeakRetainer()) {
@@ -620,7 +621,7 @@ void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
node->CollectPhantomCallbackData(isolate(),
&pending_phantom_callbacks_);
} else {
- v->VisitPointer(node->location());
+ v->VisitRootPointer(Root::kGlobalHandles, node->location());
}
}
}
@@ -635,52 +636,17 @@ void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
}
}
-
-void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
+void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(RootVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
if (node->IsStrongRetainer() ||
(node->IsWeakRetainer() && !node->is_independent() &&
node->is_active())) {
- v->VisitPointer(node->location());
- }
- }
-}
-
-
-void GlobalHandles::IdentifyNewSpaceWeakIndependentHandles(
- WeakSlotCallbackWithHeap f) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
- DCHECK(node->is_in_new_space_list());
- if (node->is_independent() && node->IsWeak() &&
- f(isolate_->heap(), node->location())) {
- node->MarkPending();
- }
- }
-}
-
-
-void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
- for (int i = 0; i < new_space_nodes_.length(); ++i) {
- Node* node = new_space_nodes_[i];
- DCHECK(node->is_in_new_space_list());
- if (node->is_independent() && node->IsWeakRetainer()) {
- // Pending weak phantom handles die immediately. Everything else survives.
- if (node->IsPendingPhantomResetHandle()) {
- node->ResetPhantomHandle();
- ++number_of_phantom_handle_resets_;
- } else if (node->IsPendingPhantomCallback()) {
- node->CollectPhantomCallbackData(isolate(),
- &pending_phantom_callbacks_);
- } else {
- v->VisitPointer(node->location());
- }
+ v->VisitRootPointer(Root::kGlobalHandles, node->location());
}
}
}
-
void GlobalHandles::IdentifyWeakUnmodifiedObjects(
WeakSlotCallback is_unmodified) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
@@ -704,8 +670,7 @@ void GlobalHandles::MarkNewSpaceWeakUnmodifiedObjectsPending(
}
}
-template <GlobalHandles::IterationMode mode>
-void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v) {
+void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(RootVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
DCHECK(node->is_in_new_space_list());
@@ -713,36 +678,18 @@ void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v) {
node->IsWeakRetainer()) {
// Pending weak phantom handles die immediately. Everything else survives.
if (node->IsPendingPhantomResetHandle()) {
- if (mode == IterationMode::HANDLE_PHANTOM_NODES ||
- mode == IterationMode::HANDLE_PHANTOM_NODES_VISIT_OTHERS) {
- node->ResetPhantomHandle();
- ++number_of_phantom_handle_resets_;
- }
+ node->ResetPhantomHandle();
+ ++number_of_phantom_handle_resets_;
} else if (node->IsPendingPhantomCallback()) {
- if (mode == IterationMode::HANDLE_PHANTOM_NODES ||
- mode == IterationMode::HANDLE_PHANTOM_NODES_VISIT_OTHERS) {
- node->CollectPhantomCallbackData(isolate(),
- &pending_phantom_callbacks_);
- }
+ node->CollectPhantomCallbackData(isolate(),
+ &pending_phantom_callbacks_);
} else {
- if (mode == IterationMode::VISIT_OTHERS ||
- mode == IterationMode::HANDLE_PHANTOM_NODES_VISIT_OTHERS) {
- v->VisitPointer(node->location());
- }
+ v->VisitRootPointer(Root::kGlobalHandles, node->location());
}
}
}
}
-template void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots<
- GlobalHandles::HANDLE_PHANTOM_NODES>(ObjectVisitor* v);
-
-template void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots<
- GlobalHandles::VISIT_OTHERS>(ObjectVisitor* v);
-
-template void GlobalHandles::IterateNewSpaceWeakUnmodifiedRoots<
- GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(ObjectVisitor* v);
-
void GlobalHandles::InvokeSecondPassPhantomCallbacks(
List<PendingPhantomCallback>* callbacks, Isolate* isolate) {
while (callbacks->length() != 0) {
@@ -928,25 +875,33 @@ int GlobalHandles::PostGarbageCollectionProcessing(
return freed_nodes;
}
-
-void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) {
+void GlobalHandles::IterateStrongRoots(RootVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsStrongRetainer()) {
- v->VisitPointer(it.node()->location());
+ v->VisitRootPointer(Root::kGlobalHandles, it.node()->location());
}
}
}
DISABLE_CFI_PERF
-void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
+void GlobalHandles::IterateAllRoots(RootVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
if (it.node()->IsRetainer()) {
- v->VisitPointer(it.node()->location());
+ v->VisitRootPointer(Root::kGlobalHandles, it.node()->location());
}
}
}
+DISABLE_CFI_PERF
+void GlobalHandles::IterateAllNewSpaceRoots(RootVisitor* v) {
+ for (int i = 0; i < new_space_nodes_.length(); ++i) {
+ Node* node = new_space_nodes_[i];
+ if (node->IsRetainer()) {
+ v->VisitRootPointer(Root::kGlobalHandles, node->location());
+ }
+ }
+}
DISABLE_CFI_PERF
void GlobalHandles::ApplyPersistentHandleVisitor(
@@ -991,30 +946,6 @@ void GlobalHandles::IterateWeakRootsInNewSpaceWithClassIds(
}
}
-
-int GlobalHandles::NumberOfWeakHandles() {
- int count = 0;
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsWeakRetainer()) {
- count++;
- }
- }
- return count;
-}
-
-
-int GlobalHandles::NumberOfGlobalObjectWeakHandles() {
- int count = 0;
- for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsWeakRetainer() &&
- it.node()->object()->IsJSGlobalObject()) {
- count++;
- }
- }
- return count;
-}
-
-
void GlobalHandles::RecordStats(HeapStats* stats) {
*stats->global_handle_count = 0;
*stats->weak_global_handle_count = 0;
@@ -1074,9 +1005,7 @@ void GlobalHandles::Print() {
#endif
-void GlobalHandles::TearDown() {
- // TODO(1428): invoke weak callbacks.
-}
+void GlobalHandles::TearDown() {}
EternalHandles::EternalHandles() : size_(0) {
for (unsigned i = 0; i < arraysize(singleton_handles_); i++) {
@@ -1089,21 +1018,21 @@ EternalHandles::~EternalHandles() {
for (int i = 0; i < blocks_.length(); i++) delete[] blocks_[i];
}
-
-void EternalHandles::IterateAllRoots(ObjectVisitor* visitor) {
+void EternalHandles::IterateAllRoots(RootVisitor* visitor) {
int limit = size_;
for (int i = 0; i < blocks_.length(); i++) {
DCHECK(limit > 0);
Object** block = blocks_[i];
- visitor->VisitPointers(block, block + Min(limit, kSize));
+ visitor->VisitRootPointers(Root::kEternalHandles, block,
+ block + Min(limit, kSize));
limit -= kSize;
}
}
-
-void EternalHandles::IterateNewSpaceRoots(ObjectVisitor* visitor) {
+void EternalHandles::IterateNewSpaceRoots(RootVisitor* visitor) {
for (int i = 0; i < new_space_indices_.length(); i++) {
- visitor->VisitPointer(GetLocation(new_space_indices_[i]));
+ visitor->VisitRootPointer(Root::kEternalHandles,
+ GetLocation(new_space_indices_[i]));
}
}
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 000fc64ecd..c56568de9f 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -18,7 +18,7 @@ namespace v8 {
namespace internal {
class HeapStats;
-class ObjectVisitor;
+class RootVisitor;
// Structure for tracking global handles.
// A single list keeps all the allocated global handles.
@@ -43,12 +43,6 @@ enum WeaknessType {
class GlobalHandles {
public:
- enum IterationMode {
- HANDLE_PHANTOM_NODES_VISIT_OTHERS,
- VISIT_OTHERS,
- HANDLE_PHANTOM_NODES
- };
-
~GlobalHandles();
// Creates a new global handle that is alive until Destroy is called.
@@ -85,13 +79,6 @@ class GlobalHandles {
void RecordStats(HeapStats* stats);
- // Returns the current number of weak handles.
- int NumberOfWeakHandles();
-
- // Returns the current number of weak handles to global objects.
- // These handles are also included in NumberOfWeakHandles().
- int NumberOfGlobalObjectWeakHandles();
-
// Returns the current number of handles to global objects.
int global_handles_count() const {
return number_of_global_handles_;
@@ -125,10 +112,12 @@ class GlobalHandles {
GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags);
// Iterates over all strong handles.
- void IterateStrongRoots(ObjectVisitor* v);
+ void IterateStrongRoots(RootVisitor* v);
// Iterates over all handles.
- void IterateAllRoots(ObjectVisitor* v);
+ void IterateAllRoots(RootVisitor* v);
+
+ void IterateAllNewSpaceRoots(RootVisitor* v);
// Iterates over all handles that have embedder-assigned class ID.
void IterateAllRootsWithClassIds(v8::PersistentHandleVisitor* v);
@@ -142,7 +131,7 @@ class GlobalHandles {
void IterateWeakRootsInNewSpaceWithClassIds(v8::PersistentHandleVisitor* v);
// Iterates over all weak roots in heap.
- void IterateWeakRoots(ObjectVisitor* v);
+ void IterateWeakRoots(RootVisitor* v);
// Find all weak handles satisfying the callback predicate, mark
// them as pending.
@@ -154,15 +143,7 @@ class GlobalHandles {
// may also include old space objects).
  // Iterates over strong and dependent handles. See the note above.
- void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v);
-
- // Finds weak independent or partially independent handles satisfying
- // the callback predicate and marks them as pending. See the note above.
- void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f);
-
- // Iterates over weak independent or partially independent handles.
- // See the note above.
- void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
+ void IterateNewSpaceStrongAndDependentRoots(RootVisitor* v);
// Finds weak independent or unmodified handles satisfying
// the callback predicate and marks them as pending. See the note above.
@@ -171,8 +152,7 @@ class GlobalHandles {
// Iterates over weak independent or unmodified handles.
// See the note above.
- template <IterationMode mode>
- void IterateNewSpaceWeakUnmodifiedRoots(ObjectVisitor* v);
+ void IterateNewSpaceWeakUnmodifiedRoots(RootVisitor* v);
// Identify unmodified objects that are in weak state and marks them
// unmodified
@@ -304,9 +284,9 @@ class EternalHandles {
}
// Iterates over all handles.
- void IterateAllRoots(ObjectVisitor* visitor);
+ void IterateAllRoots(RootVisitor* visitor);
// Iterates over all handles which might be in new space.
- void IterateNewSpaceRoots(ObjectVisitor* visitor);
+ void IterateNewSpaceRoots(RootVisitor* visitor);
// Rebuilds new space list.
void PostGarbageCollectionProcessing(Heap* heap);
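
Editorial aside (not part of the patch): the header changes above complete the ObjectVisitor-to-RootVisitor migration, so every visited slot is now tagged with the root category it came from (Root::kGlobalHandles, Root::kEternalHandles). As orientation, here is a minimal hypothetical consumer; it assumes nothing beyond the two virtual methods actually exercised in the hunks above, and the class name and counting logic are illustrative only.

#include <map>

// Sketch: count visited slots per Root category. Assumes the
// VisitRootPointer/VisitRootPointers signatures used in this diff.
class CountingRootVisitor : public RootVisitor {
 public:
  void VisitRootPointer(Root root, Object** p) override {
    ++counts_[static_cast<int>(root)];
  }
  void VisitRootPointers(Root root, Object** start, Object** end) override {
    counts_[static_cast<int>(root)] += static_cast<int>(end - start);
  }
  int count_for(Root root) const {
    auto it = counts_.find(static_cast<int>(root));
    return it == counts_.end() ? 0 : it->second;
  }

 private:
  std::map<int, int> counts_;
};

// Possible call site, mirroring the iteration entry points above:
//   CountingRootVisitor v;
//   isolate->global_handles()->IterateAllRoots(&v);
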
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 3b4c2a602e..1194472d29 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -118,7 +118,10 @@ namespace internal {
#define V8_DOUBLE_FIELDS_UNBOXING 0
#endif
-#define V8_CONCURRENT_MARKING 0
+// Some types of tracing require the SFI to store a unique ID.
+#if defined(V8_TRACE_MAPS) || defined(V8_TRACE_IGNITION)
+#define V8_SFI_HAS_UNIQUE_ID 1
+#endif
typedef uint8_t byte;
typedef byte* Address;
@@ -590,6 +593,7 @@ enum Executability { NOT_EXECUTABLE, EXECUTABLE };
enum VisitMode {
VISIT_ALL,
+ VISIT_ALL_IN_MINOR_MC_UPDATE,
VISIT_ALL_IN_SCAVENGE,
VISIT_ALL_IN_SWEEP_NEWSPACE,
VISIT_ONLY_STRONG,
@@ -674,6 +678,8 @@ enum InlineCacheState {
enum WhereToStart { kStartAtReceiver, kStartAtPrototype };
+enum ResultSentinel { kNotFound = -1, kUnsupported = -2 };
+
// The Store Buffer (GC).
typedef enum {
kStoreBufferFullEvent,
@@ -1034,12 +1040,12 @@ enum VariableLocation : uint8_t {
kLastVariableLocation = MODULE
};
-// ES6 Draft Rev3 10.2 specifies declarative environment records with mutable
-// and immutable bindings that can be in two states: initialized and
-// uninitialized. In ES5 only immutable bindings have these two states. When
-// accessing a binding, it needs to be checked for initialization. However in
-// the following cases the binding is initialized immediately after creation
-// so the initialization check can always be skipped:
+// ES6 specifies declarative environment records with mutable and immutable
+// bindings that can be in two states: initialized and uninitialized.
+// When accessing a binding, it needs to be checked for initialization.
+// However, in the following cases the binding is initialized immediately
+// after creation so the initialization check can always be skipped:
+//
// 1. Var declared local variables.
// var foo;
// 2. A local variable introduced by a function declaration.
@@ -1048,20 +1054,11 @@ enum VariableLocation : uint8_t {
// function x(foo) {}
// 4. Catch bound variables.
// try {} catch (foo) {}
-// 6. Function variables of named function expressions.
+// 6. Function name variables of named function expressions.
// var x = function foo() {}
// 7. Implicit binding of 'this'.
// 8. Implicit binding of 'arguments' in functions.
//
-// ES5 specified object environment records which are introduced by ES elements
-// such as Program and WithStatement that associate identifier bindings with the
-// properties of some object. In the specification only mutable bindings exist
-// (which may be non-writable) and have no distinct initialization step. However
-// V8 allows const declarations in global code with distinct creation and
-// initialization steps which are represented by non-writable properties in the
-// global object. As a result also these bindings need to be checked for
-// initialization.
-//
// The following enum specifies a flag that indicates if the binding needs a
// distinct initialization step (kNeedsInitialization) or if the binding is
// immediately initialized upon creation (kCreatedInitialized).
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index 17a7bee3a2..cce00a891d 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -7,7 +7,6 @@
#include "src/api.h"
#include "src/handles.h"
-#include "src/heap/heap.h"
#include "src/isolate.h"
namespace v8 {
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 324806443e..3afda94208 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -133,10 +133,13 @@ class Handle final : public HandleBase {
// MaybeHandle to force validation before being used as handles.
static const Handle<T> null() { return Handle<T>(); }
+ // Location equality.
+ bool equals(Handle<T> other) const { return address() == other.address(); }
+
// Provide function object for location equality comparison.
struct equal_to : public std::binary_function<Handle<T>, Handle<T>, bool> {
V8_INLINE bool operator()(Handle<T> lhs, Handle<T> rhs) const {
- return lhs.address() == rhs.address();
+ return lhs.equals(rhs);
}
};
diff --git a/deps/v8/src/heap-symbols.h b/deps/v8/src/heap-symbols.h
index f18dcbabe8..525da14cd3 100644
--- a/deps/v8/src/heap-symbols.h
+++ b/deps/v8/src/heap-symbols.h
@@ -213,9 +213,7 @@
V(intl_resolved_symbol) \
V(megamorphic_symbol) \
V(native_context_index_symbol) \
- V(nonexistent_symbol) \
V(nonextensible_symbol) \
- V(normal_ic_symbol) \
V(not_mapped_symbol) \
V(premonomorphic_symbol) \
V(promise_async_stack_id_symbol) \
diff --git a/deps/v8/src/heap/array-buffer-tracker.cc b/deps/v8/src/heap/array-buffer-tracker.cc
index 5423dfaed9..b4b4757808 100644
--- a/deps/v8/src/heap/array-buffer-tracker.cc
+++ b/deps/v8/src/heap/array-buffer-tracker.cc
@@ -5,6 +5,7 @@
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/heap.h"
+#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
@@ -13,19 +14,16 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() {
CHECK(array_buffers_.empty());
}
-template <LocalArrayBufferTracker::FreeMode free_mode>
-void LocalArrayBufferTracker::Free() {
+template <typename Callback>
+void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
JSArrayBuffer* buffer = reinterpret_cast<JSArrayBuffer*>(it->first);
- // TODO(mlippautz): Create a dependency on the collector to avoid getting
- // the marking state out of thin air.
- if ((free_mode == kFreeAll) ||
- ObjectMarking::IsWhite(buffer, MarkingState::Internal(buffer))) {
+ if (should_free(buffer)) {
const size_t len = it->second;
- heap_->isolate()->array_buffer_allocator()->Free(buffer->backing_store(),
- len);
+ buffer->FreeBackingStore();
+
freed_memory += len;
it = array_buffers_.erase(it);
} else {
@@ -64,8 +62,7 @@ void LocalArrayBufferTracker::Process(Callback callback) {
it = array_buffers_.erase(it);
} else if (result == kRemoveEntry) {
const size_t len = it->second;
- heap_->isolate()->array_buffer_allocator()->Free(
- it->first->backing_store(), len);
+ it->first->FreeBackingStore();
freed_memory += len;
it = array_buffers_.erase(it);
} else {
@@ -88,12 +85,14 @@ void ArrayBufferTracker::FreeDeadInNewSpace(Heap* heap) {
heap->account_external_memory_concurrently_freed();
}
-void ArrayBufferTracker::FreeDead(Page* page) {
+void ArrayBufferTracker::FreeDead(Page* page,
+ const MarkingState& marking_state) {
// Callers need to ensure having the page lock.
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return;
- DCHECK(!page->SweepingDone());
- tracker->Free<LocalArrayBufferTracker::kFreeDead>();
+ tracker->Free([&marking_state](JSArrayBuffer* buffer) {
+ return ObjectMarking::IsWhite(buffer, marking_state);
+ });
if (tracker->IsEmpty()) {
page->ReleaseLocalTracker();
}
@@ -102,7 +101,7 @@ void ArrayBufferTracker::FreeDead(Page* page) {
void ArrayBufferTracker::FreeAll(Page* page) {
LocalArrayBufferTracker* tracker = page->local_tracker();
if (tracker == nullptr) return;
- tracker->Free<LocalArrayBufferTracker::kFreeAll>();
+ tracker->Free([](JSArrayBuffer* buffer) { return true; });
if (tracker->IsEmpty()) {
page->ReleaseLocalTracker();
}
diff --git a/deps/v8/src/heap/array-buffer-tracker.h b/deps/v8/src/heap/array-buffer-tracker.h
index 3a57ab70cd..56f042780e 100644
--- a/deps/v8/src/heap/array-buffer-tracker.h
+++ b/deps/v8/src/heap/array-buffer-tracker.h
@@ -16,6 +16,7 @@ namespace internal {
class Heap;
class JSArrayBuffer;
+class MarkingState;
class Page;
class ArrayBufferTracker : public AllStatic {
@@ -40,7 +41,7 @@ class ArrayBufferTracker : public AllStatic {
// Frees all backing store pointers for dead JSArrayBuffer on a given page.
// Requires marking information to be present. Requires the page lock to be
// taken by the caller.
- static void FreeDead(Page* page);
+ static void FreeDead(Page* page, const MarkingState& marking_state);
// Frees all remaining, live or dead, array buffers on a page. Only useful
// during tear down.
@@ -71,9 +72,15 @@ class LocalArrayBufferTracker {
inline void Add(Key key, const Value& value);
inline Value Remove(Key key);
- // Frees up array buffers determined by |free_mode|.
- template <FreeMode free_mode>
- void Free();
+ // Frees up array buffers.
+ //
+ // Sample usage:
+ //   Free([](JSArrayBuffer* buffer) {
+ //     return should_free_internal(buffer);
+ // });
+ template <typename Callback>
+ void Free(Callback should_free);
// Processes buffers one by one. The CallbackResult of the callback decides
// what action to take on the buffer.
diff --git a/deps/v8/src/heap/concurrent-marking-deque.h b/deps/v8/src/heap/concurrent-marking-deque.h
new file mode 100644
index 0000000000..1490923a2f
--- /dev/null
+++ b/deps/v8/src/heap/concurrent-marking-deque.h
@@ -0,0 +1,175 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CONCURRENT_MARKING_DEQUE_
+#define V8_HEAP_CONCURRENT_MARKING_DEQUE_
+
+#include <deque>
+
+#include "src/base/platform/mutex.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Isolate;
+class HeapObject;
+
+enum class MarkingThread { kMain, kConcurrent };
+
+enum class TargetDeque { kShared, kBailout };
+
+// The concurrent marking deque supports deque operations for two threads:
+// main and concurrent. It is implemented using two deques: shared and bailout.
+//
+// The concurrent thread can use the push and pop operations with the
+// MarkingThread::kConcurrent argument. All other operations are intended
+// to be used by the main thread only.
+//
+// The interface of the concurrent marking deque for the main thread matches
+// that of the sequential marking deque, so they can be easily switched
+// at compile time without updating the main thread call-sites.
+//
+// The shared deque is shared between the main thread and the concurrent
+// thread, so both threads can push to and pop from the shared deque.
+// The bailout deque stores objects that cannot be processed by the concurrent
+// thread. Only the concurrent thread can push to it and only the main thread
+// can pop from it.
+class ConcurrentMarkingDeque {
+ public:
+ // The heap parameter is needed to match the interface
+ // of the sequential marking deque.
+ explicit ConcurrentMarkingDeque(Heap* heap) {}
+
+ // Pushes the object into the specified deque assuming that the function is
+ // called on the specified thread. The main thread can push only to the shared
+ // deque. The concurrent thread can push to both deques.
+ bool Push(HeapObject* object, MarkingThread thread = MarkingThread::kMain,
+ TargetDeque target = TargetDeque::kShared) {
+ switch (target) {
+ case TargetDeque::kShared:
+ shared_deque_.Push(object);
+ break;
+ case TargetDeque::kBailout:
+ bailout_deque_.Push(object);
+ break;
+ }
+ return true;
+ }
+
+ // Pops an object from the bailout or shared deque assuming that the function
+ // is called on the specified thread. The main thread first tries to pop the
+ // bailout deque. If the deque is empty then it tries the shared deque.
+ // If the shared deque is also empty, then the function returns nullptr.
+ // The concurrent thread pops only from the shared deque.
+ HeapObject* Pop(MarkingThread thread = MarkingThread::kMain) {
+ if (thread == MarkingThread::kMain) {
+ HeapObject* result = bailout_deque_.Pop();
+ if (result != nullptr) return result;
+ }
+ return shared_deque_.Pop();
+ }
+
+ // All the following operations can be used only by the main thread.
+ void Clear() {
+ bailout_deque_.Clear();
+ shared_deque_.Clear();
+ }
+
+ bool IsFull() { return false; }
+
+ bool IsEmpty() { return bailout_deque_.IsEmpty() && shared_deque_.IsEmpty(); }
+
+ int Size() { return bailout_deque_.Size() + shared_deque_.Size(); }
+
+ // This is used for a large array with a progress bar.
+ // For simplicity, unshift to the bailout deque so that the concurrent thread
+ // does not see such objects.
+ bool Unshift(HeapObject* object) {
+ bailout_deque_.Unshift(object);
+ return true;
+ }
+
+ // Calls the specified callback on each element of the deques and replaces
+ // the element with the result of the callback. If the callback returns
+ // nullptr then the element is removed from the deque.
+ // The callback must accept HeapObject* and return HeapObject*.
+ template <typename Callback>
+ void Update(Callback callback) {
+ bailout_deque_.Update(callback);
+ shared_deque_.Update(callback);
+ }
+
+ // These empty functions are needed to match the interface
+ // of the sequential marking deque.
+ void SetUp() {}
+ void TearDown() {}
+ void StartUsing() {}
+ void StopUsing() {}
+ void ClearOverflowed() {}
+ void SetOverflowed() {}
+ bool overflowed() const { return false; }
+
+ private:
+ // Simple, slow, and thread-safe deque that forwards all operations to
+ // a lock-protected std::deque.
+ class Deque {
+ public:
+ Deque() { cache_padding_[0] = 0; }
+ void Clear() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ return deque_.clear();
+ }
+ bool IsEmpty() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ return deque_.empty();
+ }
+ int Size() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ return static_cast<int>(deque_.size());
+ }
+ void Push(HeapObject* object) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ deque_.push_back(object);
+ }
+ HeapObject* Pop() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (deque_.empty()) return nullptr;
+ HeapObject* result = deque_.back();
+ deque_.pop_back();
+ return result;
+ }
+ void Unshift(HeapObject* object) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ deque_.push_front(object);
+ }
+ template <typename Callback>
+ void Update(Callback callback) {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ std::deque<HeapObject*> new_deque;
+ for (auto object : deque_) {
+ HeapObject* new_object = callback(object);
+ if (new_object) {
+ new_deque.push_back(new_object);
+ }
+ }
+ deque_.swap(new_deque);
+ }
+
+ private:
+ base::Mutex mutex_;
+ std::deque<HeapObject*> deque_;
+ // Ensure that two deques do not share the same cache line.
+ static int const kCachePadding = 64;
+ char cache_padding_[kCachePadding];
+ };
+ Deque bailout_deque_;
+ Deque shared_deque_;
+ DISALLOW_COPY_AND_ASSIGN(ConcurrentMarkingDeque);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_CONCURRENT_MARKING_DEQUE_
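
Editorial aside (not part of the patch): the essential protocol of the new file above is two lock-protected deques, one shared by both threads and one "bailout" deque that only the concurrent thread fills and only the main thread drains. The standalone sketch below restates that split with int payloads in place of HeapObject* and a single std::mutex for brevity; it illustrates the idea and is not V8 code.

#include <deque>
#include <mutex>
#include <optional>

// Simplified two-deque scheme, mirroring ConcurrentMarkingDeque in spirit:
// both threads push/pop the shared deque; work the concurrent thread cannot
// handle goes to the bailout deque, which only the main thread drains.
class TwoDeque {
 public:
  void PushShared(int v) { std::lock_guard<std::mutex> g(m_); shared_.push_back(v); }
  void PushBailout(int v) { std::lock_guard<std::mutex> g(m_); bailout_.push_back(v); }

  // Main thread: prefer bailout work, then fall back to shared work.
  std::optional<int> PopMain() {
    std::lock_guard<std::mutex> g(m_);
    if (!bailout_.empty()) { int v = bailout_.back(); bailout_.pop_back(); return v; }
    if (!shared_.empty()) { int v = shared_.back(); shared_.pop_back(); return v; }
    return std::nullopt;
  }

  // Concurrent thread: only ever pops shared work.
  std::optional<int> PopConcurrent() {
    std::lock_guard<std::mutex> g(m_);
    if (shared_.empty()) return std::nullopt;
    int v = shared_.back(); shared_.pop_back(); return v;
  }

 private:
  std::mutex m_;  // one mutex for brevity; the V8 class uses one per deque
  std::deque<int> shared_, bailout_;
};
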
diff --git a/deps/v8/src/heap/concurrent-marking.cc b/deps/v8/src/heap/concurrent-marking.cc
index c29e23c4c2..f541828e29 100644
--- a/deps/v8/src/heap/concurrent-marking.cc
+++ b/deps/v8/src/heap/concurrent-marking.cc
@@ -7,9 +7,12 @@
#include <stack>
#include <unordered_map>
+#include "src/heap/concurrent-marking-deque.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/marking.h"
+#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/objects-visiting.h"
#include "src/isolate.h"
#include "src/locked-queue-inl.h"
#include "src/utils-inl.h"
@@ -19,128 +22,254 @@
namespace v8 {
namespace internal {
-class ConcurrentMarkingMarkbits {
+// Helper class for storing in-object slot addresses and values.
+class SlotSnapshot {
public:
- ConcurrentMarkingMarkbits() {}
- ~ConcurrentMarkingMarkbits() {
- for (auto chunk_bitmap : bitmap_) {
- FreeBitmap(chunk_bitmap.second);
+ SlotSnapshot() : number_of_slots_(0) {}
+ int number_of_slots() const { return number_of_slots_; }
+ Object** slot(int i) const { return snapshot_[i].first; }
+ Object* value(int i) const { return snapshot_[i].second; }
+ void clear() { number_of_slots_ = 0; }
+ void add(Object** slot, Object* value) {
+ snapshot_[number_of_slots_].first = slot;
+ snapshot_[number_of_slots_].second = value;
+ ++number_of_slots_;
+ }
+
+ private:
+ static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kPointerSize;
+ int number_of_slots_;
+ std::pair<Object**, Object*> snapshot_[kMaxSnapshotSize];
+ DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
+};
+
+class ConcurrentMarkingVisitor final
+ : public HeapVisitor<int, ConcurrentMarkingVisitor> {
+ public:
+ using BaseClass = HeapVisitor<int, ConcurrentMarkingVisitor>;
+
+ explicit ConcurrentMarkingVisitor(ConcurrentMarkingDeque* deque)
+ : deque_(deque) {}
+
+ bool ShouldVisit(HeapObject* object) override {
+ return ObjectMarking::GreyToBlack<MarkBit::AccessMode::ATOMIC>(
+ object, marking_state(object));
+ }
+
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ for (Object** p = start; p < end; p++) {
+ Object* object = reinterpret_cast<Object*>(
+ base::NoBarrier_Load(reinterpret_cast<const base::AtomicWord*>(p)));
+ if (!object->IsHeapObject()) continue;
+ MarkObject(HeapObject::cast(object));
}
}
- bool Mark(HeapObject* obj) {
- Address address = obj->address();
- MemoryChunk* chunk = MemoryChunk::FromAddress(address);
- if (bitmap_.count(chunk) == 0) {
- bitmap_[chunk] = AllocateBitmap();
+
+ void VisitPointersInSnapshot(const SlotSnapshot& snapshot) {
+ for (int i = 0; i < snapshot.number_of_slots(); i++) {
+ Object* object = snapshot.value(i);
+ if (!object->IsHeapObject()) continue;
+ MarkObject(HeapObject::cast(object));
}
- MarkBit mark_bit =
- bitmap_[chunk]->MarkBitFromIndex(chunk->AddressToMarkbitIndex(address));
- if (mark_bit.Get()) return false;
- mark_bit.Set();
- return true;
}
- Bitmap* AllocateBitmap() {
- return static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
+ // ===========================================================================
+ // JS object =================================================================
+ // ===========================================================================
+
+ int VisitJSObject(Map* map, JSObject* object) override {
+ int size = JSObject::BodyDescriptor::SizeOf(map, object);
+ const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, size);
+ if (!ShouldVisit(object)) return 0;
+ VisitPointersInSnapshot(snapshot);
+ return size;
+ }
+
+ int VisitJSObjectFast(Map* map, JSObject* object) override {
+ return VisitJSObject(map, object);
}
- void FreeBitmap(Bitmap* bitmap) { free(bitmap); }
+ int VisitJSApiObject(Map* map, JSObject* object) override {
+ return VisitJSObject(map, object);
+ }
- private:
- std::unordered_map<MemoryChunk*, Bitmap*> bitmap_;
-};
+ // ===========================================================================
+ // Fixed array object ========================================================
+ // ===========================================================================
-class ConcurrentMarkingVisitor : public ObjectVisitor {
- public:
- ConcurrentMarkingVisitor() : bytes_marked_(0) {}
+ int VisitFixedArray(Map* map, FixedArray* object) override {
+ // TODO(ulan): implement iteration with prefetched length.
+ return BaseClass::VisitFixedArray(map, object);
+ }
- void VisitPointers(Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) {
- if (!(*p)->IsHeapObject()) continue;
- MarkObject(HeapObject::cast(*p));
- }
+ // ===========================================================================
+ // Code object ===============================================================
+ // ===========================================================================
+
+ int VisitCode(Map* map, Code* object) override {
+ deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ return 0;
+ }
+
+ // ===========================================================================
+ // Objects with weak fields and/or side-effectful visitation.
+ // ===========================================================================
+
+ int VisitBytecodeArray(Map* map, BytecodeArray* object) override {
+ // TODO(ulan): implement iteration of strong fields.
+ deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ return 0;
+ }
+
+ int VisitJSFunction(Map* map, JSFunction* object) override {
+ // TODO(ulan): implement iteration of strong fields.
+ deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ return 0;
}
- void MarkObject(HeapObject* obj) {
- if (markbits_.Mark(obj)) {
- bytes_marked_ += obj->Size();
- marking_stack_.push(obj);
+ int VisitMap(Map* map, Map* object) override {
+ // TODO(ulan): implement iteration of strong fields.
+ deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ return 0;
+ }
+
+ int VisitNativeContext(Map* map, Context* object) override {
+ // TODO(ulan): implement iteration of strong fields.
+ deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ return 0;
+ }
+
+ int VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) override {
+ // TODO(ulan): implement iteration of strong fields.
+ deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ return 0;
+ }
+
+ int VisitTransitionArray(Map* map, TransitionArray* object) override {
+ // TODO(ulan): implement iteration of strong fields.
+ deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ return 0;
+ }
+
+ int VisitWeakCell(Map* map, WeakCell* object) override {
+ // TODO(ulan): implement iteration of strong fields.
+ deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ return 0;
+ }
+
+ int VisitJSWeakCollection(Map* map, JSWeakCollection* object) override {
+ // TODO(ulan): implement iteration of strong fields.
+ deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kBailout);
+ return 0;
+ }
+
+ void MarkObject(HeapObject* object) {
+ if (ObjectMarking::WhiteToGrey<MarkBit::AccessMode::ATOMIC>(
+ object, marking_state(object))) {
+ deque_->Push(object, MarkingThread::kConcurrent, TargetDeque::kShared);
}
}
- void MarkTransitively() {
- while (!marking_stack_.empty()) {
- HeapObject* obj = marking_stack_.top();
- marking_stack_.pop();
- obj->Iterate(this);
+ private:
+ // Helper class for collecting in-object slot addresses and values.
+ class SlotSnapshottingVisitor final : public ObjectVisitor {
+ public:
+ explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
+ : slot_snapshot_(slot_snapshot) {
+ slot_snapshot_->clear();
}
+
+ void VisitPointers(HeapObject* host, Object** start,
+ Object** end) override {
+ for (Object** p = start; p < end; p++) {
+ Object* object = reinterpret_cast<Object*>(
+ base::NoBarrier_Load(reinterpret_cast<const base::AtomicWord*>(p)));
+ slot_snapshot_->add(p, object);
+ }
+ }
+
+ private:
+ SlotSnapshot* slot_snapshot_;
+ };
+
+ const SlotSnapshot& MakeSlotSnapshot(Map* map, HeapObject* object, int size) {
+ // TODO(ulan): Iterate only the existing fields and skip slack at the end
+ // of the object.
+ SlotSnapshottingVisitor visitor(&slot_snapshot_);
+ visitor.VisitPointer(object,
+ reinterpret_cast<Object**>(object->map_slot()));
+ JSObject::BodyDescriptor::IterateBody(object, size, &visitor);
+ return slot_snapshot_;
}
- size_t bytes_marked() { return bytes_marked_; }
+ MarkingState marking_state(HeapObject* object) const {
+ return MarkingState::Internal(object);
+ }
- private:
- size_t bytes_marked_;
- std::stack<HeapObject*> marking_stack_;
- ConcurrentMarkingMarkbits markbits_;
+ ConcurrentMarkingDeque* deque_;
+ SlotSnapshot slot_snapshot_;
};
class ConcurrentMarking::Task : public CancelableTask {
public:
- Task(Heap* heap, std::vector<HeapObject*>* root_set,
+ Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
base::Semaphore* on_finish)
- : CancelableTask(heap->isolate()),
- heap_(heap),
- on_finish_(on_finish),
- root_set_(root_set) {}
+ : CancelableTask(isolate),
+ concurrent_marking_(concurrent_marking),
+ on_finish_(on_finish) {}
virtual ~Task() {}
private:
// v8::internal::CancelableTask overrides.
void RunInternal() override {
- double time_ms = heap_->MonotonicallyIncreasingTimeInMs();
- {
- TimedScope scope(&time_ms);
- for (HeapObject* obj : *root_set_) {
- marking_visitor_.MarkObject(obj);
- }
- marking_visitor_.MarkTransitively();
- }
- if (FLAG_trace_concurrent_marking) {
- heap_->isolate()->PrintWithTimestamp(
- "concurrently marked %dKB in %.2fms\n",
- static_cast<int>(marking_visitor_.bytes_marked() / KB), time_ms);
- }
+ concurrent_marking_->Run();
on_finish_->Signal();
}
- Heap* heap_;
+ ConcurrentMarking* concurrent_marking_;
base::Semaphore* on_finish_;
- ConcurrentMarkingVisitor marking_visitor_;
- std::vector<HeapObject*>* root_set_;
DISALLOW_COPY_AND_ASSIGN(Task);
};
-ConcurrentMarking::ConcurrentMarking(Heap* heap)
- : heap_(heap), pending_task_semaphore_(0), is_task_pending_(false) {
- // Concurrent marking does not work with double unboxing.
- STATIC_ASSERT(!(V8_CONCURRENT_MARKING && V8_DOUBLE_FIELDS_UNBOXING));
+ConcurrentMarking::ConcurrentMarking(Heap* heap, ConcurrentMarkingDeque* deque)
+ : heap_(heap),
+ pending_task_semaphore_(0),
+ deque_(deque),
+ visitor_(new ConcurrentMarkingVisitor(deque_)),
+ is_task_pending_(false) {
// The runtime flag should be set only if the compile time flag was set.
- CHECK(!FLAG_concurrent_marking || V8_CONCURRENT_MARKING);
+#ifndef V8_CONCURRENT_MARKING
+ CHECK(!FLAG_concurrent_marking);
+#endif
}
-ConcurrentMarking::~ConcurrentMarking() {}
+ConcurrentMarking::~ConcurrentMarking() { delete visitor_; }
-void ConcurrentMarking::AddRoot(HeapObject* object) {
- root_set_.push_back(object);
+void ConcurrentMarking::Run() {
+ double time_ms = heap_->MonotonicallyIncreasingTimeInMs();
+ size_t bytes_marked = 0;
+ base::Mutex* relocation_mutex = heap_->relocation_mutex();
+ {
+ TimedScope scope(&time_ms);
+ HeapObject* object;
+ while ((object = deque_->Pop(MarkingThread::kConcurrent)) != nullptr) {
+ base::LockGuard<base::Mutex> guard(relocation_mutex);
+ bytes_marked += visitor_->Visit(object);
+ }
+ }
+ if (FLAG_trace_concurrent_marking) {
+ heap_->isolate()->PrintWithTimestamp("concurrently marked %dKB in %.2fms\n",
+ static_cast<int>(bytes_marked / KB),
+ time_ms);
+ }
}
void ConcurrentMarking::StartTask() {
if (!FLAG_concurrent_marking) return;
is_task_pending_ = true;
-
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new Task(heap_, &root_set_, &pending_task_semaphore_),
+ new Task(heap_->isolate(), this, &pending_task_semaphore_),
v8::Platform::kShortRunningTask);
}
@@ -148,7 +277,6 @@ void ConcurrentMarking::WaitForTaskToComplete() {
if (!FLAG_concurrent_marking) return;
pending_task_semaphore_.Wait();
is_task_pending_ = false;
- root_set_.clear();
}
void ConcurrentMarking::EnsureTaskCompleted() {
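
Editorial aside (not part of the patch): the visitor above is built around one invariant, namely that the concurrent thread must never chase pointers out of an object the main thread might still be mutating. VisitJSObject therefore first copies the slots with relaxed atomic loads and then marks from the copy. A standalone sketch of that copy-then-process pattern, with plain atomic ints standing in for tagged slots (illustrative only):

#include <atomic>
#include <cstddef>
#include <vector>

// Copy-then-process: read each slot once with a relaxed atomic load into a
// local snapshot, then operate on the snapshot rather than the live slots.
// Mirrors SlotSnapshottingVisitor + VisitPointersInSnapshot above in spirit.
std::vector<int> SnapshotSlots(const std::atomic<int>* slots, size_t n) {
  std::vector<int> snapshot;
  snapshot.reserve(n);
  for (size_t i = 0; i < n; ++i) {
    snapshot.push_back(slots[i].load(std::memory_order_relaxed));
  }
  return snapshot;  // the caller processes this copy, not the live slots
}
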
diff --git a/deps/v8/src/heap/concurrent-marking.h b/deps/v8/src/heap/concurrent-marking.h
index 9d7b6b58ca..134fa38f64 100644
--- a/deps/v8/src/heap/concurrent-marking.h
+++ b/deps/v8/src/heap/concurrent-marking.h
@@ -5,8 +5,6 @@
#ifndef V8_HEAP_CONCURRENT_MARKING_
#define V8_HEAP_CONCURRENT_MARKING_
-#include <vector>
-
#include "src/allocation.h"
#include "src/cancelable-task.h"
#include "src/utils.h"
@@ -15,16 +13,16 @@
namespace v8 {
namespace internal {
+class ConcurrentMarkingDeque;
+class ConcurrentMarkingVisitor;
class Heap;
class Isolate;
class ConcurrentMarking {
public:
- explicit ConcurrentMarking(Heap* heap);
+ ConcurrentMarking(Heap* heap, ConcurrentMarkingDeque* deque_);
~ConcurrentMarking();
- void AddRoot(HeapObject* object);
-
void StartTask();
void WaitForTaskToComplete();
bool IsTaskPending() { return is_task_pending_; }
@@ -32,10 +30,12 @@ class ConcurrentMarking {
private:
class Task;
+ void Run();
Heap* heap_;
base::Semaphore pending_task_semaphore_;
+ ConcurrentMarkingDeque* deque_;
+ ConcurrentMarkingVisitor* visitor_;
bool is_task_pending_;
- std::vector<HeapObject*> root_set_;
};
} // namespace internal
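
Editorial aside (not part of the patch): the StartTask/WaitForTaskToComplete pair above is a one-shot handshake, in which the main thread posts a background task and later blocks on pending_task_semaphore_ until Task::RunInternal signals it. The same shape in portable C++, using a condition variable in place of base::Semaphore (a sketch, not the V8 implementation):

#include <condition_variable>
#include <mutex>
#include <thread>

// One-shot handshake: main starts a worker, later blocks until it signals.
class OneShot {
 public:
  void Signal() {
    { std::lock_guard<std::mutex> g(m_); done_ = true; }
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> l(m_);
    cv_.wait(l, [this] { return done_; });
  }

 private:
  std::mutex m_;
  std::condition_variable cv_;
  bool done_ = false;
};

int main() {
  OneShot sem;
  std::thread worker([&] { /* ... do marking work ... */ sem.Signal(); });
  // The main thread can do other work here, then:
  sem.Wait();  // WaitForTaskToComplete analogue
  worker.join();
}
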
diff --git a/deps/v8/src/heap/gc-tracer.cc b/deps/v8/src/heap/gc-tracer.cc
index 45d55dc68b..46d5bb66ee 100644
--- a/deps/v8/src/heap/gc-tracer.cc
+++ b/deps/v8/src/heap/gc-tracer.cc
@@ -527,13 +527,49 @@ void GCTracer::PrintNVP() const {
"mutator=%.1f "
"gc=%s "
"reduce_memory=%d "
+ "minor_mc=%.2f "
+ "finish_sweeping=%.2f "
"mark=%.2f "
+ "mark.identify_global_handles=%.2f "
+ "mark.seed=%.2f "
"mark.roots=%.2f "
- "mark.old_to_new=%.2f\n",
+ "mark.weak=%.2f "
+ "mark.global_handles=%.2f "
+ "clear=%.2f "
+ "clear.string_table=%.2f "
+ "clear.weak_lists=%.2f "
+ "evacuate=%.2f "
+ "evacuate.copy=%.2f "
+ "evacuate.update_pointers=%.2f "
+ "evacuate.update_pointers.to_new=%.2f "
+ "evacuate.update_pointers.to_new.tospace=%.2f "
+ "evacuate.update_pointers.to_new.roots=%.2f "
+ "evacuate.update_pointers.to_new.old=%.2f "
+ "update_marking_deque=%.2f "
+ "reset_liveness=%.2f\n",
duration, spent_in_mutator, "mmc", current_.reduce_memory,
+ current_.scopes[Scope::MINOR_MC],
+ current_.scopes[Scope::MINOR_MC_SWEEPING],
current_.scopes[Scope::MINOR_MC_MARK],
+ current_.scopes[Scope::MINOR_MC_MARK_IDENTIFY_GLOBAL_HANDLES],
+ current_.scopes[Scope::MINOR_MC_MARK_SEED],
current_.scopes[Scope::MINOR_MC_MARK_ROOTS],
- current_.scopes[Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS]);
+ current_.scopes[Scope::MINOR_MC_MARK_WEAK],
+ current_.scopes[Scope::MINOR_MC_MARK_GLOBAL_HANDLES],
+ current_.scopes[Scope::MINOR_MC_CLEAR],
+ current_.scopes[Scope::MINOR_MC_CLEAR_STRING_TABLE],
+ current_.scopes[Scope::MINOR_MC_CLEAR_WEAK_LISTS],
+ current_.scopes[Scope::MINOR_MC_EVACUATE],
+ current_.scopes[Scope::MINOR_MC_EVACUATE_COPY],
+ current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS],
+ current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW],
+ current_
+ .scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE],
+ current_
+ .scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS],
+ current_.scopes[Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD],
+ current_.scopes[Scope::MINOR_MC_MARKING_DEQUE],
+ current_.scopes[Scope::MINOR_MC_RESET_LIVENESS]);
break;
case Event::MARK_COMPACTOR:
case Event::INCREMENTAL_MARK_COMPACTOR:
diff --git a/deps/v8/src/heap/gc-tracer.h b/deps/v8/src/heap/gc-tracer.h
index d971021cf5..96b21c6712 100644
--- a/deps/v8/src/heap/gc-tracer.h
+++ b/deps/v8/src/heap/gc-tracer.h
@@ -10,7 +10,7 @@
#include "src/base/ring-buffer.h"
#include "src/counters.h"
#include "src/globals.h"
-#include "testing/gtest/include/gtest/gtest_prod.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
namespace internal {
@@ -34,67 +34,85 @@ enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };
F(MC_INCREMENTAL_EXTERNAL_EPILOGUE) \
F(MC_INCREMENTAL_EXTERNAL_PROLOGUE)
-#define TRACER_SCOPES(F) \
- INCREMENTAL_SCOPES(F) \
- F(HEAP_EPILOGUE) \
- F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
- F(HEAP_EXTERNAL_EPILOGUE) \
- F(HEAP_EXTERNAL_PROLOGUE) \
- F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
- F(HEAP_PROLOGUE) \
- F(MC_CLEAR) \
- F(MC_CLEAR_CODE_FLUSH) \
- F(MC_CLEAR_DEPENDENT_CODE) \
- F(MC_CLEAR_MAPS) \
- F(MC_CLEAR_SLOTS_BUFFER) \
- F(MC_CLEAR_STORE_BUFFER) \
- F(MC_CLEAR_STRING_TABLE) \
- F(MC_CLEAR_WEAK_CELLS) \
- F(MC_CLEAR_WEAK_COLLECTIONS) \
- F(MC_CLEAR_WEAK_LISTS) \
- F(MC_EPILOGUE) \
- F(MC_EVACUATE) \
- F(MC_EVACUATE_CANDIDATES) \
- F(MC_EVACUATE_CLEAN_UP) \
- F(MC_EVACUATE_COPY) \
- F(MC_EVACUATE_EPILOGUE) \
- F(MC_EVACUATE_PROLOGUE) \
- F(MC_EVACUATE_REBALANCE) \
- F(MC_EVACUATE_UPDATE_POINTERS) \
- F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
- F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
- F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
- F(MC_FINISH) \
- F(MC_MARK) \
- F(MC_MARK_FINISH_INCREMENTAL) \
- F(MC_MARK_PREPARE_CODE_FLUSH) \
- F(MC_MARK_ROOTS) \
- F(MC_MARK_WEAK_CLOSURE) \
- F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
- F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
- F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
- F(MC_MARK_WEAK_CLOSURE_HARMONY) \
- F(MC_MARK_WRAPPER_EPILOGUE) \
- F(MC_MARK_WRAPPER_PROLOGUE) \
- F(MC_MARK_WRAPPER_TRACING) \
- F(MC_PROLOGUE) \
- F(MC_SWEEP) \
- F(MC_SWEEP_CODE) \
- F(MC_SWEEP_MAP) \
- F(MC_SWEEP_OLD) \
- F(MC_MINOR_MC) \
- F(MINOR_MC_MARK) \
- F(MINOR_MC_MARK_CODE_FLUSH_CANDIDATES) \
- F(MINOR_MC_MARK_GLOBAL_HANDLES) \
- F(MINOR_MC_MARK_OLD_TO_NEW_POINTERS) \
- F(MINOR_MC_MARK_ROOTS) \
- F(MINOR_MC_MARK_WEAK) \
- F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
- F(SCAVENGER_EVACUATE) \
- F(SCAVENGER_OLD_TO_NEW_POINTERS) \
- F(SCAVENGER_ROOTS) \
- F(SCAVENGER_SCAVENGE) \
- F(SCAVENGER_SEMISPACE) \
+#define TRACER_SCOPES(F) \
+ INCREMENTAL_SCOPES(F) \
+ F(HEAP_EPILOGUE) \
+ F(HEAP_EPILOGUE_REDUCE_NEW_SPACE) \
+ F(HEAP_EXTERNAL_EPILOGUE) \
+ F(HEAP_EXTERNAL_PROLOGUE) \
+ F(HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES) \
+ F(HEAP_PROLOGUE) \
+ F(MC_CLEAR) \
+ F(MC_CLEAR_CODE_FLUSH) \
+ F(MC_CLEAR_DEPENDENT_CODE) \
+ F(MC_CLEAR_MAPS) \
+ F(MC_CLEAR_SLOTS_BUFFER) \
+ F(MC_CLEAR_STORE_BUFFER) \
+ F(MC_CLEAR_STRING_TABLE) \
+ F(MC_CLEAR_WEAK_CELLS) \
+ F(MC_CLEAR_WEAK_COLLECTIONS) \
+ F(MC_CLEAR_WEAK_LISTS) \
+ F(MC_EPILOGUE) \
+ F(MC_EVACUATE) \
+ F(MC_EVACUATE_CANDIDATES) \
+ F(MC_EVACUATE_CLEAN_UP) \
+ F(MC_EVACUATE_COPY) \
+ F(MC_EVACUATE_EPILOGUE) \
+ F(MC_EVACUATE_PROLOGUE) \
+ F(MC_EVACUATE_REBALANCE) \
+ F(MC_EVACUATE_UPDATE_POINTERS) \
+ F(MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED) \
+ F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
+ F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
+ F(MC_FINISH) \
+ F(MC_MARK) \
+ F(MC_MARK_FINISH_INCREMENTAL) \
+ F(MC_MARK_PREPARE_CODE_FLUSH) \
+ F(MC_MARK_ROOTS) \
+ F(MC_MARK_WEAK_CLOSURE) \
+ F(MC_MARK_WEAK_CLOSURE_EPHEMERAL) \
+ F(MC_MARK_WEAK_CLOSURE_WEAK_HANDLES) \
+ F(MC_MARK_WEAK_CLOSURE_WEAK_ROOTS) \
+ F(MC_MARK_WEAK_CLOSURE_HARMONY) \
+ F(MC_MARK_WRAPPER_EPILOGUE) \
+ F(MC_MARK_WRAPPER_PROLOGUE) \
+ F(MC_MARK_WRAPPER_TRACING) \
+ F(MC_PROLOGUE) \
+ F(MC_SWEEP) \
+ F(MC_SWEEP_CODE) \
+ F(MC_SWEEP_MAP) \
+ F(MC_SWEEP_OLD) \
+ F(MINOR_MC) \
+ F(MINOR_MC_CLEAR) \
+ F(MINOR_MC_CLEAR_STRING_TABLE) \
+ F(MINOR_MC_CLEAR_WEAK_LISTS) \
+ F(MINOR_MC_EVACUATE) \
+ F(MINOR_MC_EVACUATE_CLEAN_UP) \
+ F(MINOR_MC_EVACUATE_COPY) \
+ F(MINOR_MC_EVACUATE_EPILOGUE) \
+ F(MINOR_MC_EVACUATE_PROLOGUE) \
+ F(MINOR_MC_EVACUATE_REBALANCE) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD) \
+ F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
+ F(MINOR_MC_MARK) \
+ F(MINOR_MC_MARK_GLOBAL_HANDLES) \
+ F(MINOR_MC_MARK_IDENTIFY_GLOBAL_HANDLES) \
+ F(MINOR_MC_MARK_SEED) \
+ F(MINOR_MC_MARK_ROOTS) \
+ F(MINOR_MC_MARK_WEAK) \
+ F(MINOR_MC_MARKING_DEQUE) \
+ F(MINOR_MC_RESET_LIVENESS) \
+ F(MINOR_MC_SWEEPING) \
+ F(SCAVENGER_CODE_FLUSH_CANDIDATES) \
+ F(SCAVENGER_EVACUATE) \
+ F(SCAVENGER_OLD_TO_NEW_POINTERS) \
+ F(SCAVENGER_ROOTS) \
+ F(SCAVENGER_SCAVENGE) \
+ F(SCAVENGER_SEMISPACE) \
F(SCAVENGER_WEAK)
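TRACER_SCOPES is an X-macro: the single list above is expanded with different definitions of F to generate the Scope enum and a matching name table, which keeps the two in sync when entries such as the MINOR_MC_* scopes are added or renamed. A self-contained sketch of the idiom:

    #include <cstdio>

    // One list of names, expanded twice with different F definitions.
    #define DEMO_SCOPES(F) \
      F(MARK)              \
      F(SWEEP)             \
      F(EVACUATE)

    enum Scope {
    #define DEFINE_ENUM(name) name,
      DEMO_SCOPES(DEFINE_ENUM)
    #undef DEFINE_ENUM
      NUMBER_OF_SCOPES
    };

    static const char* kScopeNames[] = {
    #define DEFINE_NAME(name) #name,
        DEMO_SCOPES(DEFINE_NAME)
    #undef DEFINE_NAME
    };

    int main() {
      for (int i = 0; i < NUMBER_OF_SCOPES; i++) {
        std::printf("%d -> %s\n", i, kScopeNames[i]);
      }
      return 0;
    }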
#define TRACE_GC(tracer, scope_id) \
diff --git a/deps/v8/src/heap/heap-inl.h b/deps/v8/src/heap/heap-inl.h
index a0e731119c..87aac8731d 100644
--- a/deps/v8/src/heap/heap-inl.h
+++ b/deps/v8/src/heap/heap-inl.h
@@ -23,6 +23,7 @@
#include "src/msan.h"
#include "src/objects-inl.h"
#include "src/objects/scope-info.h"
+#include "src/string-hasher.h"
namespace v8 {
namespace internal {
@@ -37,23 +38,21 @@ HeapObject* AllocationResult::ToObjectChecked() {
return HeapObject::cast(object_);
}
-void PromotionQueue::insert(HeapObject* target, int32_t size,
- bool was_marked_black) {
+void PromotionQueue::insert(HeapObject* target, int32_t size) {
if (emergency_stack_ != NULL) {
- emergency_stack_->Add(Entry(target, size, was_marked_black));
+ emergency_stack_->Add(Entry(target, size));
return;
}
if ((rear_ - 1) < limit_) {
RelocateQueueHead();
- emergency_stack_->Add(Entry(target, size, was_marked_black));
+ emergency_stack_->Add(Entry(target, size));
return;
}
struct Entry* entry = reinterpret_cast<struct Entry*>(--rear_);
entry->obj_ = target;
entry->size_ = size;
- entry->was_marked_black_ = was_marked_black;
// Assert no overflow into live objects.
#ifdef DEBUG
@@ -62,21 +61,18 @@ void PromotionQueue::insert(HeapObject* target, int32_t size,
#endif
}
-void PromotionQueue::remove(HeapObject** target, int32_t* size,
- bool* was_marked_black) {
+void PromotionQueue::remove(HeapObject** target, int32_t* size) {
DCHECK(!is_empty());
if (front_ == rear_) {
Entry e = emergency_stack_->RemoveLast();
*target = e.obj_;
*size = e.size_;
- *was_marked_black = e.was_marked_black_;
return;
}
struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
*target = entry->obj_;
*size = entry->size_;
- *was_marked_black = entry->was_marked_black_;
// Assert no underflow.
SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
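The hunks above shrink each PromotionQueue entry to (object, size) now that was_marked_black no longer needs to travel with it. A toy model of the queue's two-tier storage, with a fixed buffer growing downward and an emergency vector absorbing overflow; removal order is simplified to LIFO here, whereas the real queue tracks separate front_ and rear_ pointers:

    #include <cassert>
    #include <vector>

    struct Entry {
      void* obj;
      int size;
    };

    class PromotionQueueSketch {
     public:
      void insert(void* obj, int size) {
        if (rear_ == 0) {  // primary storage exhausted
          emergency_.push_back({obj, size});
          return;
        }
        buffer_[--rear_] = {obj, size};  // grows downward, like rear_ above
      }
      Entry remove() {
        if (!emergency_.empty()) {
          Entry e = emergency_.back();
          emergency_.pop_back();
          return e;
        }
        assert(rear_ < kCapacity);
        return buffer_[rear_++];
      }
      bool is_empty() const { return rear_ == kCapacity && emergency_.empty(); }

     private:
      static const int kCapacity = 4;
      Entry buffer_[kCapacity];
      int rear_ = kCapacity;
      std::vector<Entry> emergency_;
    };

    int main() {
      PromotionQueueSketch q;
      int dummy;
      for (int i = 0; i < 6; i++) q.insert(&dummy, i);  // 2 entries overflow
      while (!q.is_empty()) q.remove();
      return 0;
    }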
@@ -239,7 +235,7 @@ AllocationResult Heap::AllocateOneByteInternalizedString(
}
// String maps are all immortal immovable objects.
- result->set_map_no_write_barrier(map);
+ result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(str.length());
@@ -270,7 +266,7 @@ AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
if (!allocation.To(&result)) return allocation;
}
- result->set_map(map);
+ result->set_map_after_allocation(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(str.length());
@@ -706,18 +702,20 @@ void Heap::ExternalStringTable::AddString(String* string) {
}
}
-void Heap::ExternalStringTable::IterateNewSpaceStrings(ObjectVisitor* v) {
+void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) {
if (!new_space_strings_.is_empty()) {
Object** start = &new_space_strings_[0];
- v->VisitPointers(start, start + new_space_strings_.length());
+ v->VisitRootPointers(Root::kExternalStringsTable, start,
+ start + new_space_strings_.length());
}
}
-void Heap::ExternalStringTable::IterateAll(ObjectVisitor* v) {
+void Heap::ExternalStringTable::IterateAll(RootVisitor* v) {
IterateNewSpaceStrings(v);
if (!old_space_strings_.is_empty()) {
Object** start = &old_space_strings_[0];
- v->VisitPointers(start, start + old_space_strings_.length());
+ v->VisitRootPointers(Root::kExternalStringsTable, start,
+ start + old_space_strings_.length());
}
}
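This is part of the patch-wide split of ObjectVisitor into object and root visitation: callers that iterate roots now implement RootVisitor and receive a Root tag identifying which root list a pointer range belongs to. A minimal sketch of the shape of that interface (abbreviated names, not the full V8 hierarchy):

    #include <cstdio>

    class Object;  // opaque; only pointers are handled here

    enum class Root { kStringTable, kExternalStringsTable, kStrongRootList };

    class RootVisitor {
     public:
      virtual ~RootVisitor() = default;
      virtual void VisitRootPointers(Root root, Object** start,
                                     Object** end) = 0;
      virtual void VisitRootPointer(Root root, Object** p) {
        VisitRootPointers(root, p, p + 1);  // single-slot convenience
      }
    };

    class CountingVisitor : public RootVisitor {
     public:
      void VisitRootPointers(Root root, Object** start, Object** end) override {
        std::printf("root %d: %td slots\n", static_cast<int>(root), end - start);
      }
    };

    int main() {
      Object* roots[3] = {nullptr, nullptr, nullptr};
      CountingVisitor v;
      v.VisitRootPointers(Root::kExternalStringsTable, roots, roots + 3);
      return 0;
    }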
@@ -793,12 +791,18 @@ void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
}
void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) {
- DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero);
+ // TODO(tebbi): Remove second half of DCHECK once
+ // FLAG_harmony_restrict_constructor_return is gone.
+ DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero ||
+ construct_stub_create_deopt_pc_offset() == Smi::FromInt(pc_offset));
set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset));
}
void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) {
- DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero);
+ // TODO(tebbi): Remove second half of DCHECK once
+ // FLAG_harmony_restrict_constructor_return is gone.
+ DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero ||
+ construct_stub_invoke_deopt_pc_offset() == Smi::FromInt(pc_offset));
set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset));
}
@@ -850,13 +854,21 @@ AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
heap_->always_allocate_scope_count_.Increment(1);
}
-
AlwaysAllocateScope::~AlwaysAllocateScope() {
heap_->always_allocate_scope_count_.Decrement(1);
}
+void VerifyPointersVisitor::VisitPointers(HeapObject* host, Object** start,
+ Object** end) {
+ VerifyPointers(start, end);
+}
-void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
+void VerifyPointersVisitor::VisitRootPointers(Root root, Object** start,
+ Object** end) {
+ VerifyPointers(start, end);
+}
+
+void VerifyPointersVisitor::VerifyPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
@@ -868,8 +880,8 @@ void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
}
}
-
-void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
+void VerifySmisVisitor::VisitRootPointers(Root root, Object** start,
+ Object** end) {
for (Object** current = start; current < end; current++) {
CHECK((*current)->IsSmi());
}
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 75d3f365ef..ad3bfef559 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -87,7 +87,6 @@ Heap::Heap()
initial_old_generation_size_(max_old_generation_size_ /
kInitalOldGenerationLimitFactor),
old_generation_size_configured_(false),
- max_executable_size_(256ul * (kPointerSize / 4) * MB),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
// Will be 4 * reserved_semispace_size_ to ensure that young
@@ -897,7 +896,8 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) {
if (gc_reason == GarbageCollectionReason::kLastResort) {
InvokeOutOfMemoryCallback();
}
- RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GC_AllAvailableGarbage);
+ RuntimeCallTimerScope runtime_timer(
+ isolate(), &RuntimeCallStats::GC_AllAvailableGarbage);
if (isolate()->concurrent_recompilation_enabled()) {
// The optimizing compiler may be unnecessarily holding on to memory.
DisallowHeapAllocation no_recursive_gc;
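The change above (and the matching ones in CollectGarbage and the GC callback functions below) fixes a classic RAII slip: RuntimeCallTimerScope(isolate(), ...) constructs an unnamed temporary that is destroyed at the end of the full expression, so the timer covered nothing. Naming the object keeps it alive for the rest of the block. A sketch with an invented Timer type:

    #include <cstdio>

    struct Timer {
      Timer() { std::puts("timer start"); }
      ~Timer() { std::puts("timer stop"); }
    };

    void DoWork() { std::puts("...work..."); }

    int main() {
      {
        Timer();      // BUG: temporary; the timer stops before DoWork() runs
        DoWork();
      }
      {
        Timer timer;  // FIX: named object lives until the end of the scope
        DoWork();
      }
      return 0;
    }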
@@ -979,8 +979,8 @@ bool Heap::CollectGarbage(GarbageCollector collector,
const char* collector_reason,
const v8::GCCallbackFlags gc_callback_flags) {
// The VM is in the GC state until exiting this function.
- VMState<GC> state(isolate_);
- RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GC);
+ VMState<GC> state(isolate());
+ RuntimeCallTimerScope runtime_timer(isolate(), &RuntimeCallStats::GC);
#ifdef DEBUG
// Reset the allocation timeout to the GC interval, but make sure to
@@ -1115,10 +1115,12 @@ void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
}
}
-void Heap::StartIdleIncrementalMarking(GarbageCollectionReason gc_reason) {
+void Heap::StartIdleIncrementalMarking(
+ GarbageCollectionReason gc_reason,
+ const GCCallbackFlags gc_callback_flags) {
gc_idle_time_handler_->ResetNoProgressCounter();
StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason,
- kNoGCCallbackFlags);
+ gc_callback_flags);
}
@@ -1137,7 +1139,7 @@ void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
// Helper class for verifying the string table.
class StringTableVerifier : public ObjectVisitor {
public:
- void VisitPointers(Object** start, Object** end) override {
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject()) {
@@ -1167,13 +1169,13 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
for (int space = NEW_SPACE; space < SerializerDeserializer::kNumberOfSpaces;
space++) {
Reservation* reservation = &reservations[space];
- DCHECK_LE(1, reservation->length());
+ DCHECK_LE(1, reservation->size());
if (reservation->at(0).size == 0) continue;
bool perform_gc = false;
if (space == MAP_SPACE) {
// We allocate each map individually to avoid fragmentation.
maps->Clear();
- DCHECK_EQ(1, reservation->length());
+ DCHECK_EQ(1, reservation->size());
int num_maps = reservation->at(0).size / Map::kSize;
for (int i = 0; i < num_maps; i++) {
// The deserializer will update the skip list.
@@ -1194,7 +1196,7 @@ bool Heap::ReserveSpace(Reservation* reservations, List<Address>* maps) {
}
} else if (space == LO_SPACE) {
// Just check that we can allocate during deserialization.
- DCHECK_EQ(1, reservation->length());
+ DCHECK_EQ(1, reservation->size());
perform_gc = !CanExpandOldGeneration(reservation->at(0).size);
} else {
for (auto& chunk : *reservation) {
@@ -1354,8 +1356,7 @@ bool Heap::PerformGarbageCollection(
break;
case SCAVENGER:
if ((fast_promotion_mode_ &&
- CanExpandOldGeneration(new_space()->Size())) ||
- concurrent_marking_->IsTaskPending()) {
+ CanExpandOldGeneration(new_space()->Size()))) {
tracer()->NotifyYoungGenerationHandling(
YoungGenerationHandling::kFastPromotionDuringScavenge);
EvacuateYoungGeneration();
@@ -1431,7 +1432,8 @@ bool Heap::PerformGarbageCollection(
void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
- RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GCPrologueCallback);
+ RuntimeCallTimerScope runtime_timer(isolate(),
+ &RuntimeCallStats::GCPrologueCallback);
for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
if (gc_type & gc_prologue_callbacks_[i].gc_type) {
if (!gc_prologue_callbacks_[i].pass_isolate) {
@@ -1449,7 +1451,8 @@ void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
void Heap::CallGCEpilogueCallbacks(GCType gc_type,
GCCallbackFlags gc_callback_flags) {
- RuntimeCallTimerScope(isolate(), &RuntimeCallStats::GCEpilogueCallback);
+ RuntimeCallTimerScope runtime_timer(isolate(),
+ &RuntimeCallStats::GCEpilogueCallback);
for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
if (!gc_epilogue_callbacks_[i].pass_isolate) {
@@ -1497,9 +1500,11 @@ void Heap::MinorMarkCompact() {
SetGCState(MINOR_MARK_COMPACT);
LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin"));
- TRACE_GC(tracer(), GCTracer::Scope::MC_MINOR_MC);
+ TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC);
AlwaysAllocateScope always_allocate(isolate());
PauseAllocationObserversScope pause_observers(this);
+ IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
+ incremental_marking());
minor_mark_compact_collector()->CollectGarbage();
@@ -1627,6 +1632,7 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::EvacuateYoungGeneration() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_EVACUATE);
+ base::LockGuard<base::Mutex> guard(relocation_mutex());
if (!FLAG_concurrent_marking) {
DCHECK(fast_promotion_mode_);
DCHECK(CanExpandOldGeneration(new_space()->Size()));
@@ -1668,7 +1674,7 @@ void Heap::EvacuateYoungGeneration() {
void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE);
- RelocationLock relocation_lock(this);
+ base::LockGuard<base::Mutex> guard(relocation_mutex());
// There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. There is no sense in trying to
// trigger one during scavenge: scavenges allocation should always succeed.
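Both young-generation entry points now serialize on the same relocation_mutex() through an RAII guard, replacing the bespoke RelocationLock; EvacuateYoungGeneration acquires it the same way a few hunks up. A sketch of the arrangement, with std::lock_guard and std::mutex standing in for V8's base equivalents:

    #include <mutex>

    std::mutex relocation_mutex;  // shared by both young-gen paths

    void EvacuateYoungGeneration() {
      std::lock_guard<std::mutex> guard(relocation_mutex);
      // ... move new-space pages into the old generation ...
    }

    void Scavenge() {
      std::lock_guard<std::mutex> guard(relocation_mutex);
      // ... copy live new-space objects ...
    }

    int main() {
      EvacuateYoungGeneration();
      Scavenge();
      return 0;
    }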
@@ -1678,6 +1684,9 @@ void Heap::Scavenge() {
// Pause the inline allocation steps.
PauseAllocationObserversScope pause_observers(this);
+ IncrementalMarking::PauseBlackAllocationScope pause_black_allocation(
+ incremental_marking());
+
mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
SetGCState(SCAVENGE);
@@ -1715,7 +1724,7 @@ void Heap::Scavenge() {
Address new_space_front = new_space_->ToSpaceStart();
promotion_queue_.Initialize();
- ScavengeVisitor scavenge_visitor(this);
+ RootScavengeVisitor root_scavenge_visitor(this);
isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
&IsUnmodifiedHeapObject);
@@ -1723,7 +1732,7 @@ void Heap::Scavenge() {
{
// Copy roots.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_ROOTS);
- IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+ IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
}
{
@@ -1750,8 +1759,7 @@ void Heap::Scavenge() {
{
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_WEAK);
- // Copy objects reachable from the encountered weak collections list.
- scavenge_visitor.VisitPointer(&encountered_weak_collections_);
+ IterateEncounteredWeakCollections(&root_scavenge_visitor);
}
{
@@ -1759,23 +1767,23 @@ void Heap::Scavenge() {
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_CODE_FLUSH_CANDIDATES);
MarkCompactCollector* collector = mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
- collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
+ collector->code_flusher()->VisitListHeads(&root_scavenge_visitor);
+ collector->code_flusher()
+ ->IteratePointersToFromSpace<StaticScavengeVisitor>();
}
}
{
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SEMISPACE);
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ new_space_front = DoScavenge(new_space_front);
}
isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
&IsUnscavengedHeapObject);
- isolate()
- ->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRoots<
- GlobalHandles::HANDLE_PHANTOM_NODES_VISIT_OTHERS>(&scavenge_visitor);
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+ isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
+ &root_scavenge_visitor);
+ new_space_front = DoScavenge(new_space_front);
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
@@ -1963,12 +1971,12 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
DisallowHeapAllocation no_allocation;
// All external strings are listed in the external string table.
- class ExternalStringTableVisitorAdapter : public ObjectVisitor {
+ class ExternalStringTableVisitorAdapter : public RootVisitor {
public:
explicit ExternalStringTableVisitorAdapter(
v8::ExternalResourceVisitor* visitor)
: visitor_(visitor) {}
- virtual void VisitPointers(Object** start, Object** end) {
+ virtual void VisitRootPointers(Root root, Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
DCHECK((*p)->IsExternalString());
visitor_->VisitExternalString(
@@ -1983,8 +1991,7 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
external_string_table_.IterateAll(&external_string_table_visitor);
}
-Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
- Address new_space_front) {
+Address Heap::DoScavenge(Address new_space_front) {
do {
SemiSpace::AssertValidRange(new_space_front, new_space_->top());
// The addresses new_space_front and new_space_.top() define a
@@ -2007,8 +2014,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
while (!promotion_queue()->is_empty()) {
HeapObject* target;
int32_t size;
- bool was_marked_black;
- promotion_queue()->remove(&target, &size, &was_marked_black);
+ promotion_queue()->remove(&target, &size);
// Promoted object might be already partially visited
// during old space pointer iteration. Thus we search specifically
@@ -2016,8 +2022,7 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// to new space.
DCHECK(!target->IsMap());
- IterateAndScavengePromotedObject(target, static_cast<int>(size),
- was_marked_black);
+ IterateAndScavengePromotedObject(target, static_cast<int>(size));
}
}
@@ -2115,10 +2120,9 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
Object* result = nullptr;
AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
-
// Map::cast cannot be used due to uninitialized map field.
- reinterpret_cast<Map*>(result)->set_map(
- reinterpret_cast<Map*>(root(kMetaMapRootIndex)));
+ reinterpret_cast<Map*>(result)->set_map_after_allocation(
+ reinterpret_cast<Map*>(root(kMetaMapRootIndex)), SKIP_WRITE_BARRIER);
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
// Initialize to only containing tagged fields.
@@ -2151,7 +2155,7 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
if (!allocation.To(&result)) return allocation;
isolate()->counters()->maps_created()->Increment();
- result->set_map_no_write_barrier(meta_map());
+ result->set_map_after_allocation(meta_map(), SKIP_WRITE_BARRIER);
Map* map = Map::cast(result);
map->set_instance_type(instance_type);
map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
@@ -2254,7 +2258,7 @@ bool Heap::CreateInitialMaps() {
// Map::cast cannot be used due to uninitialized map field.
Map* new_meta_map = reinterpret_cast<Map*>(obj);
set_meta_map(new_meta_map);
- new_meta_map->set_map(new_meta_map);
+ new_meta_map->set_map_after_allocation(new_meta_map);
{ // Partial map allocation
#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name) \
@@ -2440,8 +2444,7 @@ bool Heap::CreateInitialMaps() {
ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
native_context_map()->set_dictionary_map(true);
- native_context_map()->set_visitor_id(
- StaticVisitorBase::kVisitNativeContext);
+ native_context_map()->set_visitor_id(kVisitNativeContext);
ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
shared_function_info)
@@ -2512,7 +2515,7 @@ AllocationResult Heap::AllocateHeapNumber(MutableMode mode,
}
Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
- HeapObject::cast(result)->set_map_no_write_barrier(map);
+ HeapObject::cast(result)->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
return result;
}
@@ -2525,7 +2528,7 @@ AllocationResult Heap::AllocateCell(Object* value) {
AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
- result->set_map_no_write_barrier(cell_map());
+ result->set_map_after_allocation(cell_map(), SKIP_WRITE_BARRIER);
Cell::cast(result)->set_value(value);
return result;
}
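The pervasive rename from set_map_no_write_barrier to set_map_after_allocation throughout this file encodes the precondition in the name: the map is installed on a freshly allocated object before the GC can observe it, so skipping the write barrier is safe. A toy rendering of that contract (invented types, not V8's HeapObject API):

    enum WriteBarrierMode { UPDATE_WRITE_BARRIER, SKIP_WRITE_BARRIER };

    struct Map {};

    struct HeapObject {
      Map* map = nullptr;
      void set_map_after_allocation(Map* m, WriteBarrierMode mode) {
        // Caller guarantees the object was just allocated and is not yet
        // visible to the collector, so there is nothing to record.
        map = m;
        (void)mode;
      }
    };

    int main() {
      Map cell_map;
      HeapObject result;
      result.set_map_after_allocation(&cell_map, SKIP_WRITE_BARRIER);
      return result.map == &cell_map ? 0 : 1;
    }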
@@ -2538,7 +2541,8 @@ AllocationResult Heap::AllocatePropertyCell() {
AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
- result->set_map_no_write_barrier(global_property_cell_map());
+ result->set_map_after_allocation(global_property_cell_map(),
+ SKIP_WRITE_BARRIER);
PropertyCell* cell = PropertyCell::cast(result);
cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
@@ -2556,7 +2560,7 @@ AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
- result->set_map_no_write_barrier(weak_cell_map());
+ result->set_map_after_allocation(weak_cell_map(), SKIP_WRITE_BARRIER);
WeakCell::cast(result)->initialize(value);
WeakCell::cast(result)->clear_next(the_hole_value());
return result;
@@ -2570,7 +2574,8 @@ AllocationResult Heap::AllocateTransitionArray(int capacity) {
AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED);
if (!allocation.To(&raw_array)) return allocation;
}
- raw_array->set_map_no_write_barrier(transition_array_map());
+ raw_array->set_map_after_allocation(transition_array_map(),
+ SKIP_WRITE_BARRIER);
TransitionArray* array = TransitionArray::cast(raw_array);
array->set_length(capacity);
MemsetPointer(array->data_start(), undefined_value(), capacity);
@@ -2793,7 +2798,8 @@ void Heap::CreateInitialObjects() {
{
Handle<FixedArray> empty_sloppy_arguments_elements =
factory->NewFixedArray(2, TENURED);
- empty_sloppy_arguments_elements->set_map(sloppy_arguments_elements_map());
+ empty_sloppy_arguments_elements->set_map_after_allocation(
+ sloppy_arguments_elements_map(), SKIP_WRITE_BARRIER);
set_empty_sloppy_arguments_elements(*empty_sloppy_arguments_elements);
}
@@ -2926,7 +2932,6 @@ bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
case kInstanceofCacheMapRootIndex:
case kInstanceofCacheAnswerRootIndex:
case kCodeStubsRootIndex:
- case kEmptyScriptRootIndex:
case kScriptListRootIndex:
case kMaterializedObjectsRootIndex:
case kMicrotaskQueueRootIndex:
@@ -2970,6 +2975,7 @@ bool Heap::IsUnmodifiedHeapObject(Object** p) {
Object* maybe_constructor = js_object->map()->GetConstructor();
if (!maybe_constructor->IsJSFunction()) return false;
JSFunction* constructor = JSFunction::cast(maybe_constructor);
+ if (js_object->elements()->length() != 0) return false;
return constructor->initial_map() == heap_object->map();
}
@@ -3066,7 +3072,7 @@ AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (!allocation.To(&result)) return allocation;
}
- result->set_map_no_write_barrier(byte_array_map());
+ result->set_map_after_allocation(byte_array_map(), SKIP_WRITE_BARRIER);
ByteArray::cast(result)->set_length(length);
return result;
}
@@ -3090,7 +3096,7 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
if (!allocation.To(&result)) return allocation;
}
- result->set_map_no_write_barrier(bytecode_array_map());
+ result->set_map_after_allocation(bytecode_array_map(), SKIP_WRITE_BARRIER);
BytecodeArray* instance = BytecodeArray::cast(result);
instance->set_length(length);
instance->set_frame_size(frame_size);
@@ -3111,15 +3117,18 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
if (size == 0) return nullptr;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
- filler->set_map_no_write_barrier(
- reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)));
+ filler->set_map_after_allocation(
+ reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)),
+ SKIP_WRITE_BARRIER);
} else if (size == 2 * kPointerSize) {
- filler->set_map_no_write_barrier(
- reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)));
+ filler->set_map_after_allocation(
+ reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)),
+ SKIP_WRITE_BARRIER);
} else {
DCHECK_GT(size, 2 * kPointerSize);
- filler->set_map_no_write_barrier(
- reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)));
+ filler->set_map_after_allocation(
+ reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)),
+ SKIP_WRITE_BARRIER);
FreeSpace::cast(filler)->nobarrier_set_size(size);
}
if (mode == ClearRecordedSlots::kYes) {
@@ -3202,8 +3211,8 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
!Marking::IsBlack(ObjectMarking::MarkBitFrom(
HeapObject::FromAddress(new_start),
MarkingState::Internal(HeapObject::FromAddress(new_start))))) {
- IncrementalMarking::TransferMark(this, object,
- HeapObject::FromAddress(new_start));
+ incremental_marking()->TransferMark(this, object,
+ HeapObject::FromAddress(new_start));
}
// Technically in new space this write might be omitted (except for
@@ -3320,7 +3329,8 @@ AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
if (!allocation.To(&result)) return allocation;
}
- result->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
+ result->set_map_after_allocation(MapForFixedTypedArray(array_type),
+ SKIP_WRITE_BARRIER);
FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(result);
elements->set_base_pointer(Smi::kZero, SKIP_WRITE_BARRIER);
elements->set_external_pointer(external_pointer, SKIP_WRITE_BARRIER);
@@ -3365,7 +3375,8 @@ AllocationResult Heap::AllocateFixedTypedArray(int length,
array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
if (!allocation.To(&object)) return allocation;
- object->set_map_no_write_barrier(MapForFixedTypedArray(array_type));
+ object->set_map_after_allocation(MapForFixedTypedArray(array_type),
+ SKIP_WRITE_BARRIER);
FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
elements->set_base_pointer(elements, SKIP_WRITE_BARRIER);
elements->set_external_pointer(
@@ -3406,7 +3417,7 @@ AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
}
}
- result->set_map_no_write_barrier(code_map());
+ result->set_map_after_allocation(code_map(), SKIP_WRITE_BARRIER);
Code* code = Code::cast(result);
DCHECK(IsAligned(bit_cast<intptr_t>(code->address()), kCodeAlignment));
DCHECK(!memory_allocator()->code_range()->valid() ||
@@ -3455,7 +3466,7 @@ AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
if (!allocation.To(&result)) return allocation;
}
- result->set_map_no_write_barrier(bytecode_array_map());
+ result->set_map_after_allocation(bytecode_array_map(), SKIP_WRITE_BARRIER);
BytecodeArray* copy = BytecodeArray::cast(result);
copy->set_length(bytecode_array->length());
copy->set_frame_size(bytecode_array->frame_size());
@@ -3472,7 +3483,8 @@ AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
void Heap::InitializeAllocationMemento(AllocationMemento* memento,
AllocationSite* allocation_site) {
- memento->set_map_no_write_barrier(allocation_memento_map());
+ memento->set_map_after_allocation(allocation_memento_map(),
+ SKIP_WRITE_BARRIER);
DCHECK(allocation_site->map() == allocation_site_map());
memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
if (FLAG_allocation_site_pretenuring) {
@@ -3493,7 +3505,7 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
AllocationResult allocation = AllocateRaw(size, space);
if (!allocation.To(&result)) return allocation;
// No need for write barrier since object is white and map is in old space.
- result->set_map_no_write_barrier(map);
+ result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
if (allocation_site != NULL) {
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
@@ -3729,7 +3741,7 @@ AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
if (!allocation.To(&result)) return allocation;
}
- result->set_map_no_write_barrier(map);
+ result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(chars);
@@ -3772,7 +3784,7 @@ AllocationResult Heap::AllocateRawOneByteString(int length,
}
// Partially initialize the object.
- result->set_map_no_write_barrier(one_byte_string_map());
+ result->set_map_after_allocation(one_byte_string_map(), SKIP_WRITE_BARRIER);
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
DCHECK_EQ(size, HeapObject::cast(result)->Size());
@@ -3796,7 +3808,7 @@ AllocationResult Heap::AllocateRawTwoByteString(int length,
}
// Partially initialize the object.
- result->set_map_no_write_barrier(string_map());
+ result->set_map_after_allocation(string_map(), SKIP_WRITE_BARRIER);
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
DCHECK_EQ(size, HeapObject::cast(result)->Size());
@@ -3812,7 +3824,7 @@ AllocationResult Heap::AllocateEmptyFixedArray() {
if (!allocation.To(&result)) return allocation;
}
// Initialize the object.
- result->set_map_no_write_barrier(fixed_array_map());
+ result->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
FixedArray::cast(result)->set_length(0);
return result;
}
@@ -3825,7 +3837,7 @@ AllocationResult Heap::AllocateEmptyScopeInfo() {
if (!allocation.To(&result)) return allocation;
}
// Initialize the object.
- result->set_map_no_write_barrier(scope_info_map());
+ result->set_map_after_allocation(scope_info_map(), SKIP_WRITE_BARRIER);
FixedArray::cast(result)->set_length(0);
return result;
}
@@ -3841,7 +3853,7 @@ AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
if (!allocation.To(&obj)) return allocation;
}
- obj->set_map_no_write_barrier(fixed_array_map());
+ obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
FixedArray* result = FixedArray::cast(obj);
result->set_length(len);
@@ -3853,7 +3865,8 @@ AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
// TODO(mvstanton): The map is set twice because of protection against calling
// set() on a COW FixedArray. Issue v8:3221 created to track this, and
// we might then be able to remove this whole method.
- HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
+ HeapObject::cast(obj)->set_map_after_allocation(fixed_cow_array_map(),
+ SKIP_WRITE_BARRIER);
return result;
}
@@ -3875,7 +3888,7 @@ AllocationResult Heap::CopyFixedArrayAndGrow(FixedArray* src, int grow_by,
if (!allocation.To(&obj)) return allocation;
}
- obj->set_map_no_write_barrier(fixed_array_map());
+ obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
FixedArray* result = FixedArray::cast(obj);
result->set_length(new_len);
@@ -3898,7 +3911,7 @@ AllocationResult Heap::CopyFixedArrayUpTo(FixedArray* src, int new_len,
AllocationResult allocation = AllocateRawFixedArray(new_len, pretenure);
if (!allocation.To(&obj)) return allocation;
}
- obj->set_map_no_write_barrier(fixed_array_map());
+ obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
FixedArray* result = FixedArray::cast(obj);
result->set_length(new_len);
@@ -3917,7 +3930,7 @@ AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
if (!allocation.To(&obj)) return allocation;
}
- obj->set_map_no_write_barrier(map);
+ obj->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
FixedArray* result = FixedArray::cast(obj);
DisallowHeapAllocation no_gc;
@@ -3945,7 +3958,7 @@ AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
if (!allocation.To(&obj)) return allocation;
}
- obj->set_map_no_write_barrier(map);
+ obj->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
src->address() + FixedDoubleArray::kLengthOffset,
FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
@@ -3986,7 +3999,7 @@ AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
if (!allocation.To(&result)) return allocation;
}
- result->set_map_no_write_barrier(fixed_array_map());
+ result->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@@ -4008,7 +4021,7 @@ AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
if (!allocation.To(&obj)) return allocation;
}
- obj->set_map_no_write_barrier(fixed_array_map());
+ obj->set_map_after_allocation(fixed_array_map(), SKIP_WRITE_BARRIER);
FixedArray::cast(obj)->set_length(length);
return obj;
}
@@ -4022,7 +4035,8 @@ AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
if (!allocation.To(&elements)) return allocation;
- elements->set_map_no_write_barrier(fixed_double_array_map());
+ elements->set_map_after_allocation(fixed_double_array_map(),
+ SKIP_WRITE_BARRIER);
FixedDoubleArray::cast(elements)->set_length(length);
return elements;
}
@@ -4054,7 +4068,7 @@ AllocationResult Heap::AllocateSymbol() {
AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
- result->set_map_no_write_barrier(symbol_map());
+ result->set_map_after_allocation(symbol_map(), SKIP_WRITE_BARRIER);
// Generate a random hash value.
int hash = isolate()->GenerateIdentityHash(Name::kHashBitMask);
@@ -4227,36 +4241,10 @@ void Heap::FinalizeIncrementalMarkingIfComplete(
(mark_compact_collector()->marking_deque()->IsEmpty() &&
local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking())) {
- CollectAllGarbage(current_gc_flags_, gc_reason);
+ CollectAllGarbage(current_gc_flags_, gc_reason, current_gc_callback_flags_);
}
}
-bool Heap::TryFinalizeIdleIncrementalMarking(
- double idle_time_in_ms, GarbageCollectionReason gc_reason) {
- size_t size_of_objects = static_cast<size_t>(SizeOfObjects());
- double final_incremental_mark_compact_speed_in_bytes_per_ms =
- tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
- if (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
- (!incremental_marking()->finalize_marking_completed() &&
- mark_compact_collector()->marking_deque()->IsEmpty() &&
- local_embedder_heap_tracer()->ShouldFinalizeIncrementalMarking() &&
- gc_idle_time_handler_->ShouldDoOverApproximateWeakClosure(
- idle_time_in_ms))) {
- FinalizeIncrementalMarking(gc_reason);
- return true;
- } else if (incremental_marking()->IsComplete() ||
- (mark_compact_collector()->marking_deque()->IsEmpty() &&
- local_embedder_heap_tracer()
- ->ShouldFinalizeIncrementalMarking() &&
- gc_idle_time_handler_->ShouldDoFinalIncrementalMarkCompact(
- idle_time_in_ms, size_of_objects,
- final_incremental_mark_compact_speed_in_bytes_per_ms))) {
- CollectAllGarbage(current_gc_flags_, gc_reason);
- return true;
- }
- return false;
-}
-
void Heap::RegisterDeserializedObjectsForBlackAllocation(
Reservation* reservations, List<HeapObject*>* large_objects) {
// TODO(hpayer): We do not have to iterate reservations on black objects
@@ -4275,8 +4263,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
HeapObject* obj = HeapObject::FromAddress(addr);
// There might be grey objects due to black to grey transitions in
// incremental marking. E.g. see VisitNativeContextIncremental.
- DCHECK(
- ObjectMarking::IsBlackOrGrey(obj, MarkingState::Internal(obj)));
+ DCHECK(ObjectMarking::IsBlackOrGrey(obj, MarkingState::Internal(obj)));
if (ObjectMarking::IsBlack(obj, MarkingState::Internal(obj))) {
incremental_marking()->IterateBlackObject(obj);
}
@@ -4284,6 +4271,10 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
}
}
}
+ // We potentially deserialized wrappers which require registering with the
+ // embedder as the marker will not find them.
+ local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
+
// Large object space doesn't use reservations, so it needs custom handling.
for (HeapObject* object : *large_objects) {
incremental_marking()->IterateBlackObject(object);
@@ -4293,7 +4284,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
void Heap::NotifyObjectLayoutChange(HeapObject* object,
const DisallowHeapAllocation&) {
if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
- incremental_marking()->MarkGrey(this, object);
+ incremental_marking()->MarkBlackAndPush(object);
}
#ifdef VERIFY_HEAP
DCHECK(pending_layout_change_object_ == nullptr);
@@ -4302,10 +4293,47 @@ void Heap::NotifyObjectLayoutChange(HeapObject* object,
}
#ifdef VERIFY_HEAP
+// Helper class for collecting slot addresses.
+class SlotCollectingVisitor final : public ObjectVisitor {
+ public:
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ for (Object** p = start; p < end; p++) {
+ slots_.push_back(p);
+ }
+ }
+
+ int number_of_slots() { return static_cast<int>(slots_.size()); }
+
+ Object** slot(int i) { return slots_[i]; }
+
+ private:
+ std::vector<Object**> slots_;
+};
+
void Heap::VerifyObjectLayoutChange(HeapObject* object, Map* new_map) {
+ // Check that Heap::NotifyObjectLayoutChange was called for object transitions
+ // that are not safe for concurrent marking.
+ // If you see this check triggering for a freshly allocated object,
+ // use object->set_map_after_allocation() to initialize its map.
if (pending_layout_change_object_ == nullptr) {
- DCHECK(!object->IsJSObject() ||
- !object->map()->TransitionRequiresSynchronizationWithGC(new_map));
+ if (object->IsJSObject()) {
+ DCHECK(!object->map()->TransitionRequiresSynchronizationWithGC(new_map));
+ } else {
+ // Check that the set of slots before and after the transition match.
+ SlotCollectingVisitor old_visitor;
+ object->IterateFast(&old_visitor);
+ MapWord old_map_word = object->map_word();
+ // Temporarily set the new map to iterate new slots.
+ object->set_map_word(MapWord::FromMap(new_map));
+ SlotCollectingVisitor new_visitor;
+ object->IterateFast(&new_visitor);
+ // Restore the old map.
+ object->set_map_word(old_map_word);
+ DCHECK_EQ(new_visitor.number_of_slots(), old_visitor.number_of_slots());
+ for (int i = 0; i < new_visitor.number_of_slots(); i++) {
+ DCHECK_EQ(new_visitor.slot(i), old_visitor.slot(i));
+ }
+ }
} else {
DCHECK_EQ(pending_layout_change_object_, object);
pending_layout_change_object_ = nullptr;
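The verification added above temporarily installs the new map, re-collects the object's pointer slots, restores the old map, and requires the two slot sets to match, since a transition that moves tagged slots would race with the concurrent marker. A toy version of the swap-and-compare pattern, with a vector of offsets standing in for Map-driven body iteration:

    #include <cassert>
    #include <vector>

    struct Descriptor {
      std::vector<int> pointer_offsets;  // where the tagged slots live
    };

    struct Obj {
      const Descriptor* desc;
      std::vector<int> CollectSlots() const { return desc->pointer_offsets; }
    };

    bool LayoutChangeIsSlotPreserving(Obj& obj, const Descriptor& new_desc) {
      std::vector<int> old_slots = obj.CollectSlots();
      const Descriptor* saved = obj.desc;
      obj.desc = &new_desc;  // temporarily install the new layout
      std::vector<int> new_slots = obj.CollectSlots();
      obj.desc = saved;      // restore the old layout
      return old_slots == new_slots;
    }

    int main() {
      Descriptor a{{0, 8}}, b{{0, 8}}, c{{0}};
      Obj o{&a};
      assert(LayoutChangeIsSlotPreserving(o, b));   // same slots: safe
      assert(!LayoutChangeIsSlotPreserving(o, c));  // slot removed: unsafe
      return 0;
    }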
@@ -4338,8 +4366,7 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
if (remaining_idle_time_in_ms > 0.0) {
- TryFinalizeIdleIncrementalMarking(
- remaining_idle_time_in_ms,
+ FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason::kFinalizeMarkingViaTask);
}
result = incremental_marking()->IsStopped();
@@ -4772,7 +4799,6 @@ bool Heap::RootIsImmortalImmovable(int root_index) {
}
}
-
#ifdef VERIFY_HEAP
void Heap::Verify() {
CHECK(HasBeenSetUp());
@@ -4802,6 +4828,137 @@ void Heap::Verify() {
mark_compact_collector()->VerifyOmittedMapChecks();
}
}
+
+class SlotVerifyingVisitor : public ObjectVisitor {
+ public:
+ SlotVerifyingVisitor(std::set<Address>* untyped,
+ std::set<std::pair<SlotType, Address> >* typed)
+ : untyped_(untyped), typed_(typed) {}
+
+ virtual bool ShouldHaveBeenRecorded(HeapObject* host, Object* target) = 0;
+
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ for (Object** slot = start; slot < end; slot++) {
+ if (ShouldHaveBeenRecorded(host, *slot)) {
+ CHECK_GT(untyped_->count(reinterpret_cast<Address>(slot)), 0);
+ }
+ }
+ }
+
+ void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
+ Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ if (ShouldHaveBeenRecorded(host, target)) {
+ CHECK(
+ InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
+ }
+ }
+
+ void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override {
+ Object* target = rinfo->code_age_stub();
+ if (ShouldHaveBeenRecorded(host, target)) {
+ CHECK(
+ InTypedSet(CODE_TARGET_SLOT, rinfo->pc()) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
+ }
+ }
+
+ void VisitCodeEntry(JSFunction* host, Address entry_address) override {
+ Object* target = Code::GetObjectFromEntryAddress(entry_address);
+ if (ShouldHaveBeenRecorded(host, target)) {
+ CHECK(InTypedSet(CODE_ENTRY_SLOT, entry_address));
+ }
+ }
+
+ void VisitCellPointer(Code* host, RelocInfo* rinfo) override {
+ Object* target = rinfo->target_cell();
+ if (ShouldHaveBeenRecorded(host, target)) {
+ CHECK(InTypedSet(CELL_TARGET_SLOT, rinfo->pc()) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
+ }
+ }
+
+ void VisitDebugTarget(Code* host, RelocInfo* rinfo) override {
+ Object* target =
+ Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+ if (ShouldHaveBeenRecorded(host, target)) {
+ CHECK(
+ InTypedSet(DEBUG_TARGET_SLOT, rinfo->pc()) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address())));
+ }
+ }
+
+ void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
+ Object* target = rinfo->target_object();
+ if (ShouldHaveBeenRecorded(host, target)) {
+ CHECK(InTypedSet(EMBEDDED_OBJECT_SLOT, rinfo->pc()) ||
+ (rinfo->IsInConstantPool() &&
+ InTypedSet(OBJECT_SLOT, rinfo->constant_pool_entry_address())));
+ }
+ }
+
+ private:
+ bool InTypedSet(SlotType type, Address slot) {
+ return typed_->count(std::make_pair(type, slot)) > 0;
+ }
+ std::set<Address>* untyped_;
+ std::set<std::pair<SlotType, Address> >* typed_;
+};
+
+class OldToNewSlotVerifyingVisitor : public SlotVerifyingVisitor {
+ public:
+ OldToNewSlotVerifyingVisitor(Heap* heap, std::set<Address>* untyped,
+ std::set<std::pair<SlotType, Address> >* typed)
+ : SlotVerifyingVisitor(untyped, typed), heap_(heap) {}
+
+ bool ShouldHaveBeenRecorded(HeapObject* host, Object* target) override {
+ return target->IsHeapObject() && heap_->InNewSpace(target) &&
+ !heap_->InNewSpace(host);
+ }
+
+ private:
+ Heap* heap_;
+};
+
+template <RememberedSetType direction>
+void CollectSlots(MemoryChunk* chunk, Address start, Address end,
+ std::set<Address>* untyped,
+ std::set<std::pair<SlotType, Address> >* typed) {
+ RememberedSet<direction>::Iterate(chunk, [start, end, untyped](Address slot) {
+ if (start <= slot && slot < end) {
+ untyped->insert(slot);
+ }
+ return KEEP_SLOT;
+ });
+ RememberedSet<direction>::IterateTyped(
+ chunk, [start, end, typed](SlotType type, Address host, Address slot) {
+ if (start <= slot && slot < end) {
+ typed->insert(std::make_pair(type, slot));
+ }
+ return KEEP_SLOT;
+ });
+}
+
+void Heap::VerifyRememberedSetFor(HeapObject* object) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ base::LockGuard<base::RecursiveMutex> lock_guard(chunk->mutex());
+ Address start = object->address();
+ Address end = start + object->Size();
+ std::set<Address> old_to_new;
+ std::set<std::pair<SlotType, Address> > typed_old_to_new;
+ if (!InNewSpace(object)) {
+ store_buffer()->MoveAllEntriesToRememberedSet();
+ CollectSlots<OLD_TO_NEW>(chunk, start, end, &old_to_new, &typed_old_to_new);
+ OldToNewSlotVerifyingVisitor visitor(this, &old_to_new, &typed_old_to_new);
+ object->IterateBody(&visitor);
+ }
+ // TODO(ulan): Add old to old slot set verification once all weak objects
+ // have their own instance types and slots are recorded for all weak fields.
+}
#endif
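VerifyRememberedSetFor gathers every recorded OLD_TO_NEW slot on the object's page into sets, then re-scans the object with OldToNewSlotVerifyingVisitor and CHECKs that each old-to-new pointer it finds was recorded. A toy distillation of that invariant, modeling addresses as ints and "new space" as addresses below 100:

    #include <cassert>
    #include <set>
    #include <utility>

    bool InNewSpace(int addr) { return addr < 100; }

    // object_fields: (slot address in host, target address) pairs found
    // by re-scanning the object; recorded_slots: the remembered set.
    bool Verify(const std::set<int>& recorded_slots,
                const std::set<std::pair<int, int> >& object_fields) {
      for (const auto& field : object_fields) {
        if (InNewSpace(field.second) && !InNewSpace(field.first)) {
          if (recorded_slots.count(field.first) == 0) return false;
        }
      }
      return true;
    }

    int main() {
      std::set<int> recorded = {200};
      std::set<std::pair<int, int> > fields = {{200, 10}, {208, 500}};
      assert(Verify(recorded, fields));
      fields.insert({216, 20});  // unrecorded old-to-new pointer
      assert(!Verify(recorded, fields));
      return 0;
    }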
@@ -4818,11 +4975,11 @@ void Heap::ZapFromSpace() {
class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
public:
- IterateAndScavengePromotedObjectsVisitor(Heap* heap, HeapObject* target,
- bool record_slots)
- : heap_(heap), target_(target), record_slots_(record_slots) {}
+ IterateAndScavengePromotedObjectsVisitor(Heap* heap, bool record_slots)
+ : heap_(heap), record_slots_(record_slots) {}
- inline void VisitPointers(Object** start, Object** end) override {
+ inline void VisitPointers(HeapObject* host, Object** start,
+ Object** end) override {
Address slot_address = reinterpret_cast<Address>(start);
Page* page = Page::FromAddress(slot_address);
@@ -4845,7 +5002,7 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
} else if (record_slots_ &&
MarkCompactCollector::IsOnEvacuationCandidate(
HeapObject::cast(target))) {
- heap_->mark_compact_collector()->RecordSlot(target_, slot, target);
+ heap_->mark_compact_collector()->RecordSlot(host, slot, target);
}
}
@@ -4853,23 +5010,22 @@ class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
}
}
- inline void VisitCodeEntry(Address code_entry_slot) override {
+ inline void VisitCodeEntry(JSFunction* host,
+ Address code_entry_slot) override {
// Black allocation requires us to process objects referenced by
// promoted objects.
if (heap_->incremental_marking()->black_allocation()) {
Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
- IncrementalMarking::MarkGrey(heap_, code);
+ heap_->incremental_marking()->WhiteToGreyAndPush(code);
}
}
private:
Heap* heap_;
- HeapObject* target_;
bool record_slots_;
};
-void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size,
- bool was_marked_black) {
+void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size) {
// We are not collecting slots on new space objects during mutation
// thus we have to scan for pointers to evacuation candidates when we
// promote objects. But we should not record any slots in non-black
@@ -4882,7 +5038,7 @@ void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size,
ObjectMarking::IsBlack(target, MarkingState::Internal(target));
}
- IterateAndScavengePromotedObjectsVisitor visitor(this, target, record_slots);
+ IterateAndScavengePromotedObjectsVisitor visitor(this, record_slots);
if (target->IsJSFunction()) {
// JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for
// these links are recorded during processing of weak lists.
@@ -4890,29 +5046,16 @@ void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size,
} else {
target->IterateBody(target->map()->instance_type(), size, &visitor);
}
-
- // When black allocations is on, we have to visit not already marked black
- // objects (in new space) promoted to black pages to keep their references
- // alive.
- // TODO(hpayer): Implement a special promotion visitor that incorporates
- // regular visiting and IteratePromotedObjectPointers.
- if (!was_marked_black) {
- if (incremental_marking()->black_allocation()) {
- IncrementalMarking::MarkGrey(this, target->map());
- incremental_marking()->IterateBlackObject(target);
- }
- }
}
-
-void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
+void Heap::IterateRoots(RootVisitor* v, VisitMode mode) {
IterateStrongRoots(v, mode);
IterateWeakRoots(v, mode);
}
-
-void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
- v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
+void Heap::IterateWeakRoots(RootVisitor* v, VisitMode mode) {
+ v->VisitRootPointer(Root::kStringTable, reinterpret_cast<Object**>(
+ &roots_[kStringTableRootIndex]));
v->Synchronize(VisitorSynchronization::kStringTable);
if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
@@ -4921,27 +5064,32 @@ void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}
-
-void Heap::IterateSmiRoots(ObjectVisitor* v) {
+void Heap::IterateSmiRoots(RootVisitor* v) {
// Acquire execution access since we are going to read stack limit values.
ExecutionAccess access(isolate());
- v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
+ v->VisitRootPointers(Root::kSmiRootList, &roots_[kSmiRootsStart],
+ &roots_[kRootListLength]);
v->Synchronize(VisitorSynchronization::kSmiRootList);
}
+void Heap::IterateEncounteredWeakCollections(RootVisitor* visitor) {
+ visitor->VisitRootPointer(Root::kWeakCollections,
+ &encountered_weak_collections_);
+}
+
// We cannot avoid stale handles to left-trimmed objects, but can only make
// sure all handles still needed are updated. Filter out a stale pointer
// and clear the slot to allow post processing of handles (needed because
// the sweeper might actually free the underlying page).
-class FixStaleLeftTrimmedHandlesVisitor : public ObjectVisitor {
+class FixStaleLeftTrimmedHandlesVisitor : public RootVisitor {
public:
explicit FixStaleLeftTrimmedHandlesVisitor(Heap* heap) : heap_(heap) {
USE(heap_);
}
- void VisitPointer(Object** p) override { FixHandle(p); }
+ void VisitRootPointer(Root root, Object** p) override { FixHandle(p); }
- void VisitPointers(Object** start, Object** end) override {
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
for (Object** p = start; p < end; p++) FixHandle(p);
}
@@ -4973,8 +5121,9 @@ class FixStaleLeftTrimmedHandlesVisitor : public ObjectVisitor {
Heap* heap_;
};
-void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
- v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
+void Heap::IterateStrongRoots(RootVisitor* v, VisitMode mode) {
+ v->VisitRootPointers(Root::kStrongRootList, &roots_[0],
+ &roots_[kStrongRootListLength]);
v->Synchronize(VisitorSynchronization::kStrongRootList);
// The serializer/deserializer iterates the root list twice, first to pick
// off immortal immovable roots to make sure they end up on the first page,
@@ -5003,7 +5152,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
// on scavenge collections.
- if (mode != VISIT_ALL_IN_SCAVENGE) {
+ if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_MINOR_MC_UPDATE) {
isolate_->builtins()->IterateBuiltins(v);
v->Synchronize(VisitorSynchronization::kBuiltins);
isolate_->interpreter()->IterateDispatchTable(v);
@@ -5023,6 +5172,9 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
case VISIT_ALL_IN_SCAVENGE:
isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
break;
+ case VISIT_ALL_IN_MINOR_MC_UPDATE:
+ isolate_->global_handles()->IterateAllNewSpaceRoots(v);
+ break;
case VISIT_ALL_IN_SWEEP_NEWSPACE:
case VISIT_ALL:
isolate_->global_handles()->IterateAllRoots(v);
@@ -5031,7 +5183,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->Synchronize(VisitorSynchronization::kGlobalHandles);
// Iterate over eternal handles.
- if (mode == VISIT_ALL_IN_SCAVENGE) {
+ if (mode == VISIT_ALL_IN_SCAVENGE || mode == VISIT_ALL_IN_MINOR_MC_UPDATE) {
isolate_->eternal_handles()->IterateNewSpaceRoots(v);
} else {
isolate_->eternal_handles()->IterateAllRoots(v);
@@ -5044,7 +5196,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// Iterate over other strong roots (currently only identity maps).
for (StrongRootsList* list = strong_roots_list_; list; list = list->next) {
- v->VisitPointers(list->start, list->end);
+ v->VisitRootPointers(Root::kStrongRoots, list->start, list->end);
}
v->Synchronize(VisitorSynchronization::kStrongRoots);
@@ -5063,7 +5215,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
// and through the API, we should gracefully handle the case that the heap
// size is not big enough to fit all the initial objects.
bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
- size_t max_executable_size, size_t code_range_size) {
+ size_t code_range_size) {
if (HasBeenSetUp()) return false;
// Overwrite default configuration.
@@ -5073,9 +5225,6 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
if (max_old_space_size != 0) {
max_old_generation_size_ = max_old_space_size * MB;
}
- if (max_executable_size != 0) {
- max_executable_size_ = max_executable_size * MB;
- }
// If max space size flags are specified overwrite the configuration.
if (FLAG_max_semi_space_size > 0) {
@@ -5085,15 +5234,11 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
max_old_generation_size_ =
static_cast<size_t>(FLAG_max_old_space_size) * MB;
}
- if (FLAG_max_executable_size > 0) {
- max_executable_size_ = static_cast<size_t>(FLAG_max_executable_size) * MB;
- }
if (Page::kPageSize > MB) {
max_semi_space_size_ = ROUND_UP(max_semi_space_size_, Page::kPageSize);
max_old_generation_size_ =
ROUND_UP(max_old_generation_size_, Page::kPageSize);
- max_executable_size_ = ROUND_UP(max_executable_size_, Page::kPageSize);
}
if (FLAG_stress_compaction) {
@@ -5135,12 +5280,6 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
Max(static_cast<size_t>(paged_space_count * Page::kPageSize),
max_old_generation_size_);
- // The max executable size must be less than or equal to the max old
- // generation size.
- if (max_executable_size_ > max_old_generation_size_) {
- max_executable_size_ = max_old_generation_size_;
- }
-
if (FLAG_initial_old_space_size > 0) {
initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
} else {
@@ -5185,9 +5324,7 @@ void Heap::GetFromRingBuffer(char* buffer) {
memcpy(buffer + copied, trace_ring_buffer_, ring_buffer_end_);
}
-
-bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
-
+bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0); }
void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
*stats->start_marker = HeapStats::kStartMarker;
@@ -5508,16 +5645,12 @@ bool Heap::SetUp() {
// Set up memory allocator.
memory_allocator_ = new MemoryAllocator(isolate_);
- if (!memory_allocator_->SetUp(MaxReserved(), MaxExecutableSize(),
- code_range_size_))
- return false;
+ if (!memory_allocator_->SetUp(MaxReserved(), code_range_size_)) return false;
store_buffer_ = new StoreBuffer(this);
incremental_marking_ = new IncrementalMarking(this);
- concurrent_marking_ = new ConcurrentMarking(this);
-
for (int i = 0; i <= LAST_SPACE; i++) {
space_[i] = nullptr;
}
@@ -5546,7 +5679,14 @@ bool Heap::SetUp() {
// Set up the seed that is used to randomize the string hash function.
DCHECK(hash_seed() == 0);
- if (FLAG_randomize_hashes) InitializeHashSeed();
+ if (FLAG_randomize_hashes) {
+ if (FLAG_hash_seed == 0) {
+ int rnd = isolate()->random_number_generator()->NextInt();
+ set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
+ } else {
+ set_hash_seed(Smi::FromInt(FLAG_hash_seed));
+ }
+ }
for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
i++) {
@@ -5556,8 +5696,15 @@ bool Heap::SetUp() {
tracer_ = new GCTracer(this);
scavenge_collector_ = new Scavenger(this);
mark_compact_collector_ = new MarkCompactCollector(this);
- if (FLAG_minor_mc)
- minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
+ incremental_marking_->set_marking_deque(
+ mark_compact_collector_->marking_deque());
+#ifdef V8_CONCURRENT_MARKING
+ concurrent_marking_ =
+ new ConcurrentMarking(this, mark_compact_collector_->marking_deque());
+#else
+ concurrent_marking_ = new ConcurrentMarking(this, nullptr);
+#endif
+ minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
gc_idle_time_handler_ = new GCIdleTimeHandler();
memory_reducer_ = new MemoryReducer(this);
if (V8_UNLIKELY(FLAG_gc_stats)) {
@@ -5584,14 +5731,6 @@ bool Heap::SetUp() {
return true;
}
-void Heap::InitializeHashSeed() {
- if (FLAG_hash_seed == 0) {
- int rnd = isolate()->random_number_generator()->NextInt();
- set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
- } else {
- set_hash_seed(Smi::FromInt(FLAG_hash_seed));
- }
-}
bool Heap::CreateHeapObjects() {
// Create initial maps.
@@ -5675,7 +5814,7 @@ void Heap::RegisterExternallyReferencedObject(Object** object) {
HeapObject* heap_object = HeapObject::cast(*object);
DCHECK(Contains(heap_object));
if (FLAG_incremental_marking_wrappers && incremental_marking()->IsMarking()) {
- IncrementalMarking::MarkGrey(this, heap_object);
+ incremental_marking()->WhiteToGreyAndPush(heap_object);
} else {
DCHECK(mark_compact_collector()->in_use());
mark_compact_collector()->MarkObject(heap_object);
@@ -5943,9 +6082,9 @@ void Heap::FatalProcessOutOfMemory(const char* location, bool is_heap_oom) {
#ifdef DEBUG
-class PrintHandleVisitor : public ObjectVisitor {
+class PrintHandleVisitor : public RootVisitor {
public:
- void VisitPointers(Object** start, Object** end) override {
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
for (Object** p = start; p < end; p++)
PrintF(" handle %p to %p\n", reinterpret_cast<void*>(p),
reinterpret_cast<void*>(*p));
@@ -5961,13 +6100,13 @@ void Heap::PrintHandles() {
#endif
-class CheckHandleCountVisitor : public ObjectVisitor {
+class CheckHandleCountVisitor : public RootVisitor {
public:
CheckHandleCountVisitor() : handle_count_(0) {}
~CheckHandleCountVisitor() override {
CHECK(handle_count_ < HandleScope::kCheckHandleThreshold);
}
- void VisitPointers(Object** start, Object** end) override {
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
handle_count_ += end - start;
}
@@ -6123,11 +6262,28 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
private:
- class MarkingVisitor : public ObjectVisitor {
+ class MarkingVisitor : public ObjectVisitor, public RootVisitor {
public:
MarkingVisitor() : marking_stack_(10) {}
- void VisitPointers(Object** start, Object** end) override {
+ void VisitPointers(HeapObject* host, Object** start,
+ Object** end) override {
+ MarkPointers(start, end);
+ }
+
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
+ MarkPointers(start, end);
+ }
+
+ void TransitiveClosure() {
+ while (!marking_stack_.is_empty()) {
+ HeapObject* obj = marking_stack_.RemoveLast();
+ obj->Iterate(this);
+ }
+ }
+
+ private:
+ void MarkPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if (!(*p)->IsHeapObject()) continue;
HeapObject* obj = HeapObject::cast(*p);
@@ -6141,15 +6297,6 @@ class UnreachableObjectsFilter : public HeapObjectsFilter {
}
}
}
-
- void TransitiveClosure() {
- while (!marking_stack_.is_empty()) {
- HeapObject* obj = marking_stack_.RemoveLast();
- obj->Iterate(this);
- }
- }
-
- private:
List<HeapObject*> marking_stack_;
};
@@ -6415,5 +6562,23 @@ int Heap::GetStaticVisitorIdForMap(Map* map) {
return StaticVisitorBase::GetVisitorId(map);
}
+const char* AllocationSpaceName(AllocationSpace space) {
+ switch (space) {
+ case NEW_SPACE:
+ return "NEW_SPACE";
+ case OLD_SPACE:
+ return "OLD_SPACE";
+ case CODE_SPACE:
+ return "CODE_SPACE";
+ case MAP_SPACE:
+ return "MAP_SPACE";
+ case LO_SPACE:
+ return "LO_SPACE";
+ default:
+ UNREACHABLE();
+ }
+ return NULL;
+}
+
} // namespace internal
} // namespace v8
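The heap.cc hunks above migrate root iteration from ObjectVisitor to the new RootVisitor interface, so every root slot now arrives tagged with a Root value. A minimal sketch of a visitor written against the new interface, assuming only the declarations visible in this diff (the class name is hypothetical):

namespace v8 {
namespace internal {

// Sketch, not part of the patch: counts root slots reported through the
// new RootVisitor interface, mirroring CheckHandleCountVisitor above.
// Overriding VisitRootPointers alone suffices, as the visitors in this
// file demonstrate.
class RootSlotCounter : public RootVisitor {
 public:
  void VisitRootPointers(Root root, Object** start, Object** end) override {
    slot_count_ += end - start;
  }
  ptrdiff_t slot_count() const { return slot_count_; }

 private:
  ptrdiff_t slot_count_ = 0;
};

}  // namespace internal
}  // namespace v8

Such a visitor would be driven by Heap::IterateRoots(&counter, VISIT_ALL), whose signature changes accordingly in the heap.h diff below.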
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index fe7b9341c0..80bc68c172 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -6,7 +6,7 @@
#define V8_HEAP_HEAP_H_
#include <cmath>
-#include <map>
+#include <vector>
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
@@ -21,6 +21,7 @@
#include "src/objects.h"
#include "src/objects/hash-table.h"
#include "src/objects/string-table.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
@@ -136,6 +137,17 @@ using v8::MemoryPressureLevel;
V(Map, fixed_float32_array_map, FixedFloat32ArrayMap) \
V(Map, fixed_float64_array_map, FixedFloat64ArrayMap) \
V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap) \
+ /* Oddball maps */ \
+ V(Map, undefined_map, UndefinedMap) \
+ V(Map, the_hole_map, TheHoleMap) \
+ V(Map, null_map, NullMap) \
+ V(Map, boolean_map, BooleanMap) \
+ V(Map, uninitialized_map, UninitializedMap) \
+ V(Map, arguments_marker_map, ArgumentsMarkerMap) \
+ V(Map, exception_map, ExceptionMap) \
+ V(Map, termination_exception_map, TerminationExceptionMap) \
+ V(Map, optimized_out_map, OptimizedOutMap) \
+ V(Map, stale_register_map, StaleRegisterMap) \
/* Canonical empty values */ \
V(ByteArray, empty_byte_array, EmptyByteArray) \
V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array) \
@@ -155,6 +167,7 @@ using v8::MemoryPressureLevel;
EmptySlowElementDictionary) \
V(PropertyCell, empty_property_cell, EmptyPropertyCell) \
V(WeakCell, empty_weak_cell, EmptyWeakCell) \
+ V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
/* Protectors */ \
V(PropertyCell, array_protector, ArrayProtector) \
V(Cell, is_concat_spreadable_protector, IsConcatSpreadableProtector) \
@@ -201,25 +214,13 @@ using v8::MemoryPressureLevel;
V(Object, noscript_shared_function_infos, NoScriptSharedFunctionInfos) \
V(FixedArray, serialized_templates, SerializedTemplates) \
V(FixedArray, serialized_global_proxy_sizes, SerializedGlobalProxySizes) \
- /* Configured values */ \
V(TemplateList, message_listeners, MessageListeners) \
- V(InterceptorInfo, noop_interceptor_info, NoOpInterceptorInfo) \
- V(Code, js_entry_code, JsEntryCode) \
- V(Code, js_construct_entry_code, JsConstructEntryCode) \
- /* Oddball maps */ \
- V(Map, undefined_map, UndefinedMap) \
- V(Map, the_hole_map, TheHoleMap) \
- V(Map, null_map, NullMap) \
- V(Map, boolean_map, BooleanMap) \
- V(Map, uninitialized_map, UninitializedMap) \
- V(Map, arguments_marker_map, ArgumentsMarkerMap) \
- V(Map, exception_map, ExceptionMap) \
- V(Map, termination_exception_map, TerminationExceptionMap) \
- V(Map, optimized_out_map, OptimizedOutMap) \
- V(Map, stale_register_map, StaleRegisterMap) \
/* per-Isolate map for JSPromiseCapability. */ \
/* TODO(caitp): Make this a Struct */ \
- V(Map, js_promise_capability_map, JSPromiseCapabilityMap)
+ V(Map, js_promise_capability_map, JSPromiseCapabilityMap) \
+ /* JS Entries */ \
+ V(Code, js_entry_code, JsEntryCode) \
+ V(Code, js_construct_entry_code, JsConstructEntryCode)
// Entries in this list are limited to Smis and are not visited during GC.
#define SMI_ROOT_LIST(V) \
@@ -248,64 +249,93 @@ using v8::MemoryPressureLevel;
// Heap roots that are known to be immortal immovable, for which we can safely
// skip write barriers. This list is not complete.
#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
+ V(ArgumentsMarker) \
+ V(ArgumentsMarkerMap) \
+ V(ArrayBufferNeuteringProtector) \
+ V(ArrayIteratorProtector) \
+ V(ArrayProtector) \
+ V(BlockContextMap) \
+ V(BooleanMap) \
V(ByteArrayMap) \
V(BytecodeArrayMap) \
- V(FreeSpaceMap) \
- V(OnePointerFillerMap) \
- V(TwoPointerFillerMap) \
- V(UndefinedValue) \
- V(TheHoleValue) \
- V(NullValue) \
- V(TrueValue) \
- V(FalseValue) \
- V(UninitializedValue) \
+ V(CatchContextMap) \
V(CellMap) \
- V(GlobalPropertyCellMap) \
- V(SharedFunctionInfoMap) \
- V(MetaMap) \
- V(HeapNumberMap) \
- V(MutableHeapNumberMap) \
- V(NativeContextMap) \
- V(FixedArrayMap) \
V(CodeMap) \
- V(ScopeInfoMap) \
- V(ModuleInfoMap) \
- V(FixedCOWArrayMap) \
- V(FixedDoubleArrayMap) \
- V(WeakCellMap) \
- V(TransitionArrayMap) \
- V(HashTableMap) \
- V(OrderedHashTableMap) \
- V(EmptyFixedArray) \
V(EmptyByteArray) \
V(EmptyDescriptorArray) \
- V(ArgumentsMarker) \
- V(SymbolMap) \
- V(SloppyArgumentsElementsMap) \
+ V(EmptyFixedArray) \
+ V(EmptyFixedFloat32Array) \
+ V(EmptyFixedFloat64Array) \
+ V(EmptyFixedInt16Array) \
+ V(EmptyFixedInt32Array) \
+ V(EmptyFixedInt8Array) \
+ V(EmptyFixedUint16Array) \
+ V(EmptyFixedUint32Array) \
+ V(EmptyFixedUint8Array) \
+ V(EmptyFixedUint8ClampedArray) \
+ V(EmptyPropertyCell) \
+ V(EmptyScopeInfo) \
+ V(EmptyScript) \
+ V(EmptySloppyArgumentsElements) \
+ V(EmptySlowElementDictionary) \
+ V(empty_string) \
+ V(EmptyWeakCell) \
+ V(EvalContextMap) \
+ V(Exception) \
+ V(FalseValue) \
+ V(FastArrayIterationProtector) \
+ V(FixedArrayMap) \
+ V(FixedCOWArrayMap) \
+ V(FixedDoubleArrayMap) \
+ V(ForeignMap) \
+ V(FreeSpaceMap) \
V(FunctionContextMap) \
- V(CatchContextMap) \
- V(WithContextMap) \
- V(BlockContextMap) \
+ V(GlobalPropertyCellMap) \
+ V(HashTableMap) \
+ V(HeapNumberMap) \
+ V(HoleNanValue) \
+ V(InfinityValue) \
+ V(IsConcatSpreadableProtector) \
+ V(JsConstructEntryCode) \
+ V(JsEntryCode) \
+ V(JSMessageObjectMap) \
+ V(ManyClosuresCellMap) \
+ V(MetaMap) \
+ V(MinusInfinityValue) \
+ V(MinusZeroValue) \
V(ModuleContextMap) \
- V(EvalContextMap) \
+ V(ModuleInfoMap) \
+ V(MutableHeapNumberMap) \
+ V(NanValue) \
+ V(NativeContextMap) \
+ V(NoClosuresCellMap) \
+ V(NullMap) \
+ V(NullValue) \
+ V(OneClosureCellMap) \
+ V(OnePointerFillerMap) \
+ V(OptimizedOut) \
+ V(OrderedHashTableMap) \
+ V(ScopeInfoMap) \
V(ScriptContextMap) \
- V(UndefinedMap) \
+ V(SharedFunctionInfoMap) \
+ V(SloppyArgumentsElementsMap) \
+ V(SpeciesProtector) \
+ V(StaleRegister) \
+ V(StringLengthProtector) \
+ V(SymbolMap) \
+ V(TerminationException) \
V(TheHoleMap) \
- V(NullMap) \
- V(BooleanMap) \
+ V(TheHoleValue) \
+ V(TransitionArrayMap) \
+ V(TrueValue) \
+ V(TwoPointerFillerMap) \
+ V(UndefinedCell) \
+ V(UndefinedMap) \
+ V(UndefinedValue) \
V(UninitializedMap) \
- V(ArgumentsMarkerMap) \
- V(JSMessageObjectMap) \
- V(ForeignMap) \
- V(NoClosuresCellMap) \
- V(OneClosureCellMap) \
- V(ManyClosuresCellMap) \
- V(NanValue) \
- V(InfinityValue) \
- V(MinusZeroValue) \
- V(MinusInfinityValue) \
- V(EmptyWeakCell) \
- V(empty_string) \
+ V(UninitializedValue) \
+ V(WeakCellMap) \
+ V(WithContextMap) \
PRIVATE_SYMBOL_LIST(V)
// Forward declarations.
@@ -328,6 +358,7 @@ class ObjectIterator;
class ObjectStats;
class Page;
class PagedSpace;
+class RootVisitor;
class Scavenger;
class ScavengeJob;
class Space;
@@ -403,9 +434,8 @@ class PromotionQueue {
inline void SetNewLimit(Address limit);
inline bool IsBelowPromotionQueue(Address to_space_top);
- inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
- inline void remove(HeapObject** target, int32_t* size,
- bool* was_marked_black);
+ inline void insert(HeapObject* target, int32_t size);
+ inline void remove(HeapObject** target, int32_t* size);
bool is_empty() {
return (front_ == rear_) &&
@@ -414,12 +444,10 @@ class PromotionQueue {
private:
struct Entry {
- Entry(HeapObject* obj, int32_t size, bool was_marked_black)
- : obj_(obj), size_(size), was_marked_black_(was_marked_black) {}
+ Entry(HeapObject* obj, int32_t size) : obj_(obj), size_(size) {}
HeapObject* obj_;
- int32_t size_ : 31;
- bool was_marked_black_ : 1;
+ int32_t size_;
};
inline Page* GetHeadPage();
@@ -562,19 +590,9 @@ class Heap {
enum UpdateAllocationSiteMode { kGlobal, kCached };
- // Taking this lock prevents the GC from entering a phase that relocates
+ // Taking this mutex prevents the GC from entering a phase that relocates
// object references.
- class RelocationLock {
- public:
- explicit RelocationLock(Heap* heap) : heap_(heap) {
- heap_->relocation_mutex_.Lock();
- }
-
- ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }
-
- private:
- Heap* heap_;
- };
+ base::Mutex* relocation_mutex() { return &relocation_mutex_; }
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
@@ -583,7 +601,7 @@ class Heap {
Address start;
Address end;
};
- typedef List<Chunk> Reservation;
+ typedef std::vector<Chunk> Reservation;
static const int kInitalOldGenerationLimitFactor = 2;
@@ -609,16 +627,6 @@ class Heap {
static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
static const int kMaxOldSpaceSizeHugeMemoryDevice = 1024 * kPointerMultiplier;
- // The executable size has to be a multiple of Page::kPageSize.
- // Sizes are in MB.
- static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
- static const int kMaxExecutableSizeMediumMemoryDevice =
- 192 * kPointerMultiplier;
- static const int kMaxExecutableSizeHighMemoryDevice =
- 256 * kPointerMultiplier;
- static const int kMaxExecutableSizeHugeMemoryDevice =
- 256 * kPointerMultiplier;
-
static const int kTraceRingBufferSize = 512;
static const int kStacktraceBufferSize = 512;
@@ -666,7 +674,7 @@ class Heap {
static void FatalProcessOutOfMemory(const char* location,
bool is_heap_oom = false);
- static bool RootIsImmortalImmovable(int root_index);
+ V8_EXPORT_PRIVATE static bool RootIsImmortalImmovable(int root_index);
// Checks whether the space is valid.
static bool IsValidAllocationSpace(AllocationSpace space);
@@ -786,9 +794,7 @@ class Heap {
Object* encountered_weak_collections() const {
return encountered_weak_collections_;
}
- void VisitEncounteredWeakCollections(ObjectVisitor* visitor) {
- visitor->VisitPointer(&encountered_weak_collections_);
- }
+ void IterateEncounteredWeakCollections(RootVisitor* visitor);
void set_encountered_weak_cells(Object* weak_cell) {
encountered_weak_cells_ = weak_cell;
@@ -987,16 +993,13 @@ class Heap {
// Configure heap size in MB before setup. Return false if the heap has been
// set up already.
bool ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
- size_t max_executable_size, size_t code_range_size);
+ size_t code_range_size);
bool ConfigureHeapDefault();
// Prepares the heap, setting up memory areas that are needed in the isolate
// without actually creating any objects.
bool SetUp();
- // (Re-)Initialize hash seed from flag or RNG.
- void InitializeHashSeed();
-
// Bootstraps the object heap with the core set of objects required to run.
// Returns whether it succeeded.
bool CreateHeapObjects();
@@ -1179,18 +1182,17 @@ class Heap {
// ===========================================================================
// Iterates over all roots in the heap.
- void IterateRoots(ObjectVisitor* v, VisitMode mode);
+ void IterateRoots(RootVisitor* v, VisitMode mode);
// Iterates over all strong roots in the heap.
- void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+ void IterateStrongRoots(RootVisitor* v, VisitMode mode);
// Iterates over entries in the smi roots list. Only interesting to the
// serializer/deserializer, since GC does not care about smis.
- void IterateSmiRoots(ObjectVisitor* v);
+ void IterateSmiRoots(RootVisitor* v);
// Iterates over all the other roots in the heap.
- void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
+ void IterateWeakRoots(RootVisitor* v, VisitMode mode);
  // Iterates over the pointers of promoted objects.
- void IterateAndScavengePromotedObject(HeapObject* target, int size,
- bool was_marked_black);
+ void IterateAndScavengePromotedObject(HeapObject* target, int size);
// ===========================================================================
// Store buffer API. =========================================================
@@ -1217,7 +1219,9 @@ class Heap {
// Start incremental marking and ensure that idle time handler can perform
// incremental steps.
- void StartIdleIncrementalMarking(GarbageCollectionReason gc_reason);
+ void StartIdleIncrementalMarking(
+ GarbageCollectionReason gc_reason,
+ GCCallbackFlags gc_callback_flags = GCCallbackFlags::kNoGCCallbackFlags);
// Starts incremental marking assuming incremental marking is currently
// stopped.
@@ -1231,9 +1235,6 @@ class Heap {
void FinalizeIncrementalMarkingIfComplete(GarbageCollectionReason gc_reason);
- bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms,
- GarbageCollectionReason gc_reason);
-
void RegisterDeserializedObjectsForBlackAllocation(
Reservation* reservations, List<HeapObject*>* large_objects);
@@ -1247,11 +1248,9 @@ class Heap {
// The runtime uses this function to notify potentially unsafe object layout
// changes that require special synchronization with the concurrent marker.
- // A layout change is unsafe if
- // - it removes a tagged in-object field.
- // - it replaces a tagged in-objects field with an untagged in-object field.
void NotifyObjectLayoutChange(HeapObject* object,
const DisallowHeapAllocation&);
+
#ifdef VERIFY_HEAP
// This function checks that either
// - the map transition is safe,
@@ -1345,7 +1344,6 @@ class Heap {
size_t MaxSemiSpaceSize() { return max_semi_space_size_; }
size_t InitialSemiSpaceSize() { return initial_semispace_size_; }
size_t MaxOldGenerationSize() { return max_old_generation_size_; }
- size_t MaxExecutableSize() { return max_executable_size_; }
// Returns the capacity of the heap in bytes w/o growing. Heap grows when
// more spaces are needed until it reaches the limit.
@@ -1516,6 +1514,7 @@ class Heap {
#ifdef VERIFY_HEAP
// Verify the heap is in its normal state before or after a GC.
void Verify();
+ void VerifyRememberedSetFor(HeapObject* object);
#endif
#ifdef DEBUG
@@ -1544,8 +1543,8 @@ class Heap {
// Registers an external string.
inline void AddString(String* string);
- inline void IterateAll(ObjectVisitor* v);
- inline void IterateNewSpaceStrings(ObjectVisitor* v);
+ inline void IterateAll(RootVisitor* v);
+ inline void IterateNewSpaceStrings(RootVisitor* v);
inline void PromoteAllNewSpaceStrings();
// Restores internal invariant and gets rid of collected strings. Must be
@@ -1828,7 +1827,7 @@ class Heap {
void Scavenge();
void EvacuateYoungGeneration();
- Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+ Address DoScavenge(Address new_space_front);
void UpdateNewSpaceReferencesInExternalStringTable(
ExternalStringTableUpdaterCallback updater_func);
@@ -2164,7 +2163,6 @@ class Heap {
size_t initial_max_old_generation_size_;
size_t initial_old_generation_size_;
bool old_generation_size_configured_;
- size_t max_executable_size_;
size_t maximum_committed_;
// For keeping track of how much data has survived
@@ -2391,6 +2389,7 @@ class Heap {
friend class IncrementalMarkingJob;
friend class LargeObjectSpace;
friend class MarkCompactCollector;
+ friend class MarkCompactCollectorBase;
friend class MinorMarkCompactCollector;
friend class MarkCompactMarkingVisitor;
friend class NewSpace;
@@ -2462,16 +2461,23 @@ class AlwaysAllocateScope {
// point into the heap to a location that has a map pointer at its first word.
// Caveat: Heap::Contains is an approximation because it can return true for
// objects in a heap space but above the allocation pointer.
-class VerifyPointersVisitor : public ObjectVisitor {
+class VerifyPointersVisitor : public ObjectVisitor, public RootVisitor {
public:
- inline void VisitPointers(Object** start, Object** end) override;
+ inline void VisitPointers(HeapObject* host, Object** start,
+ Object** end) override;
+ inline void VisitRootPointers(Root root, Object** start,
+ Object** end) override;
+
+ private:
+ inline void VerifyPointers(Object** start, Object** end);
};
// Verify that all objects are Smis.
-class VerifySmisVisitor : public ObjectVisitor {
+class VerifySmisVisitor : public RootVisitor {
public:
- inline void VisitPointers(Object** start, Object** end) override;
+ inline void VisitRootPointers(Root root, Object** start,
+ Object** end) override;
};
@@ -2630,6 +2636,8 @@ class AllocationObserver {
DISALLOW_COPY_AND_ASSIGN(AllocationObserver);
};
+V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
+
} // namespace internal
} // namespace v8
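With the RelocationLock wrapper deleted, call sites are expected to lock the exposed mutex themselves. A sketch of the resulting pattern, assuming V8's base::LockGuard from src/base/platform/mutex.h (the function name is hypothetical):

namespace v8 {
namespace internal {

// Taking the relocation mutex for the duration of a scope recovers the
// RAII behavior of the removed RelocationLock class.
void WithRelocationBlocked(Heap* heap) {
  base::LockGuard<base::Mutex> guard(heap->relocation_mutex());
  // ... work that must not overlap a relocating GC phase ...
}

}  // namespace internal
}  // namespace v8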
diff --git a/deps/v8/src/heap/incremental-marking-job.cc b/deps/v8/src/heap/incremental-marking-job.cc
index 393b9cce7e..47a27faf15 100644
--- a/deps/v8/src/heap/incremental-marking-job.cc
+++ b/deps/v8/src/heap/incremental-marking-job.cc
@@ -10,6 +10,7 @@
#include "src/heap/incremental-marking.h"
#include "src/isolate.h"
#include "src/v8.h"
+#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -42,6 +43,10 @@ void IncrementalMarkingJob::Task::Step(Heap* heap) {
}
void IncrementalMarkingJob::Task::RunInternal() {
+ VMState<GC> state(isolate());
+ RuntimeCallTimerScope runtime_timer(
+ isolate(), &RuntimeCallStats::GC_IncrementalMarkingJob);
+
Heap* heap = isolate()->heap();
job_->NotifyTask();
IncrementalMarking* incremental_marking = heap->incremental_marking();
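The added scopes make the background task's time visible to profilers. A condensed sketch of the pattern, using only the names that appear in the hunk above (the wrapper function is hypothetical):

namespace v8 {
namespace internal {

// VMState<GC> flips the sampled VM state for the duration of the scope;
// RuntimeCallTimerScope charges the elapsed time to a RuntimeCallStats
// counter, here GC_IncrementalMarkingJob as in the hunk above.
void RunGcTaskBody(Isolate* isolate) {
  VMState<GC> state(isolate);
  RuntimeCallTimerScope runtime_timer(
      isolate, &RuntimeCallStats::GC_IncrementalMarkingJob);
  // ... the task's actual marking step ...
}

}  // namespace internal
}  // namespace v8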
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 3d31d19d2e..58731d570b 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -17,16 +17,27 @@
#include "src/heap/objects-visiting.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
+#include "src/visitors.h"
+#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
+void IncrementalMarking::Observer::Step(int bytes_allocated, Address, size_t) {
+ VMState<GC> state(incremental_marking_.heap()->isolate());
+ RuntimeCallTimerScope runtime_timer(
+ incremental_marking_.heap()->isolate(),
+ &RuntimeCallStats::GC_IncrementalMarkingObserver);
+ incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
+}
+
IncrementalMarking::IncrementalMarking(Heap* heap)
: heap_(heap),
- state_(STOPPED),
+ marking_deque_(nullptr),
initial_old_generation_size_(0),
bytes_marked_ahead_of_schedule_(0),
unscanned_bytes_of_large_object_(0),
+ state_(STOPPED),
idle_marking_delay_counter_(0),
incremental_marking_finalization_rounds_(0),
is_compacting_(false),
@@ -41,15 +52,13 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
HeapObject* value_heap_obj = HeapObject::cast(value);
- DCHECK(!ObjectMarking::IsImpossible(value_heap_obj,
- MarkingState::Internal(value_heap_obj)));
- DCHECK(!ObjectMarking::IsImpossible(obj, MarkingState::Internal(obj)));
+ DCHECK(!ObjectMarking::IsImpossible<kAtomicity>(
+ value_heap_obj, marking_state(value_heap_obj)));
+ DCHECK(!ObjectMarking::IsImpossible<kAtomicity>(obj, marking_state(obj)));
const bool is_black =
- ObjectMarking::IsBlack(obj, MarkingState::Internal(obj));
+ ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj));
- if (is_black && ObjectMarking::IsWhite(
- value_heap_obj, MarkingState::Internal(value_heap_obj))) {
- WhiteToGreyAndPush(value_heap_obj);
+ if (is_black && WhiteToGreyAndPush(value_heap_obj)) {
RestartIfNotMarking();
}
return is_compacting_ && is_black;
@@ -120,9 +129,26 @@ void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
}
}
-void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
- ObjectMarking::WhiteToGrey(obj, MarkingState::Internal(obj));
- heap_->mark_compact_collector()->marking_deque()->Push(obj);
+bool IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
+ if (ObjectMarking::WhiteToGrey<kAtomicity>(obj, marking_state(obj))) {
+ marking_deque()->Push(obj);
+ return true;
+ }
+ return false;
+}
+
+void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
+ // Color the object black and push it into the bailout deque.
+ ObjectMarking::WhiteToGrey<kAtomicity>(obj, marking_state(obj));
+ if (ObjectMarking::GreyToBlack<kAtomicity>(obj, marking_state(obj))) {
+#ifdef V8_CONCURRENT_MARKING
+ marking_deque()->Push(obj, MarkingThread::kMain, TargetDeque::kBailout);
+#else
+ if (!marking_deque()->Push(obj)) {
+ ObjectMarking::BlackToGrey<kAtomicity>(obj, marking_state(obj));
+ }
+#endif
+ }
}
void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
@@ -132,24 +158,42 @@ void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
DCHECK(MemoryChunk::FromAddress(from->address()) ==
MemoryChunk::FromAddress(to->address()));
- if (!heap->incremental_marking()->IsMarking()) return;
+ if (!IsMarking()) return;
// If the mark doesn't move, we don't check the color of the object.
// It doesn't matter whether the object is black, since it hasn't changed
// size, so the adjustment to the live data count will be zero anyway.
if (from == to) return;
- MarkBit new_mark_bit =
- ObjectMarking::MarkBitFrom(to, MarkingState::Internal(to));
- MarkBit old_mark_bit =
- ObjectMarking::MarkBitFrom(from, MarkingState::Internal(from));
+ MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(to, marking_state(to));
+ MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(from, marking_state(from));
- if (Marking::IsBlack(old_mark_bit)) {
- Marking::MarkBlack(new_mark_bit);
- } else if (Marking::IsGrey(old_mark_bit)) {
- Marking::WhiteToGrey(new_mark_bit);
- heap->mark_compact_collector()->marking_deque()->Push(to);
- heap->incremental_marking()->RestartIfNotMarking();
+ if (Marking::IsBlack<kAtomicity>(old_mark_bit)) {
+ if (from->address() + kPointerSize == to->address()) {
+ // The old and the new markbits overlap. The |to| object has the
+ // grey color. To make it black, we need to set the second bit.
+ DCHECK(new_mark_bit.Get<kAtomicity>());
+ new_mark_bit.Next().Set<kAtomicity>();
+ } else {
+ bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
+ DCHECK(success);
+ USE(success);
+ }
+ } else if (Marking::IsGrey<kAtomicity>(old_mark_bit)) {
+ if (from->address() + kPointerSize == to->address()) {
+ // The old and the new markbits overlap. The |to| object has the
+ // white color. To make it grey, we need to set the first bit.
+ // Note that Marking::WhiteToGrey does not work here because
+ // old_mark_bit.Next() can be set by the concurrent marker at any time.
+ new_mark_bit.Set();
+ DCHECK(!new_mark_bit.Next().Get());
+ } else {
+ bool success = Marking::WhiteToGrey<kAtomicity>(new_mark_bit);
+ DCHECK(success);
+ USE(success);
+ }
+ marking_deque()->Push(to);
+ RestartIfNotMarking();
}
}
@@ -186,16 +230,16 @@ class IncrementalMarkingMarkingVisitor
HeapObject::RawField(object, end_offset));
start_offset = end_offset;
end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
- scan_until_end =
- heap->mark_compact_collector()->marking_deque()->IsFull();
+ scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
} while (scan_until_end && start_offset < object_size);
chunk->set_progress_bar(start_offset);
if (start_offset < object_size) {
- if (ObjectMarking::IsGrey(object, MarkingState::Internal(object))) {
- heap->mark_compact_collector()->marking_deque()->Unshift(object);
+ if (ObjectMarking::IsGrey<IncrementalMarking::kAtomicity>(
+ object, heap->incremental_marking()->marking_state(object))) {
+ heap->incremental_marking()->marking_deque()->Unshift(object);
} else {
- DCHECK(
- ObjectMarking::IsBlack(object, MarkingState::Internal(object)));
+ DCHECK(ObjectMarking::IsBlack<IncrementalMarking::kAtomicity>(
+ object, heap->incremental_marking()->marking_state(object)));
heap->mark_compact_collector()->UnshiftBlack(object);
}
heap->incremental_marking()->NotifyIncompleteScanOfObject(
@@ -218,11 +262,11 @@ class IncrementalMarkingMarkingVisitor
HeapObject* heap_obj = HeapObject::cast(cache);
        // Mark the object grey if it is white; do not enqueue it into the marking
// deque.
- if (ObjectMarking::IsWhite(heap_obj,
- MarkingState::Internal(heap_obj))) {
- ObjectMarking::WhiteToGrey(heap_obj,
- MarkingState::Internal(heap_obj));
- }
+ Heap* heap = map->GetHeap();
+ bool ignored =
+ ObjectMarking::WhiteToGrey<IncrementalMarking::kAtomicity>(
+ heap_obj, heap->incremental_marking()->marking_state(heap_obj));
+ USE(ignored);
}
}
VisitNativeContext(map, context);
@@ -249,46 +293,43 @@ class IncrementalMarkingMarkingVisitor
// Marks the object grey and pushes it on the marking stack.
INLINE(static void MarkObject(Heap* heap, Object* obj)) {
- IncrementalMarking::MarkGrey(heap, HeapObject::cast(obj));
+ heap->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
}
// Marks the object black without pushing it on the marking stack.
  // Returns true if the object needed marking and false otherwise.
INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
HeapObject* heap_object = HeapObject::cast(obj);
- if (ObjectMarking::IsWhite(heap_object,
- MarkingState::Internal(heap_object))) {
- ObjectMarking::WhiteToBlack(heap_object,
- MarkingState::Internal(heap_object));
- return true;
- }
- return false;
+ return ObjectMarking::WhiteToBlack<IncrementalMarking::kAtomicity>(
+ heap_object, heap->incremental_marking()->marking_state(heap_object));
}
};
void IncrementalMarking::IterateBlackObject(HeapObject* object) {
if (IsMarking() &&
- ObjectMarking::IsBlack(object, MarkingState::Internal(object))) {
+ ObjectMarking::IsBlack<kAtomicity>(object, marking_state(object))) {
Page* page = Page::FromAddress(object->address());
if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
// IterateBlackObject requires us to visit the whole object.
page->ResetProgressBar();
}
Map* map = object->map();
- MarkGrey(heap_, map);
+ WhiteToGreyAndPush(map);
IncrementalMarkingMarkingVisitor::IterateBody(map, object);
}
}
-class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
+class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
public:
explicit IncrementalMarkingRootMarkingVisitor(
IncrementalMarking* incremental_marking)
: heap_(incremental_marking->heap()) {}
- void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
+ void VisitRootPointer(Root root, Object** p) override {
+ MarkObjectByPointer(p);
+ }
- void VisitPointers(Object** start, Object** end) override {
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
@@ -297,7 +338,7 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
Object* obj = *p;
if (!obj->IsHeapObject()) return;
- IncrementalMarking::MarkGrey(heap_, HeapObject::cast(obj));
+ heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
}
Heap* heap_;
@@ -528,7 +569,7 @@ void IncrementalMarking::StartMarking() {
PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
- heap_->mark_compact_collector()->marking_deque()->StartUsing();
+ marking_deque()->StartUsing();
ActivateIncrementalWriteBarrier();
@@ -548,10 +589,6 @@ void IncrementalMarking::StartMarking() {
if (FLAG_concurrent_marking) {
ConcurrentMarking* concurrent_marking = heap_->concurrent_marking();
- heap_->mark_compact_collector()->marking_deque()->Iterate(
- [concurrent_marking](HeapObject* obj) {
- concurrent_marking->AddRoot(obj);
- });
concurrent_marking->StartTask();
}
@@ -574,6 +611,19 @@ void IncrementalMarking::StartBlackAllocation() {
}
}
+void IncrementalMarking::PauseBlackAllocation() {
+ DCHECK(FLAG_black_allocation);
+ DCHECK(IsMarking());
+ heap()->old_space()->UnmarkAllocationInfo();
+ heap()->map_space()->UnmarkAllocationInfo();
+ heap()->code_space()->UnmarkAllocationInfo();
+ if (FLAG_trace_incremental_marking) {
+ heap()->isolate()->PrintWithTimestamp(
+ "[IncrementalMarking] Black allocation paused\n");
+ }
+ black_allocation_ = false;
+}
+
void IncrementalMarking::FinishBlackAllocation() {
if (black_allocation_) {
black_allocation_ = false;
@@ -614,7 +664,7 @@ void IncrementalMarking::ProcessWeakCells() {
HeapObject* value = HeapObject::cast(weak_cell->value());
    // Remove weak cells with live objects from the list; they do not need
// clearing.
- if (ObjectMarking::IsBlackOrGrey(value, MarkingState::Internal(value))) {
+ if (ObjectMarking::IsBlackOrGrey<kAtomicity>(value, marking_state(value))) {
// Record slot, if value is pointing to an evacuation candidate.
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
@@ -643,10 +693,12 @@ bool ShouldRetainMap(Map* map, int age) {
return false;
}
Object* constructor = map->GetConstructor();
+ Heap* heap = map->GetHeap();
if (!constructor->IsHeapObject() ||
- ObjectMarking::IsWhite(
+ ObjectMarking::IsWhite<IncrementalMarking::kAtomicity>(
HeapObject::cast(constructor),
- MarkingState::Internal(HeapObject::cast(constructor)))) {
+ heap->incremental_marking()->marking_state(
+ HeapObject::cast(constructor)))) {
// The constructor is dead, no new objects with this map can
// be created. Do not retain this map.
return false;
@@ -676,15 +728,15 @@ void IncrementalMarking::RetainMaps() {
int new_age;
Map* map = Map::cast(cell->value());
if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
- ObjectMarking::IsWhite(map, MarkingState::Internal(map))) {
+ ObjectMarking::IsWhite<kAtomicity>(map, marking_state(map))) {
if (ShouldRetainMap(map, age)) {
- MarkGrey(heap(), map);
+ WhiteToGreyAndPush(map);
}
Object* prototype = map->prototype();
if (age > 0 && prototype->IsHeapObject() &&
- ObjectMarking::IsWhite(
+ ObjectMarking::IsWhite<kAtomicity>(
HeapObject::cast(prototype),
- MarkingState::Internal(HeapObject::cast(prototype)))) {
+ marking_state(HeapObject::cast(prototype)))) {
// The prototype is not marked, age the map.
new_age = age - 1;
} else {
@@ -709,9 +761,6 @@ void IncrementalMarking::FinalizeIncrementally() {
double start = heap_->MonotonicallyIncreasingTimeInMs();
- int old_marking_deque_top =
- heap_->mark_compact_collector()->marking_deque()->top();
-
// After finishing incremental marking, we try to discover all unmarked
// objects to reduce the marking load in the final pause.
// 1) We scan and mark the roots again to find all changes to the root set.
@@ -728,11 +777,9 @@ void IncrementalMarking::FinalizeIncrementally() {
ProcessWeakCells();
int marking_progress =
- abs(old_marking_deque_top -
- heap_->mark_compact_collector()->marking_deque()->top());
-
- marking_progress += static_cast<int>(
- heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
+ heap_->mark_compact_collector()->marking_deque()->Size() +
+ static_cast<int>(
+ heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
double end = heap_->MonotonicallyIncreasingTimeInMs();
double delta = end - start;
@@ -764,97 +811,90 @@ void IncrementalMarking::FinalizeIncrementally() {
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
if (!IsMarking()) return;
- MarkingDeque* marking_deque =
- heap_->mark_compact_collector()->marking_deque();
- int current = marking_deque->bottom();
- int mask = marking_deque->mask();
- int limit = marking_deque->top();
- HeapObject** array = marking_deque->array();
- int new_top = current;
-
Map* filler_map = heap_->one_pointer_filler_map();
- while (current != limit) {
- HeapObject* obj = array[current];
+ marking_deque()->Update([this, filler_map](HeapObject* obj) -> HeapObject* {
DCHECK(obj->IsHeapObject());
- current = ((current + 1) & mask);
// Only pointers to from space have to be updated.
if (heap_->InFromSpace(obj)) {
MapWord map_word = obj->map_word();
- // There may be objects on the marking deque that do not exist anymore,
- // e.g. left trimmed objects or objects from the root set (frames).
- // If these object are dead at scavenging time, their marking deque
- // entries will not point to forwarding addresses. Hence, we can discard
- // them.
- if (map_word.IsForwardingAddress()) {
- HeapObject* dest = map_word.ToForwardingAddress();
- if (ObjectMarking::IsBlack(dest, MarkingState::Internal(dest)))
- continue;
- array[new_top] = dest;
- new_top = ((new_top + 1) & mask);
- DCHECK(new_top != marking_deque->bottom());
- DCHECK(ObjectMarking::IsGrey(obj, MarkingState::Internal(obj)) ||
- (obj->IsFiller() &&
- ObjectMarking::IsWhite(obj, MarkingState::Internal(obj))));
+ if (!map_word.IsForwardingAddress()) {
+ // There may be objects on the marking deque that do not exist anymore,
+ // e.g. left trimmed objects or objects from the root set (frames).
+        // If these objects are dead at scavenging time, their marking deque
+ // entries will not point to forwarding addresses. Hence, we can discard
+ // them.
+ return nullptr;
+ }
+ HeapObject* dest = map_word.ToForwardingAddress();
+ DCHECK_IMPLIES(
+ ObjectMarking::IsWhite<kAtomicity>(obj, marking_state(obj)),
+ obj->IsFiller());
+ return dest;
+ } else if (heap_->InToSpace(obj)) {
+      // The object may be on a page that was moved within new space.
+ DCHECK(
+ Page::FromAddress(obj->address())->IsFlagSet(Page::SWEEP_TO_ITERATE));
+ return ObjectMarking::IsBlack<kAtomicity>(obj,
+ MarkingState::External(obj))
+ ? obj
+ : nullptr;
+ } else {
+ // The object may be on a page that was moved from new to old space.
+ if (Page::FromAddress(obj->address())
+ ->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
+ return ObjectMarking::IsBlack<kAtomicity>(obj,
+ MarkingState::External(obj))
+ ? obj
+ : nullptr;
}
- } else if (obj->map() != filler_map) {
+ DCHECK_IMPLIES(
+ ObjectMarking::IsWhite<kAtomicity>(obj, marking_state(obj)),
+ obj->IsFiller());
      // Skip one-word filler objects that appear on the
      // stack when we perform an in-place array shift.
- array[new_top] = obj;
- new_top = ((new_top + 1) & mask);
- DCHECK(new_top != marking_deque->bottom());
- DCHECK(ObjectMarking::IsGrey(obj, MarkingState::Internal(obj)) ||
- (obj->IsFiller() &&
- ObjectMarking::IsWhite(obj, MarkingState::Internal(obj))) ||
- (MemoryChunk::FromAddress(obj->address())
- ->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
- ObjectMarking::IsBlack(obj, MarkingState::Internal(obj))));
+ return (obj->map() == filler_map) ? nullptr : obj;
}
- }
- marking_deque->set_top(new_top);
+ });
}
-
-void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
- MarkGrey(heap_, map);
-
- IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
-
-#if ENABLE_SLOW_DCHECKS
- MarkBit mark_bit =
- ObjectMarking::MarkBitFrom(obj, MarkingState::Internal(obj));
+bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject* obj) {
+ if (!obj->IsFixedArray()) return false;
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
- (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
- Marking::IsBlack(mark_bit)));
-#endif
- MarkBlack(obj, size);
+ return chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR);
}
-void IncrementalMarking::MarkGrey(Heap* heap, HeapObject* object) {
- if (ObjectMarking::IsWhite(object, MarkingState::Internal(object))) {
- heap->incremental_marking()->WhiteToGreyAndPush(object);
- }
-}
-
-void IncrementalMarking::MarkBlack(HeapObject* obj, int size) {
- if (ObjectMarking::IsBlack(obj, MarkingState::Internal(obj))) return;
- ObjectMarking::GreyToBlack(obj, MarkingState::Internal(obj));
+void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
+ MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj, marking_state(obj));
+ DCHECK(Marking::IsGrey<kAtomicity>(mark_bit) ||
+ Marking::IsBlack<kAtomicity>(mark_bit));
+ USE(mark_bit);
+  // The object can already be black in three cases:
+  // 1. The object is a fixed array with the progress bar.
+  // 2. The object is a JSObject that was colored black before an
+  //    unsafe layout change.
+  // 3. The object is a string that was colored black before an
+  //    unsafe layout change.
+ if (!ObjectMarking::GreyToBlack<kAtomicity>(obj, marking_state(obj))) {
+ DCHECK(IsFixedArrayWithProgressBar(obj) || obj->IsJSObject() ||
+ obj->IsString());
+ }
+ DCHECK(ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj)));
+ WhiteToGreyAndPush(map);
+ IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
}
intptr_t IncrementalMarking::ProcessMarkingDeque(
intptr_t bytes_to_process, ForceCompletionAction completion) {
intptr_t bytes_processed = 0;
- MarkingDeque* marking_deque =
- heap_->mark_compact_collector()->marking_deque();
- while (!marking_deque->IsEmpty() && (bytes_processed < bytes_to_process ||
- completion == FORCE_COMPLETION)) {
- HeapObject* obj = marking_deque->Pop();
+ while (!marking_deque()->IsEmpty() && (bytes_processed < bytes_to_process ||
+ completion == FORCE_COMPLETION)) {
+ HeapObject* obj = marking_deque()->Pop();
// Left trimming may result in white, grey, or black filler objects on the
// marking deque. Ignore these objects.
if (obj->IsFiller()) {
- DCHECK(!ObjectMarking::IsImpossible(obj, MarkingState::Internal(obj)));
+ DCHECK(!ObjectMarking::IsImpossible<kAtomicity>(obj, marking_state(obj)));
continue;
}
@@ -879,7 +919,7 @@ void IncrementalMarking::Hurry() {
// forced e.g. in tests. It should not happen when COMPLETE was set when
// incremental marking finished and a regular GC was triggered after that
// because should_hurry_ will force a full GC.
- if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
+ if (!marking_deque()->IsEmpty()) {
double start = 0.0;
if (FLAG_trace_incremental_marking) {
start = heap_->MonotonicallyIncreasingTimeInMs();
@@ -909,9 +949,10 @@ void IncrementalMarking::Hurry() {
HeapObject* cache = HeapObject::cast(
Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
if (!cache->IsUndefined(heap_->isolate())) {
- if (ObjectMarking::IsGrey(cache, MarkingState::Internal(cache))) {
- ObjectMarking::GreyToBlack(cache, MarkingState::Internal(cache));
- }
+ // Mark the cache black if it is grey.
+ bool ignored =
+ ObjectMarking::GreyToBlack<kAtomicity>(cache, marking_state(cache));
+ USE(ignored);
}
context = Context::cast(context)->next_context_link();
}
@@ -1040,7 +1081,7 @@ double IncrementalMarking::AdvanceIncrementalMarking(
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
} while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
- !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
+ !marking_deque()->IsEmpty());
return remaining_time_in_ms;
}
@@ -1142,7 +1183,7 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
bytes_marked_ahead_of_schedule_ += bytes_processed;
}
- if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
+ if (marking_deque()->IsEmpty()) {
if (heap_->local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking()) {
if (completion == FORCE_COMPLETION ||
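A recurring pattern in this file is that IsWhite-then-transition pairs collapse into single calls whose boolean result says whether this thread performed the transition; with kAtomicity == ATOMIC, that is what keeps the main and concurrent markers from both pushing the same object. A standalone analogue in plain C++, not V8 code:

#include <atomic>

// Color transitions as one atomic compare-and-exchange: exactly one
// thread observes success and gets to push the object; all others see
// the already-updated color and back off.
enum Color : int { kWhite = 0, kGrey = 1, kBlack = 2 };

bool WhiteToGrey(std::atomic<int>* color) {
  int expected = kWhite;
  return color->compare_exchange_strong(expected, kGrey);
}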
diff --git a/deps/v8/src/heap/incremental-marking.h b/deps/v8/src/heap/incremental-marking.h
index 48e03d289d..4a88ab3fae 100644
--- a/deps/v8/src/heap/incremental-marking.h
+++ b/deps/v8/src/heap/incremental-marking.h
@@ -32,10 +32,66 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };
- explicit IncrementalMarking(Heap* heap);
+ class PauseBlackAllocationScope {
+ public:
+ explicit PauseBlackAllocationScope(IncrementalMarking* marking)
+ : marking_(marking), paused_(false) {
+ if (marking_->black_allocation()) {
+ paused_ = true;
+ marking_->PauseBlackAllocation();
+ }
+ }
+
+ ~PauseBlackAllocationScope() {
+ if (paused_) {
+ marking_->StartBlackAllocation();
+ }
+ }
+
+ private:
+ IncrementalMarking* marking_;
+ bool paused_;
+ };
static void Initialize();
+ explicit IncrementalMarking(Heap* heap);
+
+ MarkingState marking_state(HeapObject* object) const {
+ return MarkingState::Internal(object);
+ }
+
+ MarkingState marking_state(MemoryChunk* chunk) const {
+ return MarkingState::Internal(chunk);
+ }
+
+ // Transfers mark bits without requiring proper object headers.
+ void TransferMark(Heap* heap, HeapObject* from, HeapObject* to);
+
+ // Transfers color including live byte count, requiring properly set up
+ // objects.
+ template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
+ V8_INLINE void TransferColor(HeapObject* from, HeapObject* to) {
+ if (ObjectMarking::IsBlack<access_mode>(to, marking_state(to))) {
+ DCHECK(black_allocation());
+ return;
+ }
+
+ DCHECK(ObjectMarking::IsWhite<access_mode>(to, marking_state(to)));
+ if (ObjectMarking::IsGrey<access_mode>(from, marking_state(from))) {
+ bool success =
+ ObjectMarking::WhiteToGrey<access_mode>(to, marking_state(to));
+ DCHECK(success);
+ USE(success);
+ } else if (ObjectMarking::IsBlack<access_mode>(from, marking_state(from))) {
+ bool success =
+ ObjectMarking::WhiteToBlack<access_mode>(to, marking_state(to));
+ DCHECK(success);
+ USE(success);
+ }
+ }
+
State state() {
DCHECK(state_ == STOPPED || FLAG_incremental_marking);
return state_;
@@ -126,6 +182,12 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static const intptr_t kActivationThreshold = 0;
#endif
+#ifdef V8_CONCURRENT_MARKING
+ static const MarkBit::AccessMode kAtomicity = MarkBit::AccessMode::ATOMIC;
+#else
+ static const MarkBit::AccessMode kAtomicity = MarkBit::AccessMode::NON_ATOMIC;
+#endif
+
void FinalizeSweeping();
size_t Step(size_t bytes_to_process, CompletionAction action,
@@ -157,7 +219,14 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
void RecordCodeTargetPatch(Address pc, HeapObject* value);
- void WhiteToGreyAndPush(HeapObject* obj);
+ // Returns true if the function succeeds in transitioning the object
+ // from white to grey.
+ bool WhiteToGreyAndPush(HeapObject* obj);
+
+ // This function is used to color the object black before it undergoes an
+  // unsafe layout change. This is part of the synchronization protocol with
+ // the concurrent marker.
+ void MarkBlackAndPush(HeapObject* obj);
inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
@@ -179,26 +248,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool IsIdleMarkingDelayCounterLimitReached();
- static void MarkGrey(Heap* heap, HeapObject* object);
-
- static void MarkBlack(HeapObject* object, int size);
-
- static void TransferMark(Heap* heap, HeapObject* from, HeapObject* to);
-
- V8_INLINE static void TransferColor(HeapObject* from, HeapObject* to) {
- if (ObjectMarking::IsBlack(to, MarkingState::Internal(to))) {
- DCHECK(to->GetHeap()->incremental_marking()->black_allocation());
- return;
- }
-
- DCHECK(ObjectMarking::IsWhite(to, MarkingState::Internal(to)));
- if (ObjectMarking::IsGrey(from, MarkingState::Internal(from))) {
- ObjectMarking::WhiteToGrey(to, MarkingState::Internal(to));
- } else if (ObjectMarking::IsBlack(from, MarkingState::Internal(from))) {
- ObjectMarking::WhiteToBlack(to, MarkingState::Internal(to));
- }
- }
-
void IterateBlackObject(HeapObject* object);
Heap* heap() const { return heap_; }
@@ -213,6 +262,15 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void AbortBlackAllocation();
+ MarkingDeque* marking_deque() {
+ SLOW_DCHECK(marking_deque_ != nullptr);
+ return marking_deque_;
+ }
+
+ void set_marking_deque(MarkingDeque* marking_deque) {
+ marking_deque_ = marking_deque;
+ }
+
private:
class Observer : public AllocationObserver {
public:
@@ -220,9 +278,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
: AllocationObserver(step_size),
incremental_marking_(incremental_marking) {}
- void Step(int bytes_allocated, Address, size_t) override {
- incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
- }
+ void Step(int bytes_allocated, Address, size_t) override;
private:
IncrementalMarking& incremental_marking_;
@@ -233,6 +289,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void StartMarking();
void StartBlackAllocation();
+ void PauseBlackAllocation();
void FinishBlackAllocation();
void MarkRoots();
@@ -260,6 +317,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
intptr_t bytes_to_process,
ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION));
+ INLINE(bool IsFixedArrayWithProgressBar(HeapObject* object));
INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
void IncrementIdleMarkingDelayCounter();
@@ -270,8 +328,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t StepSizeToMakeProgress();
Heap* heap_;
-
- State state_;
+ MarkingDeque* marking_deque_;
double start_time_ms_;
size_t initial_old_generation_size_;
@@ -280,6 +337,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
size_t bytes_marked_ahead_of_schedule_;
size_t unscanned_bytes_of_large_object_;
+ State state_;
+
int idle_marking_delay_counter_;
int incremental_marking_finalization_rounds_;
@@ -289,10 +348,10 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool black_allocation_;
bool finalize_marking_completed_;
bool trace_wrappers_toggle_;
+ IncrementalMarkingJob incremental_marking_job_;
GCRequestType request_type_;
- IncrementalMarkingJob incremental_marking_job_;
Observer new_generation_observer_;
Observer old_generation_observer_;
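The new PauseBlackAllocationScope only pauses if black allocation is currently active, and it restarts it on destruction, so the scope nests safely. A usage sketch (the function name is hypothetical):

namespace v8 {
namespace internal {

// Wraps work, such as a scavenge, that must not observe black-allocated
// pages. No-op if black allocation was already off.
void RunWithoutBlackAllocation(IncrementalMarking* marking) {
  IncrementalMarking::PauseBlackAllocationScope pause(marking);
  // ... evacuate or iterate new space here ...
}  // Black allocation resumes here if it was active on entry.

}  // namespace internal
}  // namespace v8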
diff --git a/deps/v8/src/heap/item-parallel-job.h b/deps/v8/src/heap/item-parallel-job.h
new file mode 100644
index 0000000000..c3228403ff
--- /dev/null
+++ b/deps/v8/src/heap/item-parallel-job.h
@@ -0,0 +1,178 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_ITEM_PARALLEL_JOB_
+#define V8_HEAP_ITEM_PARALLEL_JOB_
+
+#include <vector>
+
+#include "src/base/platform/semaphore.h"
+#include "src/cancelable-task.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// This class manages background tasks that process a set of items in parallel.
+// The first task added is executed on the thread that calls |job.Run()|.
+// All other tasks are scheduled in the background.
+//
+// - Items need to inherit from ItemParallelJob::Item.
+// - Tasks need to inherit from ItemParallelJob::Task.
+//
+// Items need to be marked as finished after processing them. Task and Item
+// ownership is transferred to the job.
+class ItemParallelJob {
+ public:
+ class Task;
+
+ class Item {
+ public:
+ Item() : state_(kAvailable) {}
+ virtual ~Item() {}
+
+ // Marks an item as being finished.
+ void MarkFinished() { CHECK(state_.TrySetValue(kProcessing, kFinished)); }
+
+ private:
+ enum ProcessingState { kAvailable, kProcessing, kFinished };
+
+ bool TryMarkingAsProcessing() {
+ return state_.TrySetValue(kAvailable, kProcessing);
+ }
+ bool IsFinished() { return state_.Value() == kFinished; }
+
+ base::AtomicValue<ProcessingState> state_;
+
+ friend class ItemParallelJob;
+ friend class ItemParallelJob::Task;
+
+ DISALLOW_COPY_AND_ASSIGN(Item);
+ };
+
+ class Task : public CancelableTask {
+ public:
+ explicit Task(Isolate* isolate)
+ : CancelableTask(isolate),
+ items_(nullptr),
+ cur_index_(0),
+ items_considered_(0),
+ on_finish_(nullptr) {}
+ virtual ~Task() {}
+
+ virtual void RunInParallel() = 0;
+
+ protected:
+ // Retrieves a new item that needs to be processed. Returns |nullptr| if
+ // all items are processed. Upon returning an item, the task is required
+ // to process the item and mark the item as finished after doing so.
+ template <class ItemType>
+ ItemType* GetItem() {
+ while (items_considered_++ != items_->size()) {
+ // Wrap around.
+ if (cur_index_ == items_->size()) {
+ cur_index_ = 0;
+ }
+ Item* item = (*items_)[cur_index_++];
+ if (item->TryMarkingAsProcessing()) {
+ return static_cast<ItemType*>(item);
+ }
+ }
+ return nullptr;
+ }
+
+ private:
+ void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
+ size_t start_index) {
+ on_finish_ = on_finish;
+ items_ = items;
+ cur_index_ = start_index;
+ }
+
+ // We don't allow overriding this method any further.
+ void RunInternal() final {
+ RunInParallel();
+ on_finish_->Signal();
+ }
+
+ std::vector<Item*>* items_;
+ size_t cur_index_;
+ size_t items_considered_;
+ base::Semaphore* on_finish_;
+
+ friend class ItemParallelJob;
+ friend class Item;
+
+ DISALLOW_COPY_AND_ASSIGN(Task);
+ };
+
+ ItemParallelJob(CancelableTaskManager* cancelable_task_manager,
+ base::Semaphore* pending_tasks)
+ : cancelable_task_manager_(cancelable_task_manager),
+ pending_tasks_(pending_tasks) {}
+
+ ~ItemParallelJob() {
+ for (size_t i = 0; i < items_.size(); i++) {
+ Item* item = items_[i];
+ CHECK(item->IsFinished());
+ delete item;
+ }
+ }
+
+ // Adds a task to the job. Transfers ownership to the job.
+ void AddTask(Task* task) { tasks_.push_back(task); }
+
+ // Adds an item to the job. Transfers ownership to the job.
+ void AddItem(Item* item) { items_.push_back(item); }
+
+ void Run() {
+    DCHECK_GT(tasks_.size(), 0);  // Guards the division by num_tasks below.
+ const size_t num_tasks = tasks_.size();
+ const size_t num_items = items_.size();
+ const size_t items_per_task = (num_items + num_tasks - 1) / num_tasks;
+ uint32_t* task_ids = new uint32_t[num_tasks];
+ size_t start_index = 0;
+ Task* main_task = nullptr;
+ Task* task = nullptr;
+ for (size_t i = 0; i < num_tasks; i++, start_index += items_per_task) {
+ task = tasks_[i];
+ if (start_index >= num_items) {
+ start_index -= num_items;
+ }
+ task->SetupInternal(pending_tasks_, &items_, start_index);
+ task_ids[i] = task->id();
+ if (i > 0) {
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ } else {
+ main_task = task;
+ }
+ }
+ // Contribute on main thread.
+ main_task->Run();
+ delete main_task;
+ // Wait for background tasks.
+ for (size_t i = 0; i < num_tasks; i++) {
+ if (cancelable_task_manager_->TryAbort(task_ids[i]) !=
+ CancelableTaskManager::kTaskAborted) {
+ pending_tasks_->Wait();
+ }
+ }
+ delete[] task_ids;
+ }
+
+ private:
+ std::vector<Item*> items_;
+ std::vector<Task*> tasks_;
+ CancelableTaskManager* cancelable_task_manager_;
+ base::Semaphore* pending_tasks_;
+ DISALLOW_COPY_AND_ASSIGN(ItemParallelJob);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_ITEM_PARALLEL_JOB_
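A usage sketch for the new job abstraction; the item and task types below are hypothetical. Tasks pull items with GetItem() until it returns nullptr and must call MarkFinished() on each one, which the job's destructor CHECKs:

namespace v8 {
namespace internal {

class Page;  // From src/heap/spaces.h; used here only as an opaque payload.

class SweepItem : public ItemParallelJob::Item {
 public:
  explicit SweepItem(Page* page) : page_(page) {}
  Page* page() const { return page_; }

 private:
  Page* page_;
};

class SweepTask : public ItemParallelJob::Task {
 public:
  explicit SweepTask(Isolate* isolate) : ItemParallelJob::Task(isolate) {}

  void RunInParallel() override {
    // Each task keeps taking whatever items are still available,
    // wrapping around the shared item vector.
    while (SweepItem* item = GetItem<SweepItem>()) {
      // ... process item->page() ...
      item->MarkFinished();
    }
  }
};

}  // namespace internal
}  // namespace v8

Run() then executes the first task inline on the calling thread, schedules the rest through the platform, and waits on the semaphore for every task it fails to abort.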
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index cef381b1f8..b8e4d46fc3 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -21,15 +21,6 @@ void MarkCompactCollector::PushBlack(HeapObject* obj) {
}
}
-void MinorMarkCompactCollector::PushBlack(HeapObject* obj) {
- DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>(
- obj, MarkingState::External(obj))));
- if (!marking_deque()->Push(obj)) {
- ObjectMarking::BlackToGrey<MarkBit::NON_ATOMIC>(
- obj, MarkingState::External(obj));
- }
-}
-
void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
DCHECK(ObjectMarking::IsBlack(obj, MarkingState::Internal(obj)));
if (!marking_deque()->Unshift(obj)) {
@@ -38,19 +29,8 @@ void MarkCompactCollector::UnshiftBlack(HeapObject* obj) {
}
void MarkCompactCollector::MarkObject(HeapObject* obj) {
- if (ObjectMarking::IsWhite<MarkBit::NON_ATOMIC>(
+ if (ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
obj, MarkingState::Internal(obj))) {
- ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
- obj, MarkingState::Internal(obj));
- PushBlack(obj);
- }
-}
-
-void MinorMarkCompactCollector::MarkObject(HeapObject* obj) {
- if (ObjectMarking::IsWhite<MarkBit::NON_ATOMIC>(
- obj, MarkingState::External(obj))) {
- ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
- obj, MarkingState::External(obj));
PushBlack(obj);
}
}
@@ -127,6 +107,28 @@ void CodeFlusher::ClearNextCandidate(SharedFunctionInfo* candidate) {
candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
}
+void CodeFlusher::VisitListHeads(RootVisitor* visitor) {
+ visitor->VisitRootPointer(
+ Root::kCodeFlusher,
+ reinterpret_cast<Object**>(&jsfunction_candidates_head_));
+ visitor->VisitRootPointer(
+ Root::kCodeFlusher,
+ reinterpret_cast<Object**>(&shared_function_info_candidates_head_));
+}
+
+template <typename StaticVisitor>
+void CodeFlusher::IteratePointersToFromSpace() {
+ Heap* heap = isolate_->heap();
+ JSFunction* candidate = jsfunction_candidates_head_;
+ while (candidate != nullptr) {
+ JSFunction** slot = GetNextCandidateSlot(candidate);
+ if (heap->InFromSpace(*slot)) {
+ StaticVisitor::VisitPointer(heap, candidate,
+ reinterpret_cast<Object**>(slot));
+ }
+ candidate = GetNextCandidate(candidate);
+ }
+}
template <LiveObjectIterationMode T>
HeapObject* LiveObjectIterator<T>::Next() {
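
The MarkObject() change above leans on WhiteToBlack() being a test-and-set whose return value says whether this caller performed the transition, so PushBlack() runs exactly once per object. A toy sketch of that shape using std::atomic (an illustration only; V8's MarkBit packs mark bits into per-page bitmaps):

    #include <atomic>
    #include <cstdint>

    // Toy one-bit mark: 0 = white, 1 = black.
    struct ToyMark {
      std::atomic<uint8_t> bit{0};

      // Returns true only for the caller that wins the transition, so the
      // follow-up side effect (pushing onto a marking deque) happens once
      // even if several markers race on the same object.
      bool WhiteToBlack() {
        uint8_t expected = 0;
        return bit.compare_exchange_strong(expected, 1);
      }
    };

The removed check-then-set sequence (IsWhite() followed by an unconditional WhiteToBlack()) is only safe single-threaded; two concurrent markers could both observe white and both push the object.
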
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index fe886ac813..6bb7d3e352 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -18,12 +18,14 @@
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
+#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/page-parallel-job.h"
#include "src/heap/spaces-inl.h"
+#include "src/heap/workstealing-marking-deque.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/tracing/tracing-category-observer.h"
@@ -51,7 +53,7 @@ STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
#ifdef VERIFY_HEAP
namespace {
-class MarkingVerifier : public ObjectVisitor {
+class MarkingVerifier : public ObjectVisitor, public RootVisitor {
public:
virtual void Run() = 0;
@@ -60,6 +62,16 @@ class MarkingVerifier : public ObjectVisitor {
virtual MarkingState marking_state(MemoryChunk* chunk) = 0;
+ virtual void VerifyPointers(Object** start, Object** end) = 0;
+
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ VerifyPointers(start, end);
+ }
+
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
+ VerifyPointers(start, end);
+ }
+
void VerifyRoots(VisitMode mode);
void VerifyMarkingOnPage(const Page& page, const MarkingState& state,
Address start, Address end);
@@ -152,7 +164,7 @@ class FullMarkingVerifier : public MarkingVerifier {
return MarkingState::Internal(object);
}
- void VisitPointers(Object** start, Object** end) override {
+ void VerifyPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
@@ -161,19 +173,18 @@ class FullMarkingVerifier : public MarkingVerifier {
}
}
- void VisitEmbeddedPointer(RelocInfo* rinfo) override {
+ void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
+ if (!host->IsWeakObject(rinfo->target_object())) {
Object* p = rinfo->target_object();
- VisitPointer(&p);
+ VisitPointer(host, &p);
}
}
- void VisitCell(RelocInfo* rinfo) override {
- Code* code = rinfo->host();
+ void VisitCellPointer(Code* host, RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::CELL);
- if (!code->IsWeakObject(rinfo->target_cell())) {
- ObjectVisitor::VisitCell(rinfo);
+ if (!host->IsWeakObject(rinfo->target_cell())) {
+ ObjectVisitor::VisitCellPointer(host, rinfo);
}
}
};
@@ -195,7 +206,7 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
VerifyMarking(heap_->new_space());
}
- void VisitPointers(Object** start, Object** end) override {
+ void VerifyPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
@@ -206,22 +217,25 @@ class YoungGenerationMarkingVerifier : public MarkingVerifier {
}
};
-class EvacuationVerifier : public ObjectVisitor {
+class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
public:
virtual void Run() = 0;
- void VisitPointers(Object** start, Object** end) override {
- for (Object** current = start; current < end; current++) {
- if ((*current)->IsHeapObject()) {
- HeapObject* object = HeapObject::cast(*current);
- CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
- }
- }
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ VerifyPointers(start, end);
+ }
+
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
+ VerifyPointers(start, end);
}
protected:
explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
+ inline Heap* heap() { return heap_; }
+
+ virtual void VerifyPointers(Object** start, Object** end) = 0;
+
void VerifyRoots(VisitMode mode);
void VerifyEvacuationOnPage(Address start, Address end);
void VerifyEvacuation(NewSpace* new_space);
@@ -277,18 +291,79 @@ class FullEvacuationVerifier : public EvacuationVerifier {
VerifyEvacuation(heap_->code_space());
VerifyEvacuation(heap_->map_space());
}
+
+ protected:
+ void VerifyPointers(Object** start, Object** end) override {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ if (heap()->InNewSpace(object)) {
+ CHECK(heap()->InToSpace(object));
+ }
+ CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
+ }
+ }
+ }
+};
+
+class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
+ public:
+ explicit YoungGenerationEvacuationVerifier(Heap* heap)
+ : EvacuationVerifier(heap) {}
+
+ void Run() override {
+ VerifyRoots(VISIT_ALL_IN_SCAVENGE);
+ VerifyEvacuation(heap_->new_space());
+ VerifyEvacuation(heap_->old_space());
+ VerifyEvacuation(heap_->code_space());
+ VerifyEvacuation(heap_->map_space());
+ }
+
+ protected:
+ void VerifyPointers(Object** start, Object** end) override {
+ for (Object** current = start; current < end; current++) {
+ if ((*current)->IsHeapObject()) {
+ HeapObject* object = HeapObject::cast(*current);
+ CHECK_IMPLIES(heap()->InNewSpace(object), heap()->InToSpace(object));
+ }
+ }
+ }
};
} // namespace
#endif // VERIFY_HEAP
// =============================================================================
-// MarkCompactCollector
+// MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
// =============================================================================
+static int NumberOfAvailableCores() {
+ return Max(
+ 1, static_cast<int>(
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
+}
+
+int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
+ return FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
+}
+
+int MarkCompactCollectorBase::NumberOfPointerUpdateTasks(int pages) {
+ // Limit the number of update tasks as task creation often dominates the
+ // actual work that is being done.
+ static const int kMaxPointerUpdateTasks = 8;
+ return FLAG_parallel_pointer_update
+ ? Min(kMaxPointerUpdateTasks, Min(NumberOfAvailableCores(), pages))
+ : 1;
+}
+
+int MinorMarkCompactCollector::NumberOfMarkingTasks() {
+ return FLAG_minor_mc_parallel_marking
+ ? Min(NumberOfAvailableCores(), kNumMarkers)
+ : 1;
+}
+
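
These helpers clamp parallelism to the smaller of available cores and pages, and additionally cap pointer-update tasks at eight since spawning a task can cost more than the pointer updates on a sparsely populated page. The same clamping as a standalone sketch, substituting std::thread::hardware_concurrency() for the platform's NumberOfAvailableBackgroundThreads() (an assumption made for illustration):

    #include <algorithm>
    #include <thread>

    int AvailableCores() {
      // hardware_concurrency() may report 0 when unknown; never go below 1.
      return static_cast<int>(std::max(1u, std::thread::hardware_concurrency()));
    }

    int PointerUpdateTasks(bool parallel, int pages) {
      static const int kMaxPointerUpdateTasks = 8;
      return parallel ? std::min(kMaxPointerUpdateTasks,
                                 std::min(AvailableCores(), pages))
                      : 1;
    }
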
MarkCompactCollector::MarkCompactCollector(Heap* heap)
- : // NOLINT
- heap_(heap),
+ : MarkCompactCollectorBase(heap),
page_parallel_job_semaphore_(0),
#ifdef DEBUG
state_(IDLE),
@@ -318,7 +393,7 @@ void MarkCompactCollector::SetUp() {
}
}
-void MinorMarkCompactCollector::SetUp() { marking_deque()->SetUp(); }
+void MinorMarkCompactCollector::SetUp() {}
void MarkCompactCollector::TearDown() {
AbortCompaction();
@@ -326,7 +401,7 @@ void MarkCompactCollector::TearDown() {
delete code_flusher_;
}
-void MinorMarkCompactCollector::TearDown() { marking_deque()->TearDown(); }
+void MinorMarkCompactCollector::TearDown() {}
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
DCHECK(!p->NeverEvacuate());
@@ -371,6 +446,8 @@ void MarkCompactCollector::CollectGarbage() {
// update the state as they proceed.
DCHECK(state_ == PREPARE_GC);
+ heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
+
MarkLiveObjects();
DCHECK(heap_->incremental_marking()->IsStopped());
@@ -388,7 +465,7 @@ void MarkCompactCollector::CollectGarbage() {
StartSweepSpaces();
- EvacuateNewSpaceAndCandidates();
+ Evacuate();
Finish();
}
@@ -616,25 +693,6 @@ bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
return num_sweeping_tasks_.Value() != 0;
}
-const char* AllocationSpaceName(AllocationSpace space) {
- switch (space) {
- case NEW_SPACE:
- return "NEW_SPACE";
- case OLD_SPACE:
- return "OLD_SPACE";
- case CODE_SPACE:
- return "CODE_SPACE";
- case MAP_SPACE:
- return "MAP_SPACE";
- case LO_SPACE:
- return "LO_SPACE";
- default:
- UNREACHABLE();
- }
-
- return NULL;
-}
-
void MarkCompactCollector::ComputeEvacuationHeuristics(
size_t area_size, int* target_fragmentation_percent,
size_t* max_evacuated_bytes) {
@@ -976,9 +1034,9 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
shared->ShortPrint();
PrintF(" - age: %d]\n", code->GetAge());
}
- // Always flush the optimized code map if there is one.
- if (!shared->OptimizedCodeMapIsCleared()) {
- shared->ClearOptimizedCodeMap();
+ // Always flush the optimized code.
+ if (candidate->has_feedback_vector()) {
+ candidate->feedback_vector()->ClearOptimizedCode();
}
if (shared->HasBytecodeArray()) {
shared->set_code(interpreter_entry_trampoline);
@@ -1028,10 +1086,6 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
candidate->ShortPrint();
PrintF(" - age: %d]\n", code->GetAge());
}
- // Always flush the optimized code map if there is one.
- if (!candidate->OptimizedCodeMapIsCleared()) {
- candidate->ClearOptimizedCodeMap();
- }
if (candidate->HasBytecodeArray()) {
candidate->set_code(interpreter_entry_trampoline);
} else {
@@ -1121,52 +1175,6 @@ void CodeFlusher::EvictCandidate(JSFunction* function) {
}
}
-
-void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
- Heap* heap = isolate_->heap();
-
- JSFunction** slot = &jsfunction_candidates_head_;
- JSFunction* candidate = jsfunction_candidates_head_;
- while (candidate != NULL) {
- if (heap->InFromSpace(candidate)) {
- v->VisitPointer(reinterpret_cast<Object**>(slot));
- }
- candidate = GetNextCandidate(*slot);
- slot = GetNextCandidateSlot(*slot);
- }
-}
-
-class StaticYoungGenerationMarkingVisitor
- : public StaticNewSpaceVisitor<StaticYoungGenerationMarkingVisitor> {
- public:
- static void Initialize(Heap* heap) {
- StaticNewSpaceVisitor<StaticYoungGenerationMarkingVisitor>::Initialize();
- }
-
- inline static void VisitPointer(Heap* heap, HeapObject* object, Object** p) {
- Object* target = *p;
- if (heap->InNewSpace(target)) {
- HeapObject* target_object = HeapObject::cast(target);
- if (MarkRecursively(heap, target_object)) return;
- heap->minor_mark_compact_collector()->MarkObject(target_object);
- }
- }
-
- protected:
- inline static bool MarkRecursively(Heap* heap, HeapObject* object) {
- StackLimitCheck check(heap->isolate());
- if (check.HasOverflowed()) return false;
-
- if (ObjectMarking::IsBlackOrGrey<MarkBit::NON_ATOMIC>(
- object, MarkingState::External(object)))
- return true;
- ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
- object, MarkingState::External(object));
- IterateBody(object->map(), object);
- return true;
- }
-};
-
class MarkCompactMarkingVisitor
: public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
public:
@@ -1198,11 +1206,7 @@ class MarkCompactMarkingVisitor
// Marks the object black without pushing it on the marking stack.
// Returns true if object needed marking and false otherwise.
INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
- if (ObjectMarking::IsWhite(object, MarkingState::Internal(object))) {
- ObjectMarking::WhiteToBlack(object, MarkingState::Internal(object));
- return true;
- }
- return false;
+ return ObjectMarking::WhiteToBlack(object, MarkingState::Internal(object));
}
// Mark object pointed to by p.
@@ -1220,14 +1224,15 @@ class MarkCompactMarkingVisitor
HeapObject* obj)) {
#ifdef DEBUG
DCHECK(collector->heap()->Contains(obj));
- DCHECK(ObjectMarking::IsWhite(obj, MarkingState::Internal(obj)));
#endif
- Map* map = obj->map();
- Heap* heap = obj->GetHeap();
- ObjectMarking::WhiteToBlack(obj, MarkingState::Internal(obj));
- // Mark the map pointer and the body.
- heap->mark_compact_collector()->MarkObject(map);
- IterateBody(map, obj);
+ if (ObjectMarking::WhiteToBlack(obj, MarkingState::Internal(obj))) {
+ Map* map = obj->map();
+ Heap* heap = obj->GetHeap();
+ // The white-to-black transition above already marked the object; mark
+ // the map pointer and the body.
+ heap->mark_compact_collector()->MarkObject(map);
+ IterateBody(map, obj);
+ }
}
// Visit all unmarked objects pointed to by [start, end).
@@ -1245,8 +1250,6 @@ class MarkCompactMarkingVisitor
if (!o->IsHeapObject()) continue;
collector->RecordSlot(object, p, o);
HeapObject* obj = HeapObject::cast(o);
- if (ObjectMarking::IsBlackOrGrey(obj, MarkingState::Internal(obj)))
- continue;
VisitUnmarkedObject(collector, obj);
}
return true;
@@ -1349,17 +1352,28 @@ class CodeMarkingVisitor : public ThreadVisitor {
MarkCompactCollector* collector_;
};
-
-class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
+class SharedFunctionInfoMarkingVisitor : public ObjectVisitor,
+ public RootVisitor {
public:
explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitPointers(Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) VisitPointer(p);
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ for (Object** p = start; p < end; p++) MarkObject(p);
+ }
+
+ void VisitPointer(HeapObject* host, Object** slot) override {
+ MarkObject(slot);
}
- void VisitPointer(Object** slot) override {
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
+ for (Object** p = start; p < end; p++) MarkObject(p);
+ }
+
+ void VisitRootPointer(Root root, Object** slot) override { MarkObject(slot); }
+
+ private:
+ void MarkObject(Object** slot) {
Object* obj = *slot;
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
@@ -1367,8 +1381,6 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
collector_->MarkObject(shared);
}
}
-
- private:
MarkCompactCollector* collector_;
};
@@ -1413,59 +1425,44 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
ProcessMarkingDeque();
}
-class MinorMarkCompactCollector::RootMarkingVisitor : public ObjectVisitor {
- public:
- explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
- : collector_(collector) {}
-
- void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
-
- void VisitPointers(Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
- }
-
- // Skip the weak next code link in a code object, which is visited in
- // ProcessTopOptimizedFrame.
- void VisitNextCodeLink(Object** p) override {}
-
- private:
- void MarkObjectByPointer(Object** p) {
- if (!(*p)->IsHeapObject()) return;
-
- HeapObject* object = HeapObject::cast(*p);
-
- if (!collector_->heap()->InNewSpace(object)) return;
-
- if (ObjectMarking::IsBlackOrGrey<MarkBit::NON_ATOMIC>(
- object, MarkingState::External(object)))
- return;
-
- Map* map = object->map();
- ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
- object, MarkingState::External(object));
- StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
-
- collector_->EmptyMarkingDeque();
+void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
+ for (Page* p : sweep_to_iterate_pages_) {
+ if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
+ p->ClearFlag(Page::SWEEP_TO_ITERATE);
+ marking_state(p).ClearLiveness();
+ }
}
-
- MinorMarkCompactCollector* collector_;
-};
+ sweep_to_iterate_pages_.clear();
+}
// Visitor class for marking heap roots.
-class MarkCompactCollector::RootMarkingVisitor : public ObjectVisitor {
+// TODO(ulan): Remove ObjectVisitor base class after fixing marking of
+// the string table and the top optimized code.
+class MarkCompactCollector::RootMarkingVisitor : public ObjectVisitor,
+ public RootVisitor {
public:
explicit RootMarkingVisitor(Heap* heap)
: collector_(heap->mark_compact_collector()) {}
- void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
+ void VisitPointer(HeapObject* host, Object** p) override {
+ MarkObjectByPointer(p);
+ }
+
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+ }
+
+ void VisitRootPointer(Root root, Object** p) override {
+ MarkObjectByPointer(p);
+ }
- void VisitPointers(Object** start, Object** end) override {
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
// Skip the weak next code link in a code object, which is visited in
// ProcessTopOptimizedFrame.
- void VisitNextCodeLink(Object** p) override {}
+ void VisitNextCodeLink(Code* host, Object** p) override {}
private:
void MarkObjectByPointer(Object** p) {
@@ -1473,59 +1470,40 @@ class MarkCompactCollector::RootMarkingVisitor : public ObjectVisitor {
HeapObject* object = HeapObject::cast(*p);
- if (ObjectMarking::IsBlackOrGrey<MarkBit::NON_ATOMIC>(
- object, MarkingState::Internal(object)))
- return;
-
- Map* map = object->map();
- // Mark the object.
- ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
- object, MarkingState::Internal(object));
-
- // Mark the map pointer and body, and push them on the marking stack.
- collector_->MarkObject(map);
- MarkCompactMarkingVisitor::IterateBody(map, object);
-
- // Mark all the objects reachable from the map and body. May leave
- // overflowed objects in the heap.
- collector_->EmptyMarkingDeque();
+ if (ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
+ object, MarkingState::Internal(object))) {
+ Map* map = object->map();
+ // Mark the map pointer and body, and push them on the marking stack.
+ collector_->MarkObject(map);
+ MarkCompactMarkingVisitor::IterateBody(map, object);
+ // Mark all the objects reachable from the map and body. May leave
+ // overflowed objects in the heap.
+ collector_->EmptyMarkingDeque();
+ }
}
MarkCompactCollector* collector_;
};
-
-// Helper class for pruning the string table.
-template <bool finalize_external_strings, bool record_slots>
-class StringTableCleaner : public ObjectVisitor {
+class InternalizedStringTableCleaner : public ObjectVisitor {
public:
- StringTableCleaner(Heap* heap, HeapObject* table)
- : heap_(heap), pointers_removed_(0), table_(table) {
- DCHECK(!record_slots || table != nullptr);
- }
+ InternalizedStringTableCleaner(Heap* heap, HeapObject* table)
+ : heap_(heap), pointers_removed_(0), table_(table) {}
- void VisitPointers(Object** start, Object** end) override {
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
MarkCompactCollector* collector = heap_->mark_compact_collector();
+ Object* the_hole = heap_->the_hole_value();
for (Object** p = start; p < end; p++) {
Object* o = *p;
if (o->IsHeapObject()) {
HeapObject* heap_object = HeapObject::cast(o);
if (ObjectMarking::IsWhite(heap_object,
MarkingState::Internal(heap_object))) {
- if (finalize_external_strings) {
- if (o->IsExternalString()) {
- heap_->FinalizeExternalString(String::cast(*p));
- } else {
- // The original external string may have been internalized.
- DCHECK(o->IsThinString());
- }
- } else {
- pointers_removed_++;
- }
+ pointers_removed_++;
// Set the entry to the_hole_value (as deleted).
- *p = heap_->the_hole_value();
- } else if (record_slots) {
+ *p = the_hole;
+ } else {
// StringTable contains only old space strings.
DCHECK(!heap_->InNewSpace(o));
collector->RecordSlot(table_, p, o);
@@ -1535,7 +1513,6 @@ class StringTableCleaner : public ObjectVisitor {
}
int PointersRemoved() {
- DCHECK(!finalize_external_strings);
return pointers_removed_;
}
@@ -1545,8 +1522,95 @@ class StringTableCleaner : public ObjectVisitor {
HeapObject* table_;
};
-typedef StringTableCleaner<false, true> InternalizedStringTableCleaner;
-typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
+class ExternalStringTableCleaner : public RootVisitor {
+ public:
+ explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
+
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
+ // Visit all HeapObject pointers in [start, end).
+ Object* the_hole = heap_->the_hole_value();
+ for (Object** p = start; p < end; p++) {
+ Object* o = *p;
+ if (o->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(o);
+ if (ObjectMarking::IsWhite(heap_object,
+ MarkingState::Internal(heap_object))) {
+ if (o->IsExternalString()) {
+ heap_->FinalizeExternalString(String::cast(*p));
+ } else {
+ // The original external string may have been internalized.
+ DCHECK(o->IsThinString());
+ }
+ // Set the entry to the_hole_value (as deleted).
+ *p = the_hole;
+ }
+ }
+ }
+ }
+
+ private:
+ Heap* heap_;
+};
+
+// Helper class for pruning the string table.
+class YoungGenerationExternalStringTableCleaner : public RootVisitor {
+ public:
+ YoungGenerationExternalStringTableCleaner(
+ const MinorMarkCompactCollector& collector)
+ : heap_(collector.heap()), collector_(collector) {}
+
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
+ DCHECK_EQ(static_cast<int>(root),
+ static_cast<int>(Root::kExternalStringsTable));
+ // Visit all HeapObject pointers in [start, end).
+ for (Object** p = start; p < end; p++) {
+ Object* o = *p;
+ if (o->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(o);
+ if (ObjectMarking::IsWhite(heap_object,
+ collector_.marking_state(heap_object))) {
+ if (o->IsExternalString()) {
+ heap_->FinalizeExternalString(String::cast(*p));
+ } else {
+ // The original external string may have been internalized.
+ DCHECK(o->IsThinString());
+ }
+ // Set the entry to the_hole_value (as deleted).
+ *p = heap_->the_hole_value();
+ }
+ }
+ }
+ }
+
+ private:
+ Heap* heap_;
+ const MinorMarkCompactCollector& collector_;
+};
+
+// Marked young generation objects and all old generation objects will be
+// retained.
+class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+ explicit MinorMarkCompactWeakObjectRetainer(
+ const MinorMarkCompactCollector& collector)
+ : collector_(collector) {}
+
+ virtual Object* RetainAs(Object* object) {
+ HeapObject* heap_object = HeapObject::cast(object);
+ if (!collector_.heap()->InNewSpace(heap_object)) return object;
+
+ DCHECK(!ObjectMarking::IsGrey(heap_object,
+ collector_.marking_state(heap_object)));
+ if (ObjectMarking::IsBlack(heap_object,
+ collector_.marking_state(heap_object))) {
+ return object;
+ }
+ return nullptr;
+ }
+
+ private:
+ const MinorMarkCompactCollector& collector_;
+};
// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
// are retained.
@@ -1586,8 +1650,7 @@ void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
Map* filler_map = heap()->one_pointer_filler_map();
for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
if ((object->map() != filler_map) &&
- ObjectMarking::IsGrey(object, MarkingState::Internal(object))) {
- ObjectMarking::GreyToBlack(object, MarkingState::Internal(object));
+ ObjectMarking::GreyToBlack(object, MarkingState::Internal(object))) {
PushBlack(object);
if (marking_deque()->IsFull()) return;
}
@@ -1599,30 +1662,34 @@ void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
LiveObjectIterator<kGreyObjects> it(p, MarkingState::Internal(p));
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
- DCHECK(ObjectMarking::IsGrey(object, MarkingState::Internal(object)));
- ObjectMarking::GreyToBlack(object, MarkingState::Internal(object));
+ bool success =
+ ObjectMarking::GreyToBlack(object, MarkingState::Internal(object));
+ DCHECK(success);
+ USE(success);
PushBlack(object);
if (marking_deque()->IsFull()) return;
}
}
-class RecordMigratedSlotVisitor final : public ObjectVisitor {
+class RecordMigratedSlotVisitor : public ObjectVisitor {
public:
explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- inline void VisitPointer(Object** p) final {
- RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
+ inline void VisitPointer(HeapObject* host, Object** p) final {
+ RecordMigratedSlot(host, *p, reinterpret_cast<Address>(p));
}
- inline void VisitPointers(Object** start, Object** end) final {
+ inline void VisitPointers(HeapObject* host, Object** start,
+ Object** end) final {
while (start < end) {
- RecordMigratedSlot(*start, reinterpret_cast<Address>(start));
+ RecordMigratedSlot(host, *start, reinterpret_cast<Address>(start));
++start;
}
}
- inline void VisitCodeEntry(Address code_entry_slot) final {
+ inline void VisitCodeEntry(JSFunction* host,
+ Address code_entry_slot) override {
Address code_entry = Memory::Address_at(code_entry_slot);
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
@@ -1631,47 +1698,47 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
}
}
- inline void VisitCodeTarget(RelocInfo* rinfo) final {
+ inline void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
+ DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Code* host = rinfo->host();
// The target is always in old space, we don't have to record the slot in
// the old-to-new remembered set.
DCHECK(!collector_->heap()->InNewSpace(target));
collector_->RecordRelocSlot(host, rinfo, target);
}
- inline void VisitDebugTarget(RelocInfo* rinfo) final {
+ inline void VisitDebugTarget(Code* host, RelocInfo* rinfo) override {
+ DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence());
Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
- Code* host = rinfo->host();
// The target is always in old space, we don't have to record the slot in
// the old-to-new remembered set.
DCHECK(!collector_->heap()->InNewSpace(target));
collector_->RecordRelocSlot(host, rinfo, target);
}
- inline void VisitEmbeddedPointer(RelocInfo* rinfo) final {
+ inline void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
+ DCHECK_EQ(host, rinfo->host());
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
HeapObject* object = HeapObject::cast(rinfo->target_object());
- Code* host = rinfo->host();
collector_->heap()->RecordWriteIntoCode(host, rinfo, object);
collector_->RecordRelocSlot(host, rinfo, object);
}
- inline void VisitCell(RelocInfo* rinfo) final {
+ inline void VisitCellPointer(Code* host, RelocInfo* rinfo) override {
+ DCHECK_EQ(host, rinfo->host());
DCHECK(rinfo->rmode() == RelocInfo::CELL);
Cell* cell = rinfo->target_cell();
- Code* host = rinfo->host();
// The cell is always in old space, we don't have to record the slot in
// the old-to-new remembered set.
DCHECK(!collector_->heap()->InNewSpace(cell));
collector_->RecordRelocSlot(host, rinfo, cell);
}
- // Entries that will never move.
- inline void VisitCodeAgeSequence(RelocInfo* rinfo) final {
+ inline void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) override {
+ DCHECK_EQ(host, rinfo->host());
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
Code* stub = rinfo->code_age_stub();
USE(stub);
@@ -1679,13 +1746,14 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
}
// Entries that are skipped for recording.
- inline void VisitExternalReference(RelocInfo* rinfo) final {}
- inline void VisitExternalReference(Address* p) final {}
- inline void VisitRuntimeEntry(RelocInfo* rinfo) final {}
- inline void VisitInternalReference(RelocInfo* rinfo) final {}
+ inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {}
+ inline void VisitExternalReference(Foreign* host, Address* p) final {}
+ inline void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) final {}
+ inline void VisitInternalReference(Code* host, RelocInfo* rinfo) final {}
- private:
- inline void RecordMigratedSlot(Object* value, Address slot) {
+ protected:
+ inline virtual void RecordMigratedSlot(HeapObject* host, Object* value,
+ Address slot) {
if (value->IsHeapObject()) {
Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
if (p->InNewSpace()) {
@@ -1699,6 +1767,102 @@ class RecordMigratedSlotVisitor final : public ObjectVisitor {
MarkCompactCollector* collector_;
};
+class MigrationObserver {
+ public:
+ explicit MigrationObserver(Heap* heap) : heap_(heap) {}
+
+ virtual ~MigrationObserver() {}
+ virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+ int size) = 0;
+
+ protected:
+ Heap* heap_;
+};
+
+class ProfilingMigrationObserver final : public MigrationObserver {
+ public:
+ explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
+
+ inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+ int size) final {
+ if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
+ PROFILE(heap_->isolate(),
+ CodeMoveEvent(AbstractCode::cast(src), dst->address()));
+ }
+ heap_->OnMoveEvent(dst, src, size);
+ }
+};
+
+class YoungGenerationMigrationObserver final : public MigrationObserver {
+ public:
+ YoungGenerationMigrationObserver(Heap* heap,
+ MarkCompactCollector* mark_compact_collector)
+ : MigrationObserver(heap),
+ mark_compact_collector_(mark_compact_collector) {}
+
+ inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
+ int size) final {
+ // Migrate color to old generation marking in case the object survived young
+ // generation garbage collection.
+ if (heap_->incremental_marking()->IsMarking()) {
+ DCHECK(ObjectMarking::IsWhite(
+ dst, mark_compact_collector_->marking_state(dst)));
+ heap_->incremental_marking()->TransferColor<MarkBit::ATOMIC>(src, dst);
+ }
+ }
+
+ protected:
+ base::Mutex mutex_;
+ MarkCompactCollector* mark_compact_collector_;
+};
+
+class YoungGenerationRecordMigratedSlotVisitor final
+ : public RecordMigratedSlotVisitor {
+ public:
+ explicit YoungGenerationRecordMigratedSlotVisitor(
+ MarkCompactCollector* collector)
+ : RecordMigratedSlotVisitor(collector) {}
+
+ inline void VisitCodeEntry(JSFunction* host, Address code_entry_slot) final {
+ Address code_entry = Memory::Address_at(code_entry_slot);
+ if (Page::FromAddress(code_entry)->IsEvacuationCandidate() &&
+ IsLive(host)) {
+ RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
+ nullptr, CODE_ENTRY_SLOT,
+ code_entry_slot);
+ }
+ }
+
+ void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
+ void VisitDebugTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
+ void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
+ UNREACHABLE();
+ }
+ void VisitCellPointer(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
+ void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) final {
+ UNREACHABLE();
+ }
+
+ private:
+ // Only record slots for host objects that are considered as live by the full
+ // collector.
+ inline bool IsLive(HeapObject* object) {
+ return ObjectMarking::IsBlack(object, collector_->marking_state(object));
+ }
+
+ inline void RecordMigratedSlot(HeapObject* host, Object* value,
+ Address slot) final {
+ if (value->IsHeapObject()) {
+ Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
+ if (p->InNewSpace()) {
+ RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
+ } else if (p->IsEvacuationCandidate() && IsLive(host)) {
+ RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
+ }
+ }
+ }
+};
+
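
RecordMigratedSlotVisitor and the young-generation override above encode one decision per migrated slot: values in new space go into the OLD_TO_NEW remembered set, values on evacuation candidates into OLD_TO_OLD, and the young-generation variant additionally requires the host to be live so slots in dead hosts are not recorded. That decision in isolation (a sketch; the enum and function names are hypothetical stand-ins for V8's RememberedSet machinery):

    enum class RecordedSet { kNone, kOldToNew, kOldToOld };

    // host_is_live is unconditionally true for the full collector and is
    // checked against the full collector's markbits in the young-generation
    // visitor above.
    RecordedSet ClassifyMigratedSlot(bool value_in_new_space,
                                     bool value_on_evacuation_candidate,
                                     bool host_is_live) {
      if (value_in_new_space) return RecordedSet::kOldToNew;
      if (value_on_evacuation_candidate && host_is_live)
        return RecordedSet::kOldToOld;
      return RecordedSet::kNone;
    }
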
class HeapObjectVisitor {
public:
virtual ~HeapObjectVisitor() {}
@@ -1706,18 +1870,61 @@ class HeapObjectVisitor {
};
class EvacuateVisitorBase : public HeapObjectVisitor {
+ public:
+ void AddObserver(MigrationObserver* observer) {
+ migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
+ observers_.push_back(observer);
+ }
+
protected:
- enum MigrationMode { kFast, kProfiled };
+ enum MigrationMode { kFast, kObserved };
+
+ typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject* dst,
+ HeapObject* src, int size,
+ AllocationSpace dest);
+
+ template <MigrationMode mode>
+ static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject* dst,
+ HeapObject* src, int size,
+ AllocationSpace dest) {
+ Address dst_addr = dst->address();
+ Address src_addr = src->address();
+ DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
+ DCHECK(dest != LO_SPACE);
+ if (dest == OLD_SPACE) {
+ DCHECK_OBJECT_SIZE(size);
+ DCHECK(IsAligned(size, kPointerSize));
+ base->heap_->CopyBlock(dst_addr, src_addr, size);
+ if (mode != MigrationMode::kFast)
+ base->ExecuteMigrationObservers(dest, src, dst, size);
+ dst->IterateBodyFast(dst->map()->instance_type(), size,
+ base->record_visitor_);
+ } else if (dest == CODE_SPACE) {
+ DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
+ base->heap_->CopyBlock(dst_addr, src_addr, size);
+ Code::cast(dst)->Relocate(dst_addr - src_addr);
+ if (mode != MigrationMode::kFast)
+ base->ExecuteMigrationObservers(dest, src, dst, size);
+ dst->IterateBodyFast(dst->map()->instance_type(), size,
+ base->record_visitor_);
+ } else {
+ DCHECK_OBJECT_SIZE(size);
+ DCHECK(dest == NEW_SPACE);
+ base->heap_->CopyBlock(dst_addr, src_addr, size);
+ if (mode != MigrationMode::kFast)
+ base->ExecuteMigrationObservers(dest, src, dst, size);
+ }
+ base::NoBarrier_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
+ reinterpret_cast<base::AtomicWord>(dst_addr));
+ }
EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
RecordMigratedSlotVisitor* record_visitor)
: heap_(heap),
compaction_spaces_(compaction_spaces),
- record_visitor_(record_visitor),
- profiling_(
- heap->isolate()->is_profiling() ||
- heap->isolate()->logger()->is_logging_code_events() ||
- heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
+ record_visitor_(record_visitor) {
+ migration_function_ = RawMigrateObject<MigrationMode::kFast>;
+ }
inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
HeapObject** target_object) {
@@ -1734,51 +1941,16 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
return false;
}
- inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
- AllocationSpace dest) {
- if (profiling_) {
- MigrateObject<kProfiled>(dst, src, size, dest);
- } else {
- MigrateObject<kFast>(dst, src, size, dest);
+ inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject* src,
+ HeapObject* dst, int size) {
+ for (MigrationObserver* obs : observers_) {
+ obs->Move(dest, src, dst, size);
}
}
- template <MigrationMode mode>
inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
AllocationSpace dest) {
- Address dst_addr = dst->address();
- Address src_addr = src->address();
- DCHECK(heap_->AllowedToBeMigrated(src, dest));
- DCHECK(dest != LO_SPACE);
- if (dest == OLD_SPACE) {
- DCHECK_OBJECT_SIZE(size);
- DCHECK(IsAligned(size, kPointerSize));
- heap_->CopyBlock(dst_addr, src_addr, size);
- if ((mode == kProfiled) && dst->IsBytecodeArray()) {
- PROFILE(heap_->isolate(),
- CodeMoveEvent(AbstractCode::cast(src), dst_addr));
- }
- dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_);
- } else if (dest == CODE_SPACE) {
- DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
- if (mode == kProfiled) {
- PROFILE(heap_->isolate(),
- CodeMoveEvent(AbstractCode::cast(src), dst_addr));
- }
- heap_->CopyBlock(dst_addr, src_addr, size);
- Code::cast(dst)->Relocate(dst_addr - src_addr);
- RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
- dst->IterateBodyFast(dst->map()->instance_type(), size, record_visitor_);
- } else {
- DCHECK_OBJECT_SIZE(size);
- DCHECK(dest == NEW_SPACE);
- heap_->CopyBlock(dst_addr, src_addr, size);
- }
- if (mode == kProfiled) {
- heap_->OnMoveEvent(dst, src, size);
- }
- base::NoBarrier_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
- reinterpret_cast<base::AtomicWord>(dst_addr));
+ migration_function_(this, dst, src, size, dest);
}
#ifdef VERIFY_HEAP
@@ -1804,7 +1976,8 @@ class EvacuateVisitorBase : public HeapObjectVisitor {
Heap* heap_;
CompactionSpaceCollection* compaction_spaces_;
RecordMigratedSlotVisitor* record_visitor_;
- bool profiling_;
+ std::vector<MigrationObserver*> observers_;
+ MigrateFunction migration_function_;
};
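
EvacuateVisitorBase above replaces the old per-migration profiling branch with a function pointer selected once: the constructor installs RawMigrateObject<kFast>, and AddObserver() swaps in RawMigrateObject<kObserved>, so the observer loop exists only in that instantiation and the common path pays a single indirect call instead of a branch. The pattern distilled into a toy (Widget and kLogged are hypothetical):

    #include <cstdio>

    class Widget {
     public:
      void DoWork(int n) { work_fn_(this, n); }  // Single indirect call.
      void EnableLogging() { work_fn_ = &RawDoWork<true>; }

     private:
      template <bool kLogged>
      static void RawDoWork(Widget* self, int n) {
        if (kLogged) std::printf("working on %d\n", n);  // Absent in <false>.
        self->total_ += n;
      }

      void (*work_fn_)(Widget*, int) = &RawDoWork<false>;
      int total_ = 0;
    };
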
class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
@@ -2055,15 +2228,12 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
StringTable* string_table = heap()->string_table();
// Mark the string table itself.
- if (ObjectMarking::IsWhite(string_table,
- MarkingState::Internal(string_table))) {
- // String table could have already been marked by visiting the handles list.
- ObjectMarking::WhiteToBlack(string_table,
- MarkingState::Internal(string_table));
- }
- // Explicitly mark the prefix.
- string_table->IteratePrefix(visitor);
- ProcessMarkingDeque();
+ if (ObjectMarking::WhiteToBlack(string_table,
+ MarkingState::Internal(string_table))) {
+ // Explicitly mark the prefix.
+ string_table->IteratePrefix(visitor);
+ ProcessMarkingDeque();
+ }
}
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
@@ -2143,7 +2313,7 @@ void MarkCompactCollector::ProcessMarkingDeque() {
// Mark all objects reachable (transitively) from objects on the marking
// stack including references only considered in the atomic marking pause.
void MarkCompactCollector::ProcessEphemeralMarking(
- ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
+ bool only_process_harmony_weak_collections) {
DCHECK(marking_deque()->IsEmpty() && !marking_deque()->overflowed());
bool work_to_do = true;
while (work_to_do) {
@@ -2171,7 +2341,8 @@ void MarkCompactCollector::ProcessEphemeralMarking(
CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());
}
-void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
+void MarkCompactCollector::ProcessTopOptimizedFrame(
+ RootMarkingVisitor* visitor) {
for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
!it.done(); it.Advance()) {
if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
@@ -2188,90 +2359,6 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
}
}
-void MarkingDeque::SetUp() {
- backing_store_ = new base::VirtualMemory(kMaxSize);
- backing_store_committed_size_ = 0;
- if (backing_store_ == nullptr) {
- V8::FatalProcessOutOfMemory("MarkingDeque::SetUp");
- }
-}
-
-void MarkingDeque::TearDown() {
- delete backing_store_;
-}
-
-void MarkingDeque::StartUsing() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- if (in_use_) {
- // This can happen in mark-compact GC if the incremental marker already
- // started using the marking deque.
- return;
- }
- in_use_ = true;
- EnsureCommitted();
- array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
- size_t size = FLAG_force_marking_deque_overflows
- ? 64 * kPointerSize
- : backing_store_committed_size_;
- DCHECK(
- base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
- mask_ = static_cast<int>((size / kPointerSize) - 1);
- top_ = bottom_ = 0;
- overflowed_ = false;
-}
-
-void MarkingDeque::StopUsing() {
- base::LockGuard<base::Mutex> guard(&mutex_);
- if (!in_use_) return;
- DCHECK(IsEmpty());
- DCHECK(!overflowed_);
- top_ = bottom_ = mask_ = 0;
- in_use_ = false;
- if (FLAG_concurrent_sweeping) {
- StartUncommitTask();
- } else {
- Uncommit();
- }
-}
-
-void MarkingDeque::Clear() {
- DCHECK(in_use_);
- top_ = bottom_ = 0;
- overflowed_ = false;
-}
-
-void MarkingDeque::Uncommit() {
- DCHECK(!in_use_);
- bool success = backing_store_->Uncommit(backing_store_->address(),
- backing_store_committed_size_);
- backing_store_committed_size_ = 0;
- CHECK(success);
-}
-
-void MarkingDeque::EnsureCommitted() {
- DCHECK(in_use_);
- if (backing_store_committed_size_ > 0) return;
-
- for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
- if (backing_store_->Commit(backing_store_->address(), size, false)) {
- backing_store_committed_size_ = size;
- break;
- }
- }
- if (backing_store_committed_size_ == 0) {
- V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted");
- }
-}
-
-void MarkingDeque::StartUncommitTask() {
- if (!uncommit_task_pending_) {
- uncommit_task_pending_ = true;
- UncommitTask* task = new UncommitTask(heap_->isolate(), this);
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
- }
-}
-
class ObjectStatsVisitor : public HeapObjectVisitor {
public:
ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats,
@@ -2335,6 +2422,340 @@ void MarkCompactCollector::RecordObjectStats() {
}
}
+class YoungGenerationMarkingVisitor final
+ : public HeapVisitor<void, YoungGenerationMarkingVisitor> {
+ public:
+ using BaseClass = HeapVisitor<void, YoungGenerationMarkingVisitor>;
+
+ YoungGenerationMarkingVisitor(Heap* heap,
+ WorkStealingMarkingDeque* global_marking_deque,
+ int task_id)
+ : heap_(heap), marking_deque_(global_marking_deque, task_id) {}
+
+ void VisitPointers(HeapObject* host, Object** start, Object** end) final {
+ const int kMinRangeForMarkingRecursion = 64;
+ if (end - start >= kMinRangeForMarkingRecursion) {
+ if (MarkRecursively(host, start, end)) return;
+ }
+ for (Object** p = start; p < end; p++) {
+ VisitPointer(host, p);
+ }
+ }
+
+ void VisitPointer(HeapObject* host, Object** slot) final {
+ Object* target = *slot;
+ if (heap_->InNewSpace(target)) {
+ HeapObject* target_object = HeapObject::cast(target);
+ MarkObjectViaMarkingDeque(target_object);
+ }
+ }
+
+ // Special cases for young generation. Also see StaticNewSpaceVisitor.
+
+ void VisitJSFunction(Map* map, JSFunction* object) final {
+ if (!ShouldVisit(object)) return;
+ int size = JSFunction::BodyDescriptorWeakCode::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ JSFunction::BodyDescriptorWeakCode::IterateBody(object, size, this);
+ return;
+ }
+
+ void VisitNativeContext(Map* map, Context* object) final {
+ if (!ShouldVisit(object)) return;
+ int size = Context::ScavengeBodyDescriptor::SizeOf(map, object);
+ VisitMapPointer(object, object->map_slot());
+ Context::ScavengeBodyDescriptor::IterateBody(object, size, this);
+ return;
+ }
+
+ void VisitJSApiObject(Map* map, JSObject* object) final {
+ return VisitJSObject(map, object);
+ }
+
+ void VisitBytecodeArray(Map* map, BytecodeArray* object) final {
+ UNREACHABLE();
+ return;
+ }
+
+ void VisitSharedFunctionInfo(Map* map, SharedFunctionInfo* object) final {
+ UNREACHABLE();
+ return;
+ }
+
+ private:
+ inline MarkingState marking_state(HeapObject* object) {
+ SLOW_DCHECK(
+ MarkingState::External(object).bitmap() ==
+ heap_->minor_mark_compact_collector()->marking_state(object).bitmap());
+ return MarkingState::External(object);
+ }
+
+ inline void MarkObjectViaMarkingDeque(HeapObject* object) {
+ if (ObjectMarking::WhiteToBlack<MarkBit::ATOMIC>(object,
+ marking_state(object))) {
+ // Marking deque overflow is unsupported for the young generation.
+ CHECK(marking_deque_.Push(object));
+ }
+ }
+
+ inline bool MarkRecursively(HeapObject* host, Object** start, Object** end) {
+ // TODO(mlippautz): Stack check on background tasks. We cannot do a reliable
+ // stack check on background tasks yet.
+ for (Object** p = start; p < end; p++) {
+ Object* target = *p;
+ if (heap_->InNewSpace(target)) {
+ HeapObject* target_object = HeapObject::cast(target);
+ if (ObjectMarking::WhiteToBlack<MarkBit::ATOMIC>(
+ target_object, marking_state(target_object))) {
+ Visit(target_object);
+ }
+ }
+ }
+ return true;
+ }
+
+ Heap* heap_;
+ LocalWorkStealingMarkingDeque marking_deque_;
+};
+
+class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
+ public:
+ explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
+ : collector_(collector) {}
+
+ void VisitRootPointer(Root root, Object** p) override {
+ MarkObjectByPointer(p);
+ }
+
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
+ for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+ }
+
+ private:
+ inline MarkingState marking_state(HeapObject* object) {
+ SLOW_DCHECK(MarkingState::External(object).bitmap() ==
+ collector_->marking_state(object).bitmap());
+ return MarkingState::External(object);
+ }
+
+ void MarkObjectByPointer(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* object = HeapObject::cast(*p);
+
+ if (!collector_->heap()->InNewSpace(object)) return;
+
+ if (ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(
+ object, marking_state(object))) {
+ collector_->marking_visitor(kMainMarker)->Visit(object);
+ collector_->EmptyMarkingDeque();
+ }
+ }
+
+ MinorMarkCompactCollector* collector_;
+};
+
+class MarkingItem;
+class PageMarkingItem;
+class RootMarkingItem;
+class YoungGenerationMarkingTask;
+
+class MarkingItem : public ItemParallelJob::Item {
+ public:
+ virtual ~MarkingItem() {}
+ virtual void Process(YoungGenerationMarkingTask* task) = 0;
+};
+
+class YoungGenerationMarkingTask : public ItemParallelJob::Task {
+ public:
+ YoungGenerationMarkingTask(Isolate* isolate,
+ MinorMarkCompactCollector* collector,
+ WorkStealingMarkingDeque* marking_deque,
+ YoungGenerationMarkingVisitor* visitor,
+ int task_id)
+ : ItemParallelJob::Task(isolate),
+ collector_(collector),
+ marking_deque_(marking_deque, task_id),
+ visitor_(visitor) {}
+
+ void RunInParallel() override {
+ double marking_time = 0.0;
+ {
+ TimedScope scope(&marking_time);
+ MarkingItem* item = nullptr;
+ while ((item = GetItem<MarkingItem>()) != nullptr) {
+ item->Process(this);
+ item->MarkFinished();
+ EmptyLocalMarkingDeque();
+ }
+ EmptyMarkingDeque();
+ DCHECK(marking_deque_.IsEmpty());
+ }
+ if (FLAG_trace_minor_mc_parallel_marking) {
+ PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
+ static_cast<void*>(this), marking_time);
+ }
+ }
+
+ void MarkObject(Object* object) {
+ if (!collector_->heap()->InNewSpace(object)) return;
+ HeapObject* heap_object = HeapObject::cast(object);
+ if (ObjectMarking::WhiteToBlack<MarkBit::ATOMIC>(
+ heap_object, collector_->marking_state(heap_object))) {
+ visitor_->Visit(heap_object);
+ }
+ }
+
+ private:
+ void EmptyLocalMarkingDeque() {
+ HeapObject* object = nullptr;
+ while (marking_deque_.Pop(&object)) {
+ visitor_->Visit(object);
+ }
+ }
+
+ void EmptyMarkingDeque() {
+ HeapObject* object = nullptr;
+ while (marking_deque_.WaitForMoreObjects()) {
+ while (marking_deque_.Pop(&object)) {
+ visitor_->Visit(object);
+ }
+ }
+ }
+
+ MinorMarkCompactCollector* collector_;
+ LocalWorkStealingMarkingDeque marking_deque_;
+ YoungGenerationMarkingVisitor* visitor_;
+};
+
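
The task above drains work in two layers: Pop() consumes what is locally visible, while WaitForMoreObjects() blocks until another task publishes work or all tasks agree the job is done. A toy mutex-based queue showing the Push/Pop half (V8's WorkStealingMarkingDeque instead gives each task a local view with stealing; the termination protocol is deliberately omitted):

    #include <deque>
    #include <mutex>

    template <typename T>
    class ToyMarkingQueue {
     public:
      void Push(T* object) {
        std::lock_guard<std::mutex> lock(mutex_);
        items_.push_back(object);
      }

      // Returns false when currently empty; callers then fall back to a
      // WaitForMoreObjects()-style termination check (not shown).
      bool Pop(T** out) {
        std::lock_guard<std::mutex> lock(mutex_);
        if (items_.empty()) return false;
        *out = items_.back();  // LIFO keeps recently pushed objects hot.
        items_.pop_back();
        return true;
      }

     private:
      std::mutex mutex_;
      std::deque<T*> items_;
    };
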
+class BatchedRootMarkingItem : public MarkingItem {
+ public:
+ explicit BatchedRootMarkingItem(std::vector<Object*>&& objects)
+ : objects_(std::move(objects)) {}
+ virtual ~BatchedRootMarkingItem() {}
+
+ void Process(YoungGenerationMarkingTask* task) override {
+ for (Object* object : objects_) {
+ task->MarkObject(object);
+ }
+ }
+
+ private:
+ std::vector<Object*> objects_;
+};
+
+class PageMarkingItem : public MarkingItem {
+ public:
+ explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
+ virtual ~PageMarkingItem() {}
+
+ void Process(YoungGenerationMarkingTask* task) override {
+ base::LockGuard<base::RecursiveMutex> guard(chunk_->mutex());
+ MarkUntypedPointers(task);
+ MarkTypedPointers(task);
+ }
+
+ private:
+ inline Heap* heap() { return chunk_->heap(); }
+
+ void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
+ RememberedSet<OLD_TO_NEW>::Iterate(chunk_, [this, task](Address slot) {
+ return CheckAndMarkObject(task, slot);
+ });
+ }
+
+ void MarkTypedPointers(YoungGenerationMarkingTask* task) {
+ Isolate* isolate = heap()->isolate();
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ chunk_, [this, isolate, task](SlotType slot_type, Address host_addr,
+ Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate, slot_type, slot, [this, task](Object** slot) {
+ return CheckAndMarkObject(task,
+ reinterpret_cast<Address>(slot));
+ });
+ });
+ }
+
+ SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
+ Address slot_address) {
+ Object* object = *reinterpret_cast<Object**>(slot_address);
+ if (heap()->InNewSpace(object)) {
+ // Marking happens before flipping the young generation, so the object
+ // has to be in ToSpace.
+ DCHECK(heap()->InToSpace(object));
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+ task->MarkObject(heap_object);
+ return KEEP_SLOT;
+ }
+ return REMOVE_SLOT;
+ }
+
+ MemoryChunk* chunk_;
+};
+
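
CheckAndMarkObject() above doubles as a filter: KEEP_SLOT retains the remembered-set entry because the slot still points into new space, while REMOVE_SLOT prunes it during the same pass. The iterate-and-prune shape in isolation (a sketch over a plain vector; V8's RememberedSet iterates bucketed slot sets):

    #include <algorithm>
    #include <vector>

    enum ToySlotCallbackResult { TOY_KEEP_SLOT, TOY_REMOVE_SLOT };

    // Visits every slot once and erases the entries the callback rejects,
    // so a single pass both processes and compacts the set.
    template <typename Callback>
    void IterateAndPrune(std::vector<void*>* slots, Callback callback) {
      slots->erase(std::remove_if(slots->begin(), slots->end(),
                                  [&](void* slot) {
                                    return callback(slot) == TOY_REMOVE_SLOT;
                                  }),
                   slots->end());
    }
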
+// This root visitor walks all roots and creates items bundling objects that
+// are then processed later on. Slots have to be dereferenced as they could
+// live on the native (C++) stack, which requires filtering out the indirection.
+class MinorMarkCompactCollector::RootMarkingVisitorSeedOnly
+ : public RootVisitor {
+ public:
+ explicit RootMarkingVisitorSeedOnly(ItemParallelJob* job) : job_(job) {
+ buffered_objects_.reserve(kBufferSize);
+ }
+
+ void VisitRootPointer(Root root, Object** p) override {
+ if (!(*p)->IsHeapObject()) return;
+ AddObject(*p);
+ }
+
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
+ for (Object** p = start; p < end; p++) {
+ if (!(*p)->IsHeapObject()) continue;
+ AddObject(*p);
+ }
+ }
+
+ void FlushObjects() {
+ job_->AddItem(new BatchedRootMarkingItem(std::move(buffered_objects_)));
+ // Moving leaves the container in a valid but unspecified state. Reusing
+ // it requires a call with no preconditions, such as clear(), that resets
+ // it to a known state.
+ buffered_objects_.clear();
+ buffered_objects_.reserve(kBufferSize);
+ }
+
+ private:
+ // Bundling several objects together in items avoids issues with allocating
+ // and deallocating items; both are operations that are performed on the main
+ // thread.
+ static const int kBufferSize = 32;
+
+ void AddObject(Object* object) {
+ buffered_objects_.push_back(object);
+ if (buffered_objects_.size() == kBufferSize) FlushObjects();
+ }
+
+ ItemParallelJob* job_;
+ std::vector<Object*> buffered_objects_;
+};
+
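
RootMarkingVisitorSeedOnly batches kBufferSize roots per item so that item allocation, which must happen on the main thread, is amortized across many roots; after FlushObjects() hands the buffer to an item with std::move, the clear() plus reserve() pair is mandatory because a moved-from vector is only guaranteed valid, not empty. The reuse rule in miniature:

    #include <cassert>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<int> buffer = {1, 2, 3};
      std::vector<int> batch = std::move(buffer);  // buffer: valid, unspecified.

      buffer.clear();      // No preconditions; resets to a known empty state.
      buffer.reserve(32);  // Re-establish capacity, as FlushObjects() does.

      buffer.push_back(4);
      assert(batch.size() == 3 && buffer.size() == 1);
      return 0;
    }
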
+MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
+ : MarkCompactCollectorBase(heap), page_parallel_job_semaphore_(0) {
+ marking_deque_ = new WorkStealingMarkingDeque();
+ for (int i = 0; i < kNumMarkers; i++) {
+ marking_visitor_[i] =
+ new YoungGenerationMarkingVisitor(heap, marking_deque_, i);
+ }
+}
+
+MinorMarkCompactCollector::~MinorMarkCompactCollector() {
+ for (int i = 0; i < kNumMarkers; i++) {
+ DCHECK_NOT_NULL(marking_visitor_[i]);
+ delete marking_visitor_[i];
+ }
+ delete marking_deque_;
+}
+
SlotCallbackResult MinorMarkCompactCollector::CheckAndMarkObject(
Heap* heap, Address slot_address) {
Object* object = *reinterpret_cast<Object**>(slot_address);
@@ -2344,22 +2765,48 @@ SlotCallbackResult MinorMarkCompactCollector::CheckAndMarkObject(
DCHECK(heap->InToSpace(object));
HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
const MarkingState state = MarkingState::External(heap_object);
- if (ObjectMarking::IsBlackOrGrey<MarkBit::NON_ATOMIC>(heap_object, state)) {
- return KEEP_SLOT;
+ if (ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(heap_object, state)) {
+ heap->minor_mark_compact_collector()
+ ->marking_visitor(kMainMarker)
+ ->Visit(heap_object);
}
- ObjectMarking::WhiteToBlack<MarkBit::NON_ATOMIC>(heap_object, state);
- StaticYoungGenerationMarkingVisitor::IterateBody(heap_object->map(),
- heap_object);
return KEEP_SLOT;
}
return REMOVE_SLOT;
}
-static bool IsUnmarkedObject(Heap* heap, Object** p) {
+static bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
return heap->InNewSpace(*p) &&
!ObjectMarking::IsBlack(HeapObject::cast(*p),
- MarkingState::Internal(HeapObject::cast(*p)));
+ MarkingState::External(HeapObject::cast(*p)));
+}
+
+void MinorMarkCompactCollector::MarkRootSetInParallel() {
+ // Seed the root set (roots + old->new set).
+ ItemParallelJob job(isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
+ RootMarkingVisitorSeedOnly root_seed_visitor(&job);
+ heap()->IterateRoots(&root_seed_visitor, VISIT_ALL_IN_SCAVENGE);
+ RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+ heap(), [&job](MemoryChunk* chunk) {
+ job.AddItem(new PageMarkingItem(chunk));
+ });
+ root_seed_visitor.FlushObjects();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
+ const int num_tasks = NumberOfMarkingTasks();
+ for (int i = 0; i < num_tasks; i++) {
+ job.AddTask(new YoungGenerationMarkingTask(
+ isolate(), this, marking_deque(), marking_visitor(i), i));
+ }
+ job.Run();
+ }
}
void MinorMarkCompactCollector::MarkLiveObjects() {
@@ -2367,91 +2814,224 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
PostponeInterruptsScope postpone(isolate());
- StaticYoungGenerationMarkingVisitor::Initialize(heap());
RootMarkingVisitor root_visitor(this);
- marking_deque()->StartUsing();
-
- isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
- &Heap::IsUnmodifiedHeapObject);
-
- {
- TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
- heap()->IterateRoots(&root_visitor, VISIT_ALL_IN_SCAVENGE);
- ProcessMarkingDeque();
- }
-
{
TRACE_GC(heap()->tracer(),
- GCTracer::Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS);
- RememberedSet<OLD_TO_NEW>::Iterate(
- heap(), NON_SYNCHRONIZED,
- [this](Address addr) { return CheckAndMarkObject(heap(), addr); });
- RememberedSet<OLD_TO_NEW>::IterateTyped(
- heap(), NON_SYNCHRONIZED,
- [this](SlotType type, Address host_addr, Address addr) {
- return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate(), type, addr, [this](Object** addr) {
- return CheckAndMarkObject(heap(),
- reinterpret_cast<Address>(addr));
- });
- });
- ProcessMarkingDeque();
+ GCTracer::Scope::MINOR_MC_MARK_IDENTIFY_GLOBAL_HANDLES);
+ isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
+ &Heap::IsUnmodifiedHeapObject);
}
+ MarkRootSetInParallel();
+
+ // Mark rest on the main thread.
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
- heap()->VisitEncounteredWeakCollections(&root_visitor);
+ heap()->IterateEncounteredWeakCollections(&root_visitor);
ProcessMarkingDeque();
}
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
- &IsUnmarkedObject);
- isolate()
- ->global_handles()
- ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>(
- &root_visitor);
+ &IsUnmarkedObjectForYoungGeneration);
+ isolate()->global_handles()->IterateNewSpaceWeakUnmodifiedRoots(
+ &root_visitor);
ProcessMarkingDeque();
}
-
- marking_deque()->StopUsing();
}
void MinorMarkCompactCollector::ProcessMarkingDeque() {
EmptyMarkingDeque();
- DCHECK(!marking_deque()->overflowed());
- DCHECK(marking_deque()->IsEmpty());
}
void MinorMarkCompactCollector::EmptyMarkingDeque() {
- while (!marking_deque()->IsEmpty()) {
- HeapObject* object = marking_deque()->Pop();
-
+ LocalWorkStealingMarkingDeque local_marking_deque(marking_deque(),
+ kMainMarker);
+ HeapObject* object = nullptr;
+ while (local_marking_deque.Pop(&object)) {
DCHECK(!object->IsFiller());
DCHECK(object->IsHeapObject());
DCHECK(heap()->Contains(object));
-
DCHECK(!(ObjectMarking::IsWhite<MarkBit::NON_ATOMIC>(
- object, MarkingState::External(object))));
-
- Map* map = object->map();
+ object, marking_state(object))));
DCHECK((ObjectMarking::IsBlack<MarkBit::NON_ATOMIC>(
- object, MarkingState::External(object))));
- StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
+ object, marking_state(object))));
+ marking_visitor(kMainMarker)->Visit(object);
}
+ DCHECK(local_marking_deque.IsEmpty());
}
void MinorMarkCompactCollector::CollectGarbage() {
- MarkLiveObjects();
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
+ heap()->mark_compact_collector()->sweeper().EnsureNewSpaceCompleted();
+ CleanupSweepToIteratePages();
+ }
+ MarkLiveObjects();
+ ClearNonLiveReferences();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
YoungGenerationMarkingVerifier verifier(heap());
verifier.Run();
}
#endif // VERIFY_HEAP
+
+ Evacuate();
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ YoungGenerationEvacuationVerifier verifier(heap());
+ verifier.Run();
+ }
+#endif // VERIFY_HEAP
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
+ heap()->incremental_marking()->UpdateMarkingDequeAfterScavenge();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
+ for (Page* p : PageRange(heap()->new_space()->FromSpaceStart(),
+ heap()->new_space()->FromSpaceEnd())) {
+ DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
+ marking_state(p).ClearLiveness();
+ }
+ }
+
+ heap()->account_external_memory_concurrently_freed();
+}
+
+void MinorMarkCompactCollector::MakeIterable(
+ Page* p, MarkingTreatmentMode marking_mode,
+ FreeSpaceTreatmentMode free_space_mode) {
+ // We have to clear the full collector's markbits for the areas that we
+ // remove here.
+ MarkCompactCollector* full_collector = heap()->mark_compact_collector();
+ Address free_start = p->area_start();
+ DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+ LiveObjectIterator<kBlackObjects> it(p, marking_state(p));
+ HeapObject* object = nullptr;
+
+ while ((object = it.Next()) != nullptr) {
+ DCHECK(ObjectMarking::IsBlack(object, marking_state(object)));
+ Address free_end = object->address();
+ if (free_end != free_start) {
+ CHECK_GT(free_end, free_start);
+ size_t size = static_cast<size_t>(free_end - free_start);
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, size);
+ full_collector->marking_state(p).bitmap()->ClearRange(
+ p->AddressToMarkbitIndex(free_start),
+ p->AddressToMarkbitIndex(free_end));
+ }
+ p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
+ ClearRecordedSlots::kNo);
+ }
+ Map* map = object->synchronized_map();
+ int size = object->SizeFromMap(map);
+ free_start = free_end + size;
+ }
+
+ if (free_start != p->area_end()) {
+ CHECK_GT(p->area_end(), free_start);
+ size_t size = static_cast<size_t>(p->area_end() - free_start);
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, size);
+ full_collector->marking_state(p).bitmap()->ClearRange(
+ p->AddressToMarkbitIndex(free_start),
+ p->AddressToMarkbitIndex(p->area_end()));
+ }
+ p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
+ ClearRecordedSlots::kNo);
+ }
+
+ if (marking_mode == MarkingTreatmentMode::CLEAR) {
+ marking_state(p).ClearLiveness();
+ p->ClearFlag(Page::SWEEP_TO_ITERATE);
+ }
+}
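// Worked example for MakeIterable above (object layout hypothetical):
//
//   area_start .. [A] ..gap1.. [B][C] ..gap2.. area_end
//
// The black-object iterator visits A, B, and C; gap1 and the trailing
// gap2 are overwritten with filler objects (and, under ZAP_FREE_SPACE,
// zapped with 0xcc with the full collector's markbits cleared for those
// ranges), so a later linear walk of the page only encounters valid maps.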
+
+void MinorMarkCompactCollector::ClearNonLiveReferences() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
+ // Internalized strings are always stored in old space, so there is no need
+ // to clean them here.
+ YoungGenerationExternalStringTableCleaner external_visitor(*this);
+ heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
+ heap()->external_string_table_.CleanUpNewSpaceStrings();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
+ // Process the weak references.
+ MinorMarkCompactWeakObjectRetainer retainer(*this);
+ heap()->ProcessYoungWeakReferences(&retainer);
+ }
+}
+
+void MinorMarkCompactCollector::EvacuatePrologue() {
+ NewSpace* new_space = heap()->new_space();
+ // Append the new space pages to the list of pages to be processed.
+ for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
+ new_space_evacuation_pages_.Add(p);
+ }
+ new_space->Flip();
+ new_space->ResetAllocationInfo();
+}
+
+void MinorMarkCompactCollector::EvacuateEpilogue() {
+ heap()->new_space()->set_age_mark(heap()->new_space()->top());
+}
+
+void MinorMarkCompactCollector::Evacuate() {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
+ base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
+ EvacuatePrologue();
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
+ EvacuatePagesInParallel();
+ }
+
+ UpdatePointersAfterEvacuation();
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
+ if (!heap()->new_space()->Rebalance()) {
+ FatalProcessOutOfMemory("NewSpace::Rebalance");
+ }
+ }
+
+ // Give pages that are queued to be freed back to the OS.
+ heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
+ for (Page* p : new_space_evacuation_pages_) {
+ if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
+ p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+ p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ p->SetFlag(Page::SWEEP_TO_ITERATE);
+ sweep_to_iterate_pages_.push_back(p);
+ }
+ }
+ new_space_evacuation_pages_.Rewind(0);
+ }
+
+ {
+ TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
+ EvacuateEpilogue();
+ }
}
void MarkCompactCollector::MarkLiveObjects() {
@@ -2502,7 +3082,7 @@ void MarkCompactCollector::MarkLiveObjects() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
- ProcessEphemeralMarking(&root_visitor, false);
+ ProcessEphemeralMarking(false);
}
// The objects reachable from the roots, weak maps or object groups
@@ -2535,7 +3115,7 @@ void MarkCompactCollector::MarkLiveObjects() {
// processed and no weakly reachable node can discover new object groups.
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
- ProcessEphemeralMarking(&root_visitor, true);
+ ProcessEphemeralMarking(true);
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
heap()->local_embedder_heap_tracer()->TraceEpilogue();
@@ -2559,7 +3139,7 @@ void MarkCompactCollector::ClearNonLiveReferences() {
string_table->IterateElements(&internalized_visitor);
string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
- ExternalStringTableCleaner external_visitor(heap(), nullptr);
+ ExternalStringTableCleaner external_visitor(heap());
heap()->external_string_table_.IterateAll(&external_visitor);
heap()->external_string_table_.CleanUpAll();
}
@@ -3030,33 +3610,41 @@ static inline SlotCallbackResult UpdateSlot(Object** slot) {
return REMOVE_SLOT;
}
-// Visitor for updating pointers from live objects in old spaces to new space.
+// Visitor for updating root pointers and to-space pointers.
// It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor : public ObjectVisitor {
+// TODO(ulan): Remove code object specific functions. This visitor
+// never visits code objects.
+class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
public:
- void VisitPointer(Object** p) override { UpdateSlot(p); }
+ void VisitPointer(HeapObject* host, Object** p) override { UpdateSlot(p); }
+
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override {
+ for (Object** p = start; p < end; p++) UpdateSlot(p);
+ }
- void VisitPointers(Object** start, Object** end) override {
+ void VisitRootPointer(Root root, Object** p) override { UpdateSlot(p); }
+
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
for (Object** p = start; p < end; p++) UpdateSlot(p);
}
- void VisitCell(RelocInfo* rinfo) override {
+ void VisitCellPointer(Code* host, RelocInfo* rinfo) override {
UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
}
- void VisitEmbeddedPointer(RelocInfo* rinfo) override {
+ void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
}
- void VisitCodeTarget(RelocInfo* rinfo) override {
+ void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
}
- void VisitCodeEntry(Address entry_address) override {
+ void VisitCodeEntry(JSFunction* host, Address entry_address) override {
UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
}
- void VisitDebugTarget(RelocInfo* rinfo) override {
+ void VisitDebugTarget(Code* host, RelocInfo* rinfo) override {
UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot);
}
};
@@ -3140,7 +3728,12 @@ class Evacuator : public Malloced {
virtual ~Evacuator() {}
- virtual bool EvacuatePage(Page* page, const MarkingState& state) = 0;
+ bool EvacuatePage(Page* page);
+
+ void AddObserver(MigrationObserver* observer) {
+ new_space_visitor_.AddObserver(observer);
+ old_space_visitor_.AddObserver(observer);
+ }
// Merge back locally cached info sequentially. Note that this method needs
// to be called from the main thread.
@@ -3152,6 +3745,9 @@ class Evacuator : public Malloced {
protected:
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
+ // |saved_live_bytes| receives the live byte count of the processed page.
+ virtual bool RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;
+
inline Heap* heap() { return heap_; }
void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
@@ -3178,6 +3774,33 @@ class Evacuator : public Malloced {
intptr_t bytes_compacted_;
};
+bool Evacuator::EvacuatePage(Page* page) {
+ bool success = false;
+ DCHECK(page->SweepingDone());
+ intptr_t saved_live_bytes = 0;
+ double evacuation_time = 0.0;
+ {
+ AlwaysAllocateScope always_allocate(heap()->isolate());
+ TimedScope timed_scope(&evacuation_time);
+ success = RawEvacuatePage(page, &saved_live_bytes);
+ }
+ ReportCompactionProgress(evacuation_time, saved_live_bytes);
+ if (FLAG_trace_evacuation) {
+ PrintIsolate(heap()->isolate(),
+ "evacuation[%p]: page=%p new_space=%d "
+ "page_evacuation=%d executable=%d contains_age_mark=%d "
+ "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
+ static_cast<void*>(this), static_cast<void*>(page),
+ page->InNewSpace(),
+ page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+ page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+ page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+ page->Contains(heap()->new_space()->age_mark()),
+ saved_live_bytes, evacuation_time, success);
+ }
+ return success;
+}
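// EvacuatePage() is the shared driver: it sets up the allocation scope,
// times the evacuation, reports compaction progress, and traces the
// result, while the collector-specific copying policy lives in the
// RawEvacuatePage() override (FullEvacuator and YoungGenerationEvacuator
// below).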
+
void Evacuator::Finalize() {
heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
heap()->code_space()->MergeCompactionSpace(
@@ -3198,210 +3821,172 @@ void Evacuator::Finalize() {
class FullEvacuator : public Evacuator {
public:
- FullEvacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
- : Evacuator(heap, record_visitor) {}
+ FullEvacuator(MarkCompactCollector* collector,
+ RecordMigratedSlotVisitor* record_visitor)
+ : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
+
+ protected:
+ bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
- bool EvacuatePage(Page* page, const MarkingState& state) override;
+ MarkCompactCollector* collector_;
};
-bool FullEvacuator::EvacuatePage(Page* page, const MarkingState& state) {
+bool FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
bool success = false;
- DCHECK(page->SweepingDone());
- intptr_t saved_live_bytes = state.live_bytes();
- double evacuation_time = 0.0;
- {
- AlwaysAllocateScope always_allocate(heap()->isolate());
- TimedScope timed_scope(&evacuation_time);
- LiveObjectVisitor object_visitor;
- switch (ComputeEvacuationMode(page)) {
- case kObjectsNewToOld:
- success =
- object_visitor.VisitBlackObjects(page, state, &new_space_visitor_,
- LiveObjectVisitor::kClearMarkbits);
+ LiveObjectVisitor object_visitor;
+ const MarkingState state = collector_->marking_state(page);
+ *live_bytes = state.live_bytes();
+ switch (ComputeEvacuationMode(page)) {
+ case kObjectsNewToOld:
+ success = object_visitor.VisitBlackObjects(
+ page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
+ DCHECK(success);
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ break;
+ case kPageNewToOld:
+ success = object_visitor.VisitBlackObjects(
+ page, state, &new_to_old_page_visitor_,
+ LiveObjectVisitor::kKeepMarking);
+ DCHECK(success);
+ new_to_old_page_visitor_.account_moved_bytes(state.live_bytes());
+ // ArrayBufferTracker will be updated during sweeping.
+ break;
+ case kPageNewToNew:
+ success = object_visitor.VisitBlackObjects(
+ page, state, &new_to_new_page_visitor_,
+ LiveObjectVisitor::kKeepMarking);
+ DCHECK(success);
+ new_to_new_page_visitor_.account_moved_bytes(state.live_bytes());
+ // ArrayBufferTracker will be updated during sweeping.
+ break;
+ case kObjectsOldToOld:
+ success = object_visitor.VisitBlackObjects(
+ page, state, &old_space_visitor_, LiveObjectVisitor::kClearMarkbits);
+ if (!success) {
+ // Aborted compaction page. We have to record slots here, since we
+ // might not have recorded them in the first place.
+ // Note: We mark the page as aborted here to be able to record slots
+ // for code objects in |RecordMigratedSlotVisitor| and to be able
+ // to identify the page later on for post-processing.
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ EvacuateRecordOnlyVisitor record_visitor(heap());
+ success = object_visitor.VisitBlackObjects(
+ page, state, &record_visitor, LiveObjectVisitor::kKeepMarking);
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
DCHECK(success);
+ success = false;
+ } else {
ArrayBufferTracker::ProcessBuffers(
page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
- break;
- case kPageNewToOld:
- success = object_visitor.VisitBlackObjects(
- page, state, &new_to_old_page_visitor_,
- LiveObjectVisitor::kKeepMarking);
- DCHECK(success);
- new_to_old_page_visitor_.account_moved_bytes(
- MarkingState::Internal(page).live_bytes());
- // ArrayBufferTracker will be updated during sweeping.
- break;
- case kPageNewToNew:
- success = object_visitor.VisitBlackObjects(
- page, state, &new_to_new_page_visitor_,
- LiveObjectVisitor::kKeepMarking);
- DCHECK(success);
- new_to_new_page_visitor_.account_moved_bytes(
- MarkingState::Internal(page).live_bytes());
- // ArrayBufferTracker will be updated during sweeping.
- break;
- case kObjectsOldToOld:
- success =
- object_visitor.VisitBlackObjects(page, state, &old_space_visitor_,
- LiveObjectVisitor::kClearMarkbits);
- if (!success) {
- // Aborted compaction page. We have to record slots here, since we
- // might not have recorded them in first place.
- // Note: We mark the page as aborted here to be able to record slots
- // for code objects in |RecordMigratedSlotVisitor|.
- page->SetFlag(Page::COMPACTION_WAS_ABORTED);
- EvacuateRecordOnlyVisitor record_visitor(heap());
- success = object_visitor.VisitBlackObjects(
- page, state, &record_visitor, LiveObjectVisitor::kKeepMarking);
- ArrayBufferTracker::ProcessBuffers(
- page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
- DCHECK(success);
- // We need to return failure here to indicate that we want this page
- // added to the sweeper.
- success = false;
- } else {
- ArrayBufferTracker::ProcessBuffers(
- page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
- }
- break;
- }
- }
- ReportCompactionProgress(evacuation_time, saved_live_bytes);
- if (FLAG_trace_evacuation) {
- PrintIsolate(heap()->isolate(),
- "evacuation[%p]: page=%p new_space=%d "
- "page_evacuation=%d executable=%d contains_age_mark=%d "
- "live_bytes=%" V8PRIdPTR " time=%f\n",
- static_cast<void*>(this), static_cast<void*>(page),
- page->InNewSpace(),
- page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
- page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
- page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
- page->Contains(heap()->new_space()->age_mark()),
- saved_live_bytes, evacuation_time);
+ }
+ break;
}
return success;
}
-int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
- intptr_t live_bytes) {
- if (!FLAG_parallel_compaction) return 1;
- // Compute the number of needed tasks based on a target compaction time, the
- // profiled compaction speed and marked live memory.
- //
- // The number of parallel compaction tasks is limited by:
- // - #evacuation pages
- // - #cores
- const double kTargetCompactionTimeInMs = .5;
+class YoungGenerationEvacuator : public Evacuator {
+ public:
+ YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
+ RecordMigratedSlotVisitor* record_visitor)
+ : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
- double compaction_speed =
- heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+ protected:
+ bool RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
- const int available_cores = Max(
- 1, static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
- int tasks;
- if (compaction_speed > 0) {
- tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
- kTargetCompactionTimeInMs);
- } else {
- tasks = pages;
+ MinorMarkCompactCollector* collector_;
+};
+
+bool YoungGenerationEvacuator::RawEvacuatePage(Page* page,
+ intptr_t* live_bytes) {
+ bool success = false;
+ LiveObjectVisitor object_visitor;
+ const MarkingState state = collector_->marking_state(page);
+ *live_bytes = state.live_bytes();
+ switch (ComputeEvacuationMode(page)) {
+ case kObjectsNewToOld:
+ success = object_visitor.VisitBlackObjects(
+ page, state, &new_space_visitor_, LiveObjectVisitor::kClearMarkbits);
+ DCHECK(success);
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ break;
+ case kPageNewToOld:
+ success = object_visitor.VisitBlackObjects(
+ page, state, &new_to_old_page_visitor_,
+ LiveObjectVisitor::kKeepMarking);
+ DCHECK(success);
+ new_to_old_page_visitor_.account_moved_bytes(state.live_bytes());
+ // TODO(mlippautz): If cleaning array buffers is too slow here we can
+ // delay it until the next GC.
+ ArrayBufferTracker::FreeDead(page, state);
+ if (heap()->ShouldZapGarbage())
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ ZAP_FREE_SPACE);
+ break;
+ case kPageNewToNew:
+ success = object_visitor.VisitBlackObjects(
+ page, state, &new_to_new_page_visitor_,
+ LiveObjectVisitor::kKeepMarking);
+ DCHECK(success);
+ new_to_new_page_visitor_.account_moved_bytes(state.live_bytes());
+ // TODO(mlippautz): If cleaning array buffers is too slow here we can
+ // delay it until the next GC.
+ ArrayBufferTracker::FreeDead(page, state);
+ if (heap()->ShouldZapGarbage())
+ collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
+ ZAP_FREE_SPACE);
+ break;
+ case kObjectsOldToOld:
+ UNREACHABLE();
+ break;
}
- const int tasks_capped_pages = Min(pages, tasks);
- return Min(available_cores, tasks_capped_pages);
+ return success;
}
class EvacuationJobTraits {
public:
- typedef int* PerPageData; // Pointer to number of aborted pages.
- typedef Evacuator* PerTaskData;
+ struct PageData {
+ MarkingState marking_state;
+ };
- static const bool NeedSequentialFinalization = true;
+ typedef PageData PerPageData;
+ typedef Evacuator* PerTaskData;
- static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
+ static void ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
MemoryChunk* chunk, PerPageData) {
- return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk),
- MarkingState::Internal(chunk));
- }
-
- static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
- bool success, PerPageData data) {
- Page* p = static_cast<Page*>(chunk);
- switch (Evacuator::ComputeEvacuationMode(p)) {
- case Evacuator::kPageNewToOld:
- break;
- case Evacuator::kPageNewToNew:
- DCHECK(success);
- break;
- case Evacuator::kObjectsNewToOld:
- DCHECK(success);
- break;
- case Evacuator::kObjectsOldToOld:
- if (success) {
- DCHECK(p->IsEvacuationCandidate());
- DCHECK(p->SweepingDone());
- p->Unlink();
- } else {
- // We have partially compacted the page, i.e., some objects may have
- // moved, others are still in place.
- p->ClearEvacuationCandidate();
- // Slots have already been recorded so we just need to add it to the
- // sweeper, which will happen after updating pointers.
- *data += 1;
- }
- break;
- default:
- UNREACHABLE();
- }
+ evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
}
};
-void MarkCompactCollector::EvacuatePagesInParallel() {
- PageParallelJob<EvacuationJobTraits> job(
- heap_, heap_->isolate()->cancelable_task_manager(),
- &page_parallel_job_semaphore_);
-
- int abandoned_pages = 0;
- intptr_t live_bytes = 0;
- for (Page* page : old_space_evacuation_pages_) {
- live_bytes += MarkingState::Internal(page).live_bytes();
- job.AddPage(page, &abandoned_pages);
- }
-
- const bool reduce_memory = heap()->ShouldReduceMemory();
- const Address age_mark = heap()->new_space()->age_mark();
- for (Page* page : new_space_evacuation_pages_) {
- intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes();
- live_bytes += live_bytes_on_page;
- if (!reduce_memory && !page->NeverEvacuate() &&
- (live_bytes_on_page > Evacuator::PageEvacuationThreshold()) &&
- !page->Contains(age_mark) &&
- heap()->CanExpandOldGeneration(live_bytes_on_page)) {
- if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
- EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
- } else {
- EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
- }
- }
-
- job.AddPage(page, &abandoned_pages);
- }
- DCHECK_GE(job.NumberOfPages(), 1);
-
+template <class Evacuator, class Collector>
+void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
+ Collector* collector, PageParallelJob<EvacuationJobTraits>* job,
+ RecordMigratedSlotVisitor* record_visitor,
+ MigrationObserver* migration_observer, const intptr_t live_bytes) {
// Used for trace summary.
double compaction_speed = 0;
if (FLAG_trace_evacuation) {
compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
}
+ const bool profiling =
+ heap()->isolate()->is_profiling() ||
+ heap()->isolate()->logger()->is_logging_code_events() ||
+ heap()->isolate()->heap_profiler()->is_tracking_object_moves();
+ ProfilingMigrationObserver profiling_observer(heap());
+
const int wanted_num_tasks =
- NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
- FullEvacuator** evacuators = new FullEvacuator*[wanted_num_tasks];
- RecordMigratedSlotVisitor record_visitor(this);
+ NumberOfParallelCompactionTasks(job->NumberOfPages());
+ Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
for (int i = 0; i < wanted_num_tasks; i++) {
- evacuators[i] = new FullEvacuator(heap(), &record_visitor);
+ evacuators[i] = new Evacuator(collector, record_visitor);
+ if (profiling) evacuators[i]->AddObserver(&profiling_observer);
+ if (migration_observer != nullptr)
+ evacuators[i]->AddObserver(migration_observer);
}
- job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
+ job->Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
const Address top = heap()->new_space()->top();
for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i]->Finalize();
@@ -3419,16 +4004,83 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
if (FLAG_trace_evacuation) {
PrintIsolate(isolate(),
"%8.0f ms: evacuation-summary: parallel=%s pages=%d "
- "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
+ "wanted_tasks=%d tasks=%d cores=%" PRIuS
" live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
isolate()->time_millis_since_init(),
- FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
- abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
+ FLAG_parallel_compaction ? "yes" : "no", job->NumberOfPages(),
+ wanted_num_tasks, job->NumberOfTasks(),
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
live_bytes, compaction_speed);
}
}
+bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
+ const bool reduce_memory = heap()->ShouldReduceMemory();
+ const Address age_mark = heap()->new_space()->age_mark();
+ return !reduce_memory && !p->NeverEvacuate() &&
+ (live_bytes > Evacuator::PageEvacuationThreshold()) &&
+ !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
+}
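// Illustration of the page-promotion heuristic above: a new-space page is
// moved wholesale (by flipping page flags rather than copying each
// object) only when memory reduction is off, the page is evacuable, its
// live bytes exceed Evacuator::PageEvacuationThreshold() (a flag-derived
// fraction of the page, by assumption), the page does not contain the
// age mark, and the old generation can absorb its live bytes.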
+
+void MarkCompactCollector::EvacuatePagesInParallel() {
+ PageParallelJob<EvacuationJobTraits> job(
+ heap_, heap_->isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+ intptr_t live_bytes = 0;
+
+ for (Page* page : old_space_evacuation_pages_) {
+ live_bytes += MarkingState::Internal(page).live_bytes();
+ job.AddPage(page, {marking_state(page)});
+ }
+
+ for (Page* page : new_space_evacuation_pages_) {
+ intptr_t live_bytes_on_page = MarkingState::Internal(page).live_bytes();
+ live_bytes += live_bytes_on_page;
+ if (ShouldMovePage(page, live_bytes_on_page)) {
+ if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
+ EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
+ } else {
+ EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
+ }
+ }
+ job.AddPage(page, {marking_state(page)});
+ }
+ DCHECK_GE(job.NumberOfPages(), 1);
+
+ RecordMigratedSlotVisitor record_visitor(this);
+ CreateAndExecuteEvacuationTasks<FullEvacuator>(this, &job, &record_visitor,
+ nullptr, live_bytes);
+ PostProcessEvacuationCandidates();
+}
+
+void MinorMarkCompactCollector::EvacuatePagesInParallel() {
+ PageParallelJob<EvacuationJobTraits> job(
+ heap_, heap_->isolate()->cancelable_task_manager(),
+ &page_parallel_job_semaphore_);
+ intptr_t live_bytes = 0;
+
+ for (Page* page : new_space_evacuation_pages_) {
+ intptr_t live_bytes_on_page = marking_state(page).live_bytes();
+ live_bytes += live_bytes_on_page;
+ if (ShouldMovePage(page, live_bytes_on_page)) {
+ if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
+ EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
+ } else {
+ EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
+ }
+ }
+ job.AddPage(page, {marking_state(page)});
+ }
+ DCHECK_GE(job.NumberOfPages(), 1);
+
+ YoungGenerationMigrationObserver observer(heap(),
+ heap()->mark_compact_collector());
+ YoungGenerationRecordMigratedSlotVisitor record_visitor(
+ heap()->mark_compact_collector());
+ CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
+ this, &job, &record_visitor, &observer, live_bytes);
+}
+
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
@@ -3464,6 +4116,9 @@ int MarkCompactCollector::Sweeper::RawSweep(
space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
+ // Sweeper takes the marking state of the full collector.
+ const MarkingState state = MarkingState::Internal(p);
+
// If there are old-to-new slots in that page, we have to filter out slots
// that are in dead memory which is freed by the sweeper.
ClearOldToNewSlotsMode slots_clearing_mode = GetClearOldToNewSlotsMode(p);
@@ -3473,7 +4128,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
// Before we sweep objects on the page, we free dead array buffers which
// requires valid mark bits.
- ArrayBufferTracker::FreeDead(p);
+ ArrayBufferTracker::FreeDead(p, state);
Address free_start = p->area_start();
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
@@ -3492,11 +4147,11 @@ int MarkCompactCollector::Sweeper::RawSweep(
intptr_t max_freed_bytes = 0;
int curr_region = -1;
- LiveObjectIterator<kBlackObjects> it(p, MarkingState::Internal(p));
+ LiveObjectIterator<kBlackObjects> it(p, state);
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
- DCHECK(ObjectMarking::IsBlack(object, MarkingState::Internal(object)));
+ DCHECK(ObjectMarking::IsBlack(object, state));
Address free_end = object->address();
if (free_end != free_start) {
CHECK_GT(free_end, free_start);
@@ -3570,7 +4225,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
}
// Clear the mark bits of that page and reset live bytes count.
- MarkingState::Internal(p).ClearLiveness();
+ state.ClearLiveness();
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
if (free_list_mode == IGNORE_FREE_LIST) return 0;
@@ -3661,9 +4316,9 @@ void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
swept_list_[space->identity()].Add(page);
}
-void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+void MarkCompactCollector::Evacuate() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
- Heap::RelocationLock relocation_lock(heap());
+ base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
@@ -3737,24 +4392,23 @@ template <RememberedSetType type>
class PointerUpdateJobTraits {
public:
typedef int PerPageData; // Per page data is not used in this job.
- typedef int PerTaskData; // Per task data is not used in this job.
+ typedef const MarkCompactCollectorBase* PerTaskData;
- static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
- PerPageData) {
- UpdateUntypedPointers(heap, chunk);
- UpdateTypedPointers(heap, chunk);
- return true;
- }
- static const bool NeedSequentialFinalization = false;
- static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+ static void ProcessPageInParallel(Heap* heap, PerTaskData task_data,
+ MemoryChunk* chunk, PerPageData) {
+ UpdateUntypedPointers(heap, chunk, task_data);
+ UpdateTypedPointers(heap, chunk, task_data);
}
private:
- static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
+ static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk,
+ const MarkCompactCollectorBase* collector) {
+ base::LockGuard<base::RecursiveMutex> guard(chunk->mutex());
if (type == OLD_TO_NEW) {
- RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
- return CheckAndUpdateOldToNewSlot(heap, slot);
- });
+ RememberedSet<OLD_TO_NEW>::Iterate(
+ chunk, [heap, collector](Address slot) {
+ return CheckAndUpdateOldToNewSlot(heap, slot, collector);
+ });
} else {
RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
return UpdateSlot(reinterpret_cast<Object**>(slot));
@@ -3762,7 +4416,8 @@ class PointerUpdateJobTraits {
}
}
- static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
+ static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
+ const MarkCompactCollectorBase* collector) {
if (type == OLD_TO_OLD) {
Isolate* isolate = heap->isolate();
RememberedSet<OLD_TO_OLD>::IterateTyped(
@@ -3774,19 +4429,20 @@ class PointerUpdateJobTraits {
} else {
Isolate* isolate = heap->isolate();
RememberedSet<OLD_TO_NEW>::IterateTyped(
- chunk,
- [isolate, heap](SlotType slot_type, Address host_addr, Address slot) {
+ chunk, [isolate, heap, collector](SlotType slot_type,
+ Address host_addr, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
- isolate, slot_type, slot, [heap](Object** slot) {
+ isolate, slot_type, slot, [heap, collector](Object** slot) {
return CheckAndUpdateOldToNewSlot(
- heap, reinterpret_cast<Address>(slot));
+ heap, reinterpret_cast<Address>(slot), collector);
});
});
}
}
- static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
- Address slot_address) {
+ static SlotCallbackResult CheckAndUpdateOldToNewSlot(
+ Heap* heap, Address slot_address,
+ const MarkCompactCollectorBase* collector) {
// There may be concurrent action on slots in dead objects. Concurrent
// sweeper threads may overwrite the slot content with a free space object.
// Moreover, the pointed-to object may also get concurrently overwritten
@@ -3826,7 +4482,7 @@ class PointerUpdateJobTraits {
// markbits to determine liveness.
HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
if (ObjectMarking::IsBlack(heap_object,
- MarkingState::Internal(heap_object)))
+ collector->marking_state(heap_object)))
return KEEP_SLOT;
} else {
DCHECK(!heap->InNewSpace(slot_reference));
@@ -3835,52 +4491,46 @@ class PointerUpdateJobTraits {
}
};
-int NumberOfPointerUpdateTasks(int pages) {
- if (!FLAG_parallel_pointer_update) return 1;
- const int available_cores = Max(
- 1, static_cast<int>(
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
- const int kPagesPerTask = 4;
- return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
-}
-
template <RememberedSetType type>
-void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
+void MarkCompactCollectorBase::UpdatePointersInParallel(
+ Heap* heap, base::Semaphore* semaphore,
+ const MarkCompactCollectorBase* collector) {
PageParallelJob<PointerUpdateJobTraits<type> > job(
heap, heap->isolate()->cancelable_task_manager(), semaphore);
RememberedSet<type>::IterateMemoryChunks(
heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
int num_pages = job.NumberOfPages();
int num_tasks = NumberOfPointerUpdateTasks(num_pages);
- job.Run(num_tasks, [](int i) { return 0; });
+ job.Run(num_tasks, [collector](int i) { return collector; });
}
class ToSpacePointerUpdateJobTraits {
public:
- typedef std::pair<Address, Address> PerPageData;
+ struct PageData {
+ Address start;
+ Address end;
+ MarkingState marking_state;
+ };
+
+ typedef PageData PerPageData;
typedef PointersUpdatingVisitor* PerTaskData;
- static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
- MemoryChunk* chunk, PerPageData limits) {
+ static void ProcessPageInParallel(Heap* heap, PerTaskData visitor,
+ MemoryChunk* chunk, PerPageData page_data) {
if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
// New->new promoted pages contain garbage so they require iteration
// using markbits.
- ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
+ ProcessPageInParallelVisitLive(heap, visitor, chunk, page_data);
} else {
- ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
+ ProcessPageInParallelVisitAll(heap, visitor, chunk, page_data);
}
- return true;
- }
-
- static const bool NeedSequentialFinalization = false;
- static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
}
private:
static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk,
- PerPageData limits) {
- for (Address cur = limits.first; cur < limits.second;) {
+ PerPageData page_data) {
+ for (Address cur = page_data.start; cur < page_data.end;) {
HeapObject* object = HeapObject::FromAddress(cur);
Map* map = object->map();
int size = object->SizeFromMap(map);
@@ -3891,8 +4541,8 @@ class ToSpacePointerUpdateJobTraits {
static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk,
- PerPageData limits) {
- LiveObjectIterator<kBlackObjects> it(chunk, MarkingState::Internal(chunk));
+ PerPageData page_data) {
+ LiveObjectIterator<kBlackObjects> it(chunk, page_data.marking_state);
HeapObject* object = NULL;
while ((object = it.Next()) != NULL) {
Map* map = object->map();
@@ -3902,7 +4552,10 @@ class ToSpacePointerUpdateJobTraits {
}
};
-void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
+template <class MarkingStateProvider>
+void UpdateToSpacePointersInParallel(
+ Heap* heap, base::Semaphore* semaphore,
+ const MarkingStateProvider& marking_state_provider) {
PageParallelJob<ToSpacePointerUpdateJobTraits> job(
heap, heap->isolate()->cancelable_task_manager(), semaphore);
Address space_start = heap->new_space()->bottom();
@@ -3911,7 +4564,7 @@ void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
Address start =
page->Contains(space_start) ? space_start : page->area_start();
Address end = page->Contains(space_end) ? space_end : page->area_end();
- job.AddPage(page, std::make_pair(start, end));
+ job.AddPage(page, {start, end, marking_state_provider.marking_state(page)});
}
PointersUpdatingVisitor visitor;
int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
@@ -3921,22 +4574,25 @@ void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor;
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
- UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_);
+ UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
+ *this);
// Update roots.
+ PointersUpdatingVisitor updating_visitor;
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
+ UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_,
+ this);
}
{
Heap* heap = this->heap();
TRACE_GC(heap->tracer(),
GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
- UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_);
+ UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_,
+ this);
}
{
@@ -3951,6 +4607,66 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
}
}
+void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
+
+ PointersUpdatingVisitor updating_visitor;
+
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
+ {
+ TRACE_GC(
+ heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_TOSPACE);
+ UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_,
+ *this);
+ }
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
+ heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
+ }
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_OLD);
+ UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_,
+ this);
+ }
+ }
+
+ {
+ TRACE_GC(heap()->tracer(),
+ GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
+
+ EvacuationWeakObjectRetainer evacuation_object_retainer;
+ heap()->ProcessWeakListRoots(&evacuation_object_retainer);
+
+ // Update pointers from external string table.
+ heap()->UpdateNewSpaceReferencesInExternalStringTable(
+ &UpdateReferenceInExternalStringTableEntry);
+ heap()->IterateEncounteredWeakCollections(&updating_visitor);
+ }
+}
+
+void MarkCompactCollector::PostProcessEvacuationCandidates() {
+ int aborted_pages = 0;
+ for (Page* p : old_space_evacuation_pages_) {
+ if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+ p->ClearEvacuationCandidate();
+ aborted_pages++;
+ } else {
+ DCHECK(p->IsEvacuationCandidate());
+ DCHECK(p->SweepingDone());
+ p->Unlink();
+ }
+ }
+ if (FLAG_trace_evacuation && (aborted_pages > 0)) {
+ PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
+ isolate()->time_millis_since_init(), aborted_pages);
+ }
+}
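// Pages that aborted compaction keep their markbits (kKeepMarking in
// FullEvacuator::RawEvacuatePage) and, with COMPACTION_WAS_ABORTED set,
// are returned to the sweeper after pointer updating instead of being
// unlinked and released here.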
void MarkCompactCollector::ReleaseEvacuationCandidates() {
for (Page* p : old_space_evacuation_pages_) {
@@ -3993,7 +4709,7 @@ int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
DCHECK_EQ(Page::kSweepingPending,
page->concurrent_sweeping_state().Value());
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
- const Sweeper::FreeSpaceTreatmentMode free_space_mode =
+ const FreeSpaceTreatmentMode free_space_mode =
Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
if (identity == NEW_SPACE) {
RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
@@ -4066,7 +4782,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
DCHECK(p->SweepingDone());
if (p->IsEvacuationCandidate()) {
- // Will be processed in EvacuateNewSpaceAndCandidates.
+ // Will be processed in Evacuate.
DCHECK(evacuation_candidates_.length() > 0);
continue;
}
@@ -4078,8 +4794,9 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
// testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
Sweeper::RawSweep(p, Sweeper::IGNORE_FREE_LIST,
- Heap::ShouldZapGarbage() ? Sweeper::ZAP_FREE_SPACE
- : Sweeper::IGNORE_FREE_SPACE);
+ Heap::ShouldZapGarbage()
+ ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
+ : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
continue;
}
@@ -4136,9 +4853,6 @@ void MarkCompactCollector::StartSweepSpaces() {
heap_->lo_space()->FreeUnmarkedObjects();
}
-Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
-
-
void MarkCompactCollector::Initialize() {
MarkCompactMarkingVisitor::Initialize();
IncrementalMarking::Initialize();
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 63afd834ea..24ec7043ab 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -10,27 +10,36 @@
#include "src/base/bits.h"
#include "src/base/platform/condition-variable.h"
#include "src/cancelable-task.h"
+#include "src/heap/concurrent-marking-deque.h"
#include "src/heap/marking.h"
+#include "src/heap/sequential-marking-deque.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"
namespace v8 {
namespace internal {
-// Callback function, returns whether an object is alive. The heap size
-// of the object is returned in size. It optionally updates the offset
-// to the first live object in the page (only used for old and map objects).
-typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
-
-// Callback function to mark an object in a given heap.
-typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);
-
// Forward declarations.
class CodeFlusher;
+class EvacuationJobTraits;
class HeapObjectVisitor;
+class LocalWorkStealingMarkingDeque;
class MarkCompactCollector;
class MinorMarkCompactCollector;
class MarkingVisitor;
+class MigrationObserver;
+template <typename JobTraits>
+class PageParallelJob;
+class RecordMigratedSlotVisitor;
+class ThreadLocalTop;
+class WorkStealingMarkingDeque;
+class YoungGenerationMarkingVisitor;
+
+#ifdef V8_CONCURRENT_MARKING
+using MarkingDeque = ConcurrentMarkingDeque;
+#else
+using MarkingDeque = SequentialMarkingDeque;
+#endif
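// With this alias, collector code is written once against MarkingDeque
// and transparently picks up the concurrent implementation when
// V8_CONCURRENT_MARKING is defined, e.g. (sketch; constructor signature
// assumed from the removed MarkingDeque below):
//
//   MarkingDeque deque(heap);  // concurrent or sequential, per build flag
//   deque.Push(object);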
class ObjectMarking : public AllStatic {
public:
@@ -76,38 +85,31 @@ class ObjectMarking : public AllStatic {
template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
V8_INLINE static bool BlackToGrey(HeapObject* obj,
const MarkingState& state) {
- DCHECK(
- (access_mode == MarkBit::ATOMIC || IsBlack<access_mode>(obj, state)));
MarkBit markbit = MarkBitFrom(obj, state);
if (!Marking::BlackToGrey<access_mode>(markbit)) return false;
- state.IncrementLiveBytes(-obj->Size());
+ state.IncrementLiveBytes<access_mode>(-obj->Size());
return true;
}
template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
V8_INLINE static bool WhiteToGrey(HeapObject* obj,
const MarkingState& state) {
- DCHECK(
- (access_mode == MarkBit::ATOMIC || IsWhite<access_mode>(obj, state)));
return Marking::WhiteToGrey<access_mode>(MarkBitFrom(obj, state));
}
template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
V8_INLINE static bool WhiteToBlack(HeapObject* obj,
const MarkingState& state) {
- DCHECK(
- (access_mode == MarkBit::ATOMIC || IsWhite<access_mode>(obj, state)));
- if (!ObjectMarking::WhiteToGrey<access_mode>(obj, state)) return false;
- return ObjectMarking::GreyToBlack<access_mode>(obj, state);
+ return ObjectMarking::WhiteToGrey<access_mode>(obj, state) &&
+ ObjectMarking::GreyToBlack<access_mode>(obj, state);
}
template <MarkBit::AccessMode access_mode = MarkBit::NON_ATOMIC>
V8_INLINE static bool GreyToBlack(HeapObject* obj,
const MarkingState& state) {
- DCHECK((access_mode == MarkBit::ATOMIC || IsGrey<access_mode>(obj, state)));
MarkBit markbit = MarkBitFrom(obj, state);
if (!Marking::GreyToBlack<access_mode>(markbit)) return false;
- state.IncrementLiveBytes(obj->Size());
+ state.IncrementLiveBytes<access_mode>(obj->Size());
return true;
}
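// The transitions above implement the usual tri-color scheme: white
// (unvisited) -> grey (discovered, conventionally on the marking deque)
// -> black (scanned). WhiteToBlack is simply the composition
// WhiteToGrey && GreyToBlack; live byte accounting happens on the
// grey/black transitions, with GreyToBlack adding the object's size and
// BlackToGrey subtracting it.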
@@ -115,148 +117,6 @@ class ObjectMarking : public AllStatic {
DISALLOW_IMPLICIT_CONSTRUCTORS(ObjectMarking);
};
-// ----------------------------------------------------------------------------
-// Marking deque for tracing live objects.
-class MarkingDeque {
- public:
- explicit MarkingDeque(Heap* heap)
- : backing_store_(nullptr),
- backing_store_committed_size_(0),
- array_(nullptr),
- top_(0),
- bottom_(0),
- mask_(0),
- overflowed_(false),
- in_use_(false),
- uncommit_task_pending_(false),
- heap_(heap) {}
-
- void SetUp();
- void TearDown();
-
- // Ensures that the marking deque is committed and will stay committed until
- // StopUsing() is called.
- void StartUsing();
- void StopUsing();
- void Clear();
-
- inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
-
- inline bool IsEmpty() { return top_ == bottom_; }
-
- bool overflowed() const { return overflowed_; }
-
- void ClearOverflowed() { overflowed_ = false; }
-
- void SetOverflowed() { overflowed_ = true; }
-
- // Push the object on the marking stack if there is room, otherwise mark the
- // deque as overflowed and wait for a rescan of the heap.
- INLINE(bool Push(HeapObject* object)) {
- DCHECK(object->IsHeapObject());
- if (IsFull()) {
- SetOverflowed();
- return false;
- } else {
- array_[top_] = object;
- top_ = ((top_ + 1) & mask_);
- return true;
- }
- }
-
- INLINE(HeapObject* Pop()) {
- DCHECK(!IsEmpty());
- top_ = ((top_ - 1) & mask_);
- HeapObject* object = array_[top_];
- DCHECK(object->IsHeapObject());
- return object;
- }
-
- // Unshift the object into the marking stack if there is room, otherwise mark
- // the deque as overflowed and wait for a rescan of the heap.
- INLINE(bool Unshift(HeapObject* object)) {
- DCHECK(object->IsHeapObject());
- if (IsFull()) {
- SetOverflowed();
- return false;
- } else {
- bottom_ = ((bottom_ - 1) & mask_);
- array_[bottom_] = object;
- return true;
- }
- }
-
- template <typename Callback>
- void Iterate(Callback callback) {
- int i = bottom_;
- while (i != top_) {
- callback(array_[i]);
- i = (i + 1) & mask_;
- }
- }
-
- HeapObject** array() { return array_; }
- int bottom() { return bottom_; }
- int top() { return top_; }
- int mask() { return mask_; }
- void set_top(int top) { top_ = top; }
-
- private:
- // This task uncommits the marking_deque backing store if
- // markin_deque->in_use_ is false.
- class UncommitTask : public CancelableTask {
- public:
- explicit UncommitTask(Isolate* isolate, MarkingDeque* marking_deque)
- : CancelableTask(isolate), marking_deque_(marking_deque) {}
-
- private:
- // CancelableTask override.
- void RunInternal() override {
- base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
- if (!marking_deque_->in_use_) {
- marking_deque_->Uncommit();
- }
- marking_deque_->uncommit_task_pending_ = false;
- }
-
- MarkingDeque* marking_deque_;
- DISALLOW_COPY_AND_ASSIGN(UncommitTask);
- };
-
- static const size_t kMaxSize = 4 * MB;
- static const size_t kMinSize = 256 * KB;
-
- // Must be called with mutex lock.
- void EnsureCommitted();
-
- // Must be called with mutex lock.
- void Uncommit();
-
- // Must be called with mutex lock.
- void StartUncommitTask();
-
- base::Mutex mutex_;
-
- base::VirtualMemory* backing_store_;
- size_t backing_store_committed_size_;
- HeapObject** array_;
- // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
- // empty when top_ == bottom_. It is full when top_ + 1 == bottom
- // (mod mask + 1).
- int top_;
- int bottom_;
- int mask_;
- bool overflowed_;
- // in_use_ == true after taking mutex lock implies that the marking deque is
- // committed and will stay committed at least until in_use_ == false.
- bool in_use_;
- bool uncommit_task_pending_;
- Heap* heap_;
-
- DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
-};
-
-
// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
@@ -284,7 +144,10 @@ class CodeFlusher {
ProcessJSFunctionCandidates();
}
- void IteratePointersToFromSpace(ObjectVisitor* v);
+ inline void VisitListHeads(RootVisitor* v);
+
+ template <typename StaticVisitor>
+ inline void IteratePointersToFromSpace();
private:
void ProcessJSFunctionCandidates();
@@ -310,10 +173,6 @@ class CodeFlusher {
DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
-
-// Defined in isolate.h.
-class ThreadLocalTop;
-
class MarkBitCellIterator BASE_EMBEDDED {
public:
MarkBitCellIterator(MemoryChunk* chunk, MarkingState state) : chunk_(chunk) {
@@ -421,42 +280,129 @@ class LiveObjectVisitor BASE_EMBEDDED {
};
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
+enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
+enum MarkingTreatmentMode { KEEP, CLEAR };
-class MinorMarkCompactCollector {
+// Base class for minor and full MC collectors.
+class MarkCompactCollectorBase {
public:
- explicit MinorMarkCompactCollector(Heap* heap)
- : heap_(heap), marking_deque_(heap) {}
+ virtual ~MarkCompactCollectorBase() {}
- void SetUp();
- void TearDown();
+ // Note: Make sure to refer to the instances by their concrete collector
+ // type to avoid vtable lookups for the marking state methods in hot paths.
+ virtual MarkingState marking_state(HeapObject* object) const = 0;
+ virtual MarkingState marking_state(MemoryChunk* chunk) const = 0;
- void CollectGarbage();
+ virtual void SetUp() = 0;
+ virtual void TearDown() = 0;
+ virtual void CollectGarbage() = 0;
inline Heap* heap() const { return heap_; }
+ inline Isolate* isolate() { return heap()->isolate(); }
+
+ protected:
+ explicit MarkCompactCollectorBase(Heap* heap) : heap_(heap) {}
+
+ // Marking operations for objects reachable from roots.
+ virtual void MarkLiveObjects() = 0;
+ // Mark objects reachable (transitively) from objects in the marking
+ // stack.
+ virtual void EmptyMarkingDeque() = 0;
+ virtual void ProcessMarkingDeque() = 0;
+ // Clear non-live references held in side data structures.
+ virtual void ClearNonLiveReferences() = 0;
+ virtual void EvacuatePrologue() = 0;
+ virtual void EvacuateEpilogue() = 0;
+ virtual void Evacuate() = 0;
+ virtual void EvacuatePagesInParallel() = 0;
+ virtual void UpdatePointersAfterEvacuation() = 0;
+
+ // The number of parallel compaction tasks, including the main thread.
+ int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
+
+ template <class Evacuator, class Collector>
+ void CreateAndExecuteEvacuationTasks(
+ Collector* collector, PageParallelJob<EvacuationJobTraits>* job,
+ RecordMigratedSlotVisitor* record_visitor,
+ MigrationObserver* migration_observer, const intptr_t live_bytes);
+
+ // Returns whether this page should be moved according to heuristics.
+ bool ShouldMovePage(Page* p, intptr_t live_bytes);
+
+ template <RememberedSetType type>
+ void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore,
+ const MarkCompactCollectorBase* collector);
+
+ int NumberOfParallelCompactionTasks(int pages);
+ int NumberOfPointerUpdateTasks(int pages);
+
+ Heap* heap_;
+};
+
+// Collector for young-generation only.
+class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
+ public:
+ explicit MinorMarkCompactCollector(Heap* heap);
+ ~MinorMarkCompactCollector();
+
+ MarkingState marking_state(HeapObject* object) const override {
+ return MarkingState::External(object);
+ }
+
+ MarkingState marking_state(MemoryChunk* chunk) const override {
+ return MarkingState::External(chunk);
+ }
+
+ void SetUp() override;
+ void TearDown() override;
+ void CollectGarbage() override;
+
+ void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
+ FreeSpaceTreatmentMode free_space_mode);
+ void CleanupSweepToIteratePages();
private:
+ class RootMarkingVisitorSeedOnly;
class RootMarkingVisitor;
- inline Isolate* isolate() { return heap()->isolate(); }
- inline MarkingDeque* marking_deque() { return &marking_deque_; }
+ static const int kNumMarkers = 4;
+ static const int kMainMarker = 0;
- V8_INLINE void MarkObject(HeapObject* obj);
- V8_INLINE void PushBlack(HeapObject* obj);
+ inline WorkStealingMarkingDeque* marking_deque() { return marking_deque_; }
- SlotCallbackResult CheckAndMarkObject(Heap* heap, Address slot_address);
- void MarkLiveObjects();
- void ProcessMarkingDeque();
- void EmptyMarkingDeque();
+ inline YoungGenerationMarkingVisitor* marking_visitor(int index) {
+ DCHECK_LT(index, kNumMarkers);
+ return marking_visitor_[index];
+ }
- Heap* heap_;
- MarkingDeque marking_deque_;
+ SlotCallbackResult CheckAndMarkObject(Heap* heap, Address slot_address);
+ void MarkLiveObjects() override;
+ void MarkRootSetInParallel();
+ void ProcessMarkingDeque() override;
+ void EmptyMarkingDeque() override;
+ void ClearNonLiveReferences() override;
+
+ void EvacuatePrologue() override;
+ void EvacuateEpilogue() override;
+ void Evacuate() override;
+ void EvacuatePagesInParallel() override;
+ void UpdatePointersAfterEvacuation() override;
+
+ int NumberOfMarkingTasks();
+
+ WorkStealingMarkingDeque* marking_deque_;
+ YoungGenerationMarkingVisitor* marking_visitor_[kNumMarkers];
+ base::Semaphore page_parallel_job_semaphore_;
+ List<Page*> new_space_evacuation_pages_;
+ std::vector<Page*> sweep_to_iterate_pages_;
- friend class StaticYoungGenerationMarkingVisitor;
+ friend class MarkYoungGenerationJobTraits;
+ friend class YoungGenerationMarkingTask;
+ friend class YoungGenerationMarkingVisitor;
};
-// -------------------------------------------------------------------------
-// Mark-Compact collector
-class MarkCompactCollector {
+// Collector for young and old generation.
+class MarkCompactCollector final : public MarkCompactCollectorBase {
public:
class RootMarkingVisitor;
@@ -465,7 +411,6 @@ class MarkCompactCollector {
class SweeperTask;
enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
- enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
enum ClearOldToNewSlotsMode {
DO_NOT_CLEAR,
CLEAR_REGULAR_SLOTS,
@@ -543,12 +488,18 @@ class MarkCompactCollector {
static void Initialize();
- static SlotCallbackResult CheckAndMarkObject(Heap* heap,
- Address slot_address);
+ MarkingState marking_state(HeapObject* object) const override {
+ return MarkingState::Internal(object);
+ }
- void SetUp();
+ MarkingState marking_state(MemoryChunk* chunk) const override {
+ return MarkingState::Internal(chunk);
+ }
- void TearDown();
+ void SetUp() override;
+ void TearDown() override;
+ // Performs a global garbage collection.
+ void CollectGarbage() override;
void CollectEvacuationCandidates(PagedSpace* space);
@@ -558,24 +509,10 @@ class MarkCompactCollector {
// choosing spaces to compact.
void Prepare();
- // Performs a global garbage collection.
- void CollectGarbage();
-
bool StartCompaction();
void AbortCompaction();
- // Determine type of object and emit deletion log event.
- static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
-
- // Distinguishable invalid map encodings (for single word and multiple words)
- // that indicate free regions.
- static const uint32_t kSingleFreeEncoding = 0;
- static const uint32_t kMultiFreeEncoding = 1;
-
- inline Heap* heap() const { return heap_; }
- inline Isolate* isolate() const;
-
CodeFlusher* code_flusher() { return code_flusher_; }
inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
@@ -659,31 +596,13 @@ class MarkCompactCollector {
// Finishes GC, performs heap verification if enabled.
void Finish();
- // -----------------------------------------------------------------------
- // Phase 1: Marking live objects.
- //
- // Before: The heap has been prepared for garbage collection by
- // MarkCompactCollector::Prepare() and is otherwise in its
- // normal state.
- //
- // After: Live objects are marked and non-live objects are unmarked.
-
- friend class CodeMarkingVisitor;
- friend class IncrementalMarkingMarkingVisitor;
- friend class MarkCompactMarkingVisitor;
- friend class MarkingVisitor;
- friend class RecordMigratedSlotVisitor;
- friend class SharedFunctionInfoMarkingVisitor;
- friend class StaticYoungGenerationMarkingVisitor;
-
// Mark code objects that are active on the stack to prevent them
// from being flushed.
void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
void PrepareForCodeFlushing();
- // Marking operations for objects reachable from roots.
- void MarkLiveObjects();
+ void MarkLiveObjects() override;
// Pushes a black object onto the marking stack and accounts for live bytes.
// Note that this assumes live bytes have not yet been counted.
@@ -704,9 +623,7 @@ class MarkCompactCollector {
// the string table are weak.
void MarkStringTable(RootMarkingVisitor* visitor);
- // Mark objects reachable (transitively) from objects in the marking stack
- // or overflowed in the heap.
- void ProcessMarkingDeque();
+ void ProcessMarkingDeque() override;
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap. This respects references only considered in
@@ -714,22 +631,19 @@ class MarkCompactCollector {
// - Processing of objects reachable through Harmony WeakMaps.
// - Objects reachable due to host application logic like object groups,
// implicit references' groups, or embedder heap tracing.
- void ProcessEphemeralMarking(ObjectVisitor* visitor,
- bool only_process_harmony_weak_collections);
+ void ProcessEphemeralMarking(bool only_process_harmony_weak_collections);
// If the call-site of the top optimized code was not prepared for
// deoptimization, then treat the maps in the code as strong pointers,
// otherwise a map can die and deoptimize the code.
- void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
+ void ProcessTopOptimizedFrame(RootMarkingVisitor* visitor);
// Collects a list of dependent code from maps embedded in optimize code.
DependentCode* DependentCodeListFromNonLiveMaps();
- // Mark objects reachable (transitively) from objects in the marking
- // stack. This function empties the marking stack, but may leave
- // overflowed objects in the heap, in which case the marking stack's
- // overflow flag will be set.
- void EmptyMarkingDeque();
+ // This function empties the marking stack, but may leave overflowed objects
+ // in the heap, in which case the marking stack's overflow flag will be set.
+ void EmptyMarkingDeque() override;
// Refill the marking stack with overflowed objects from the heap. This
// function either leaves the marking stack full or clears the overflow
@@ -750,7 +664,7 @@ class MarkCompactCollector {
// Clear non-live references in weak cells, transition and descriptor arrays,
// and deoptimize dependent code of non-live maps.
- void ClearNonLiveReferences();
+ void ClearNonLiveReferences() override;
void MarkDependentCodeForDeoptimization(DependentCode* list);
// Find non-live targets of simple transitions in the given list. Clear
// transitions to non-live targets and if needed trim descriptors arrays.
@@ -789,29 +703,14 @@ class MarkCompactCollector {
void StartSweepSpaces();
void StartSweepSpace(PagedSpace* space);
- void EvacuatePrologue();
- void EvacuateEpilogue();
- void EvacuatePagesInParallel();
-
- // The number of parallel compaction tasks, including the main thread.
- int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);
-
- void EvacuateNewSpaceAndCandidates();
-
- void UpdatePointersAfterEvacuation();
+ void EvacuatePrologue() override;
+ void EvacuateEpilogue() override;
+ void Evacuate() override;
+ void EvacuatePagesInParallel() override;
+ void UpdatePointersAfterEvacuation() override;
void ReleaseEvacuationCandidates();
-
-
-#ifdef DEBUG
- friend class MarkObjectVisitor;
- static void VisitObject(HeapObject* obj);
-
- friend class UnmarkObjectVisitor;
- static void UnmarkObject(HeapObject* obj);
-#endif
-
- Heap* heap_;
+ void PostProcessEvacuationCandidates();
base::Semaphore page_parallel_job_semaphore_;
@@ -854,11 +753,16 @@ class MarkCompactCollector {
Sweeper sweeper_;
+ friend class CodeMarkingVisitor;
friend class Heap;
+ friend class IncrementalMarkingMarkingVisitor;
+ friend class MarkCompactMarkingVisitor;
+ friend class MarkingVisitor;
+ friend class RecordMigratedSlotVisitor;
+ friend class SharedFunctionInfoMarkingVisitor;
friend class StoreBuffer;
};
-
class EvacuationScope BASE_EMBEDDED {
public:
explicit EvacuationScope(MarkCompactCollector* collector)
@@ -872,7 +776,6 @@ class EvacuationScope BASE_EMBEDDED {
MarkCompactCollector* collector_;
};
-V8_EXPORT_PRIVATE const char* AllocationSpaceName(AllocationSpace space);
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/marking.h b/deps/v8/src/heap/marking.h
index b20a4d86f1..ab98a124bc 100644
--- a/deps/v8/src/heap/marking.h
+++ b/deps/v8/src/heap/marking.h
@@ -38,12 +38,16 @@ class MarkBit {
}
}
+ // Returns true if this call successfully transitioned the bit from 0 to 1.
template <AccessMode mode = NON_ATOMIC>
inline bool Set();
template <AccessMode mode = NON_ATOMIC>
inline bool Get();
+ // Returns true if this call successfully transitioned the bit from 1 to 0.
template <AccessMode mode = NON_ATOMIC>
inline bool Clear();
@@ -57,8 +61,9 @@ class MarkBit {
template <>
inline bool MarkBit::Set<MarkBit::NON_ATOMIC>() {
- *cell_ |= mask_;
- return true;
+ base::Atomic32 old_value = *cell_;
+ *cell_ = old_value | mask_;
+ return (old_value & mask_) == 0;
}
template <>
@@ -86,8 +91,9 @@ inline bool MarkBit::Get<MarkBit::ATOMIC>() {
template <>
inline bool MarkBit::Clear<MarkBit::NON_ATOMIC>() {
- *cell_ &= ~mask_;
- return true;
+ base::Atomic32 old_value = *cell_;
+ *cell_ = old_value & ~mask_;
+ return (old_value & mask_) == mask_;
}
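+// Example of the new contract: Set() reports whether this call performed the
+// 0 -> 1 transition (and Clear() the 1 -> 0 transition), so a caller can tell
+// whether it won a race to flip the bit:
+//
+//   MarkBit bit = ...;       // a mark bit that is currently clear
+//   bool first = bit.Set();  // true: this call flipped 0 -> 1
+//   bool again = bit.Set();  // false: the bit was already set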
template <>
@@ -412,24 +418,17 @@ class Marking : public AllStatic {
template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
INLINE(static bool WhiteToGrey(MarkBit markbit)) {
- DCHECK(mode == MarkBit::ATOMIC || IsWhite(markbit));
return markbit.Set<mode>();
}
- // Warning: this method is not safe in general in concurrent scenarios.
- // If you know that nobody else will change the bits on the given location
- // then you may use it.
template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
- INLINE(static void WhiteToBlack(MarkBit markbit)) {
- DCHECK(mode == MarkBit::ATOMIC || IsWhite(markbit));
- markbit.Set<mode>();
- markbit.Next().Set<mode>();
+ INLINE(static bool WhiteToBlack(MarkBit markbit)) {
+ return markbit.Set<mode>() && markbit.Next().Set<mode>();
}
template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
INLINE(static bool GreyToBlack(MarkBit markbit)) {
- DCHECK(mode == MarkBit::ATOMIC || IsGrey(markbit));
- return markbit.Next().Set<mode>();
+ return markbit.Get<mode>() && markbit.Next().Set<mode>();
}
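+ // For reference, the two-bit color encoding behind these transitions
+ // (first bit is the mark bit, second is markbit.Next()):
+ //
+ //   white: 00   grey: 10   black: 11
+ //
+ // WhiteToBlack succeeds only if both Set() calls performed their 0 -> 1
+ // transitions; GreyToBlack requires the first bit to already be set.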
enum ObjectColor {
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 3645547ef5..46b7b576d2 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -66,7 +66,8 @@ void MemoryReducer::NotifyTimer(const Event& event) {
state_.started_gcs);
}
heap()->StartIdleIncrementalMarking(
- GarbageCollectionReason::kMemoryReducer);
+ GarbageCollectionReason::kMemoryReducer,
+ kGCCallbackFlagCollectAllExternalMemory);
} else if (state_.action == kWait) {
if (!heap()->incremental_marking()->IsStopped() &&
heap()->ShouldOptimizeForMemoryUsage()) {
diff --git a/deps/v8/src/heap/object-stats.cc b/deps/v8/src/heap/object-stats.cc
index e9067937aa..a9f50cdfbf 100644
--- a/deps/v8/src/heap/object-stats.cc
+++ b/deps/v8/src/heap/object-stats.cc
@@ -271,13 +271,12 @@ void ObjectStatsCollector::CollectStatistics(HeapObject* obj) {
if (obj->IsScript()) RecordScriptDetails(Script::cast(obj));
}
-class ObjectStatsCollector::CompilationCacheTableVisitor
- : public ObjectVisitor {
+class ObjectStatsCollector::CompilationCacheTableVisitor : public RootVisitor {
public:
explicit CompilationCacheTableVisitor(ObjectStatsCollector* parent)
: parent_(parent) {}
- void VisitPointers(Object** start, Object** end) override {
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
HeapObject* obj = HeapObject::cast(*current);
if (obj->IsUndefined(parent_->heap_->isolate())) continue;
@@ -548,13 +547,6 @@ void ObjectStatsCollector::RecordSharedFunctionInfoDetails(
RecordFixedArrayHelper(sfi, feedback_metadata, FEEDBACK_METADATA_SUB_TYPE,
0);
}
-
- if (!sfi->OptimizedCodeMapIsCleared()) {
- FixedArray* optimized_code_map = sfi->optimized_code_map();
- RecordFixedArrayHelper(sfi, optimized_code_map, OPTIMIZED_CODE_MAP_SUB_TYPE,
- 0);
- // Optimized code map should be small, so skip accounting.
- }
}
void ObjectStatsCollector::RecordJSFunctionDetails(JSFunction* function) {
diff --git a/deps/v8/src/heap/objects-visiting-inl.h b/deps/v8/src/heap/objects-visiting-inl.h
index 2b82c6b29e..11bf679ec4 100644
--- a/deps/v8/src/heap/objects-visiting-inl.h
+++ b/deps/v8/src/heap/objects-visiting-inl.h
@@ -50,7 +50,7 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
table_.Register(
- kVisitFixedTypedArray,
+ kVisitFixedTypedArrayBase,
&FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
int>::Visit);
@@ -136,7 +136,7 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
table_.Register(
- kVisitFixedTypedArray,
+ kVisitFixedTypedArrayBase,
&FlexibleBodyVisitor<StaticVisitor, FixedTypedArrayBase::BodyDescriptor,
void>::Visit);
@@ -628,7 +628,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
JSFunctionStrongCodeBodyVisitor::Visit(map, object);
}
-
template <typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
Map* map, HeapObject* object) {
@@ -637,6 +636,139 @@ void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
JSFunctionWeakCodeBodyVisitor::Visit(map, object);
}
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit(HeapObject* object) {
+ Map* map = object->map();
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ switch (static_cast<VisitorId>(map->visitor_id())) {
+#define CASE(type) \
+ case kVisit##type: \
+ return visitor->Visit##type(map, type::cast(object));
+ TYPED_VISITOR_ID_LIST(CASE)
+#undef CASE
+ case kVisitShortcutCandidate:
+ return visitor->VisitShortcutCandidate(map, ConsString::cast(object));
+ case kVisitNativeContext:
+ return visitor->VisitNativeContext(map, Context::cast(object));
+ case kVisitDataObject:
+ return visitor->VisitDataObject(map, HeapObject::cast(object));
+ case kVisitJSObjectFast:
+ return visitor->VisitJSObjectFast(map, JSObject::cast(object));
+ case kVisitJSApiObject:
+ return visitor->VisitJSApiObject(map, JSObject::cast(object));
+ case kVisitStruct:
+ return visitor->VisitStruct(map, HeapObject::cast(object));
+ case kVisitFreeSpace:
+ return visitor->VisitFreeSpace(map, FreeSpace::cast(object));
+ case kVisitorIdCount:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ // Make the compiler happy.
+ return ResultType();
+}
+
+template <typename ResultType, typename ConcreteVisitor>
+void HeapVisitor<ResultType, ConcreteVisitor>::VisitMapPointer(
+ HeapObject* host, HeapObject** map) {
+ static_cast<ConcreteVisitor*>(this)->VisitPointer(
+ host, reinterpret_cast<Object**>(map));
+}
+
+template <typename ResultType, typename ConcreteVisitor>
+bool HeapVisitor<ResultType, ConcreteVisitor>::ShouldVisit(HeapObject* object) {
+ return true;
+}
+
+#define VISIT(type) \
+ template <typename ResultType, typename ConcreteVisitor> \
+ ResultType HeapVisitor<ResultType, ConcreteVisitor>::Visit##type( \
+ Map* map, type* object) { \
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this); \
+ if (!visitor->ShouldVisit(object)) return ResultType(); \
+ int size = type::BodyDescriptor::SizeOf(map, object); \
+ visitor->VisitMapPointer(object, object->map_slot()); \
+ type::BodyDescriptor::IterateBody(object, size, visitor); \
+ return static_cast<ResultType>(size); \
+ }
+TYPED_VISITOR_ID_LIST(VISIT)
+#undef VISIT
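+// For reference, the VISIT macro above expands, e.g. for FixedArray, to a
+// method that sizes the object, visits its map slot, and iterates its body:
+//
+//   template <typename ResultType, typename ConcreteVisitor>
+//   ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFixedArray(
+//       Map* map, FixedArray* object) {
+//     ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+//     if (!visitor->ShouldVisit(object)) return ResultType();
+//     int size = FixedArray::BodyDescriptor::SizeOf(map, object);
+//     visitor->VisitMapPointer(object, object->map_slot());
+//     FixedArray::BodyDescriptor::IterateBody(object, size, visitor);
+//     return static_cast<ResultType>(size);
+//   }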
+
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitShortcutCandidate(
+ Map* map, ConsString* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = ConsString::BodyDescriptor::SizeOf(map, object);
+ visitor->VisitMapPointer(object, object->map_slot());
+ ConsString::BodyDescriptor::IterateBody(object, size,
+ static_cast<ConcreteVisitor*>(this));
+ return static_cast<ResultType>(size);
+}
+
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitNativeContext(
+ Map* map, Context* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = Context::BodyDescriptor::SizeOf(map, object);
+ visitor->VisitMapPointer(object, object->map_slot());
+ Context::BodyDescriptor::IterateBody(object, size,
+ static_cast<ConcreteVisitor*>(this));
+ return static_cast<ResultType>(size);
+}
+
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitDataObject(
+ Map* map, HeapObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = map->instance_size();
+ visitor->VisitMapPointer(object, object->map_slot());
+ return static_cast<ResultType>(size);
+}
+
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSObjectFast(
+ Map* map, JSObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = JSObject::FastBodyDescriptor::SizeOf(map, object);
+ visitor->VisitMapPointer(object, object->map_slot());
+ JSObject::FastBodyDescriptor::IterateBody(
+ object, size, static_cast<ConcreteVisitor*>(this));
+ return static_cast<ResultType>(size);
+}
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitJSApiObject(
+ Map* map, JSObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = JSObject::BodyDescriptor::SizeOf(map, object);
+ visitor->VisitMapPointer(object, object->map_slot());
+ JSObject::BodyDescriptor::IterateBody(object, size,
+ static_cast<ConcreteVisitor*>(this));
+ return static_cast<ResultType>(size);
+}
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitStruct(
+ Map* map, HeapObject* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ int size = map->instance_size();
+ visitor->VisitMapPointer(object, object->map_slot());
+ StructBodyDescriptor::IterateBody(object, size,
+ static_cast<ConcreteVisitor*>(this));
+ return static_cast<ResultType>(size);
+}
+template <typename ResultType, typename ConcreteVisitor>
+ResultType HeapVisitor<ResultType, ConcreteVisitor>::VisitFreeSpace(
+ Map* map, FreeSpace* object) {
+ ConcreteVisitor* visitor = static_cast<ConcreteVisitor*>(this);
+ if (!visitor->ShouldVisit(object)) return ResultType();
+ visitor->VisitMapPointer(object, object->map_slot());
+ return static_cast<ResultType>(FreeSpace::cast(object)->size());
+}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index eae609e9ee..5849fcb882 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -11,15 +11,13 @@
namespace v8 {
namespace internal {
-
-StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(Map* map) {
+VisitorId StaticVisitorBase::GetVisitorId(Map* map) {
return GetVisitorId(map->instance_type(), map->instance_size(),
FLAG_unbox_double_fields && !map->HasFastPointerLayout());
}
-
-StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
- int instance_type, int instance_size, bool has_unboxed_fields) {
+VisitorId StaticVisitorBase::GetVisitorId(int instance_type, int instance_size,
+ bool has_unboxed_fields) {
if (instance_type < FIRST_NONSTRING_TYPE) {
switch (instance_type & kStringRepresentationMask) {
case kSeqStringTag:
@@ -187,7 +185,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case FIXED_INT32_ARRAY_TYPE:
case FIXED_FLOAT32_ARRAY_TYPE:
case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
- return kVisitFixedTypedArray;
+ return kVisitFixedTypedArrayBase;
case FIXED_FLOAT64_ARRAY_TYPE:
return kVisitFixedFloat64Array;
diff --git a/deps/v8/src/heap/objects-visiting.h b/deps/v8/src/heap/objects-visiting.h
index abbb27a326..c578a42d64 100644
--- a/deps/v8/src/heap/objects-visiting.h
+++ b/deps/v8/src/heap/objects-visiting.h
@@ -24,10 +24,6 @@
namespace v8 {
namespace internal {
-
-// Base class for all static visitors.
-class StaticVisitorBase : public AllStatic {
- public:
#define VISITOR_ID_LIST(V) \
V(SeqOneByteString) \
V(SeqTwoByteString) \
@@ -37,7 +33,7 @@ class StaticVisitorBase : public AllStatic {
V(FreeSpace) \
V(FixedArray) \
V(FixedDoubleArray) \
- V(FixedTypedArray) \
+ V(FixedTypedArrayBase) \
V(FixedFloat64Array) \
V(NativeContext) \
V(AllocationSite) \
@@ -63,22 +59,25 @@ class StaticVisitorBase : public AllStatic {
V(JSArrayBuffer) \
V(JSRegExp)
- // For data objects, JS objects and structs along with generic visitor which
- // can visit object of any size we provide visitors specialized by
- // object size in words.
- // Ids of specialized visitors are declared in a linear order (without
- // holes) starting from the id of visitor specialized for 2 words objects
- // (base visitor id) and ending with the id of generic visitor.
- // Method GetVisitorIdForSize depends on this ordering to calculate visitor
- // id of specialized visitor from given instance size, base visitor id and
- // generic visitor's id.
- enum VisitorId {
+// For data objects, JS objects, and structs, along with a generic visitor
+// that can visit objects of any size, we provide visitors specialized by
+// object size in words.
+// Ids of specialized visitors are declared in a linear order (without
+// holes) starting from the id of visitor specialized for 2 words objects
+// (base visitor id) and ending with the id of generic visitor.
+// Method GetVisitorIdForSize depends on this ordering to calculate visitor
+// id of specialized visitor from given instance size, base visitor id and
+// generic visitor's id.
+enum VisitorId {
#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
- VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
+ VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
#undef VISITOR_ID_ENUM_DECL
- kVisitorIdCount
- };
+ kVisitorIdCount
+};
+// Base class for all static visitors.
+class StaticVisitorBase : public AllStatic {
+ public:
// Visitor ID should fit in one byte.
STATIC_ASSERT(kVisitorIdCount <= 256);
@@ -99,24 +98,24 @@ class VisitorDispatchTable {
// We are not using memcpy to guarantee that during update
// every element of callbacks_ array will remain correct
// pointer (memcpy might be implemented as a byte copying loop).
- for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) {
+ for (int i = 0; i < kVisitorIdCount; i++) {
base::NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
}
}
inline Callback GetVisitor(Map* map);
- inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
+ inline Callback GetVisitorById(VisitorId id) {
return reinterpret_cast<Callback>(callbacks_[id]);
}
- void Register(StaticVisitorBase::VisitorId id, Callback callback) {
- DCHECK(id < StaticVisitorBase::kVisitorIdCount); // id is unsigned.
+ void Register(VisitorId id, Callback callback) {
+ DCHECK(id < kVisitorIdCount); // id is unsigned.
callbacks_[id] = reinterpret_cast<base::AtomicWord>(callback);
}
private:
- base::AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
+ base::AtomicWord callbacks_[kVisitorIdCount];
};
@@ -349,10 +348,77 @@ template <typename StaticVisitor>
VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
StaticMarkingVisitor<StaticVisitor>::table_;
+#define TYPED_VISITOR_ID_LIST(V) \
+ V(AllocationSite) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Cell) \
+ V(Code) \
+ V(ConsString) \
+ V(FixedArray) \
+ V(FixedDoubleArray) \
+ V(FixedFloat64Array) \
+ V(FixedTypedArrayBase) \
+ V(JSArrayBuffer) \
+ V(JSFunction) \
+ V(JSObject) \
+ V(JSRegExp) \
+ V(JSWeakCollection) \
+ V(Map) \
+ V(Oddball) \
+ V(PropertyCell) \
+ V(SeqOneByteString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(SlicedString) \
+ V(Symbol) \
+ V(TransitionArray) \
+ V(ThinString) \
+ V(WeakCell)
+
+// The base class for visitors that need to dispatch on object type.
+// It is similar to StaticVisitor except that it uses virtual dispatch
+// instead of a static dispatch table. The default behavior of all
+// visit functions is to iterate the body of the given object using
+// the object's BodyDescriptor.
+//
+// The visit functions return the size of the object cast to ResultType.
+//
+// This class is intended to be used in the following way:
+//
+// class SomeVisitor : public HeapVisitor<ResultType, SomeVisitor> {
+// ...
+// }
+//
+// This is an example of the curiously recurring template pattern.
+// TODO(ulan): replace static visitors with the HeapVisitor.
+template <typename ResultType, typename ConcreteVisitor>
+class HeapVisitor : public ObjectVisitor {
+ public:
+ ResultType Visit(HeapObject* object);
+
+ protected:
+ // A guard predicate for visiting the object.
+ // If it returns false then the default implementations of the Visit*
+ // functions bail out of iterating the object pointers.
+ virtual bool ShouldVisit(HeapObject* object);
+ // A callback for visiting the map pointer in the object header.
+ virtual void VisitMapPointer(HeapObject* host, HeapObject** map);
+
+#define VISIT(type) virtual ResultType Visit##type(Map* map, type* object);
+ TYPED_VISITOR_ID_LIST(VISIT)
+#undef VISIT
+ virtual ResultType VisitShortcutCandidate(Map* map, ConsString* object);
+ virtual ResultType VisitNativeContext(Map* map, Context* object);
+ virtual ResultType VisitDataObject(Map* map, HeapObject* object);
+ virtual ResultType VisitJSObjectFast(Map* map, JSObject* object);
+ virtual ResultType VisitJSApiObject(Map* map, JSObject* object);
+ virtual ResultType VisitStruct(Map* map, HeapObject* object);
+ virtual ResultType VisitFreeSpace(Map* map, FreeSpace* object);
+};
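+// Usage sketch (the visitor name is illustrative): a visitor that returns
+// object sizes and visits everything, relying on the default body iteration:
+//
+//   class SizeReportingVisitor
+//       : public HeapVisitor<int, SizeReportingVisitor> {
+//    public:
+//     bool ShouldVisit(HeapObject* object) override { return true; }
+//   };
+//
+//   SizeReportingVisitor v;
+//   int size = v.Visit(object);  // dispatches on object->map()->visitor_id()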
class WeakObjectRetainer;
-
// A weak list is single linked list where each element has a weak pointer to
// the next element. Given the head of the list, this function removes dead
// elements from the list and if requested records slots for next-element
diff --git a/deps/v8/src/heap/page-parallel-job.h b/deps/v8/src/heap/page-parallel-job.h
index fffbbd2e40..eb215efbb4 100644
--- a/deps/v8/src/heap/page-parallel-job.h
+++ b/deps/v8/src/heap/page-parallel-job.h
@@ -20,16 +20,10 @@ class Isolate;
// The JobTraits class needs to define:
// - PerPageData type - state associated with each page.
// - PerTaskData type - state associated with each task.
-// - static bool ProcessPageInParallel(Heap* heap,
+// - static void ProcessPageInParallel(Heap* heap,
// PerTaskData task_data,
// MemoryChunk* page,
// PerPageData page_data)
-// The function should return true iff processing succeeded.
-// - static const bool NeedSequentialFinalization
-// - static void FinalizePageSequentially(Heap* heap,
-// bool processing_succeeded,
-// MemoryChunk* page,
-// PerPageData page_data)
template <typename JobTraits>
class PageParallelJob {
public:
@@ -108,21 +102,12 @@ class PageParallelJob {
pending_tasks_->Wait();
}
}
- if (JobTraits::NeedSequentialFinalization) {
- Item* item = items_;
- while (item != nullptr) {
- bool success = (item->state.Value() == kFinished);
- JobTraits::FinalizePageSequentially(heap_, item->chunk, success,
- item->data);
- item = item->next;
- }
- }
}
private:
- static const int kMaxNumberOfTasks = 10;
+ static const int kMaxNumberOfTasks = 32;
- enum ProcessingState { kAvailable, kProcessing, kFinished, kFailed };
+ enum ProcessingState { kAvailable, kProcessing, kFinished };
struct Item : public Malloced {
Item(MemoryChunk* chunk, typename JobTraits::PerPageData data, Item* next)
@@ -158,9 +143,9 @@ class PageParallelJob {
}
for (int i = 0; i < num_items_; i++) {
if (current->state.TrySetValue(kAvailable, kProcessing)) {
- bool success = JobTraits::ProcessPageInParallel(
- heap_, data_, current->chunk, current->data);
- current->state.SetValue(success ? kFinished : kFailed);
+ JobTraits::ProcessPageInParallel(heap_, data_, current->chunk,
+ current->data);
+ current->state.SetValue(kFinished);
}
current = current->next;
// Wrap around if needed.
diff --git a/deps/v8/src/heap/scavenger.cc b/deps/v8/src/heap/scavenger.cc
index 7a8e55fd19..e211388729 100644
--- a/deps/v8/src/heap/scavenger.cc
+++ b/deps/v8/src/heap/scavenger.cc
@@ -36,7 +36,7 @@ class ScavengingVisitor : public StaticVisitorBase {
table_.Register(kVisitByteArray, &EvacuateByteArray);
table_.Register(kVisitFixedArray, &EvacuateFixedArray);
table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
- table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
+ table_.Register(kVisitFixedTypedArrayBase, &EvacuateFixedTypedArray);
table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
table_.Register(kVisitJSArrayBuffer,
&ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
@@ -148,7 +148,7 @@ class ScavengingVisitor : public StaticVisitorBase {
}
if (marks_handling == TRANSFER_MARKS) {
- IncrementalMarking::TransferColor(source, target);
+ heap->incremental_marking()->TransferColor(source, target);
}
}
@@ -191,6 +191,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HeapObject* target = NULL; // Initialization to please compiler.
if (allocation.To(&target)) {
+ DCHECK(ObjectMarking::IsWhite(
+ target, heap->mark_compact_collector()->marking_state(target)));
MigrateObject(heap, object, target, object_size);
// Update slot to new target using CAS. A concurrent sweeper thread may
@@ -201,10 +203,7 @@ class ScavengingVisitor : public StaticVisitorBase {
reinterpret_cast<base::AtomicWord>(target));
if (object_contents == POINTER_OBJECT) {
- // TODO(mlippautz): Query collector for marking state.
- heap->promotion_queue()->insert(
- target, object_size,
- ObjectMarking::IsBlack(object, MarkingState::Internal(object)));
+ heap->promotion_queue()->insert(target, object_size);
}
heap->IncrementPromotedObjectsSize(object_size);
return true;
@@ -446,11 +445,10 @@ void Scavenger::SelectScavengingVisitorsTable() {
// can't be evacuated into evacuation candidate but
// short-circuiting violates this assumption.
scavenging_visitors_table_.Register(
- StaticVisitorBase::kVisitShortcutCandidate,
- scavenging_visitors_table_.GetVisitorById(
- StaticVisitorBase::kVisitConsString));
+ kVisitShortcutCandidate,
+ scavenging_visitors_table_.GetVisitorById(kVisitConsString));
scavenging_visitors_table_.Register(
- StaticVisitorBase::kVisitThinString,
+ kVisitThinString,
&ScavengingVisitor<TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::
EvacuateThinStringNoShortcut);
}
@@ -460,17 +458,17 @@ void Scavenger::SelectScavengingVisitorsTable() {
Isolate* Scavenger::isolate() { return heap()->isolate(); }
+void RootScavengeVisitor::VisitRootPointer(Root root, Object** p) {
+ ScavengePointer(p);
+}
-void ScavengeVisitor::VisitPointer(Object** p) { ScavengePointer(p); }
-
-
-void ScavengeVisitor::VisitPointers(Object** start, Object** end) {
+void RootScavengeVisitor::VisitRootPointers(Root root, Object** start,
+ Object** end) {
// Copy all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) ScavengePointer(p);
}
-
-void ScavengeVisitor::ScavengePointer(Object** p) {
+void RootScavengeVisitor::ScavengePointer(Object** p) {
Object* object = *p;
if (!heap_->InNewSpace(object)) return;
diff --git a/deps/v8/src/heap/scavenger.h b/deps/v8/src/heap/scavenger.h
index 54fe6ffdf9..09f2955651 100644
--- a/deps/v8/src/heap/scavenger.h
+++ b/deps/v8/src/heap/scavenger.h
@@ -44,15 +44,14 @@ class Scavenger {
VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
};
-
// Helper class for turning the scavenger into an object visitor that is also
// filtering out non-HeapObjects and objects which do not reside in new space.
-class ScavengeVisitor : public ObjectVisitor {
+class RootScavengeVisitor : public RootVisitor {
public:
- explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
+ explicit RootScavengeVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointer(Object** p) override;
- void VisitPointers(Object** start, Object** end) override;
+ void VisitRootPointer(Root root, Object** p) override;
+ void VisitRootPointers(Root root, Object** start, Object** end) override;
private:
inline void ScavengePointer(Object** p);
diff --git a/deps/v8/src/heap/sequential-marking-deque.cc b/deps/v8/src/heap/sequential-marking-deque.cc
new file mode 100644
index 0000000000..a715b3fd85
--- /dev/null
+++ b/deps/v8/src/heap/sequential-marking-deque.cc
@@ -0,0 +1,98 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/sequential-marking-deque.h"
+
+#include "src/allocation.h"
+#include "src/base/bits.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+
+namespace v8 {
+namespace internal {
+
+void SequentialMarkingDeque::SetUp() {
+ backing_store_ = new base::VirtualMemory(kMaxSize);
+ backing_store_committed_size_ = 0;
+ if (backing_store_ == nullptr) {
+ V8::FatalProcessOutOfMemory("SequentialMarkingDeque::SetUp");
+ }
+}
+
+void SequentialMarkingDeque::TearDown() { delete backing_store_; }
+
+void SequentialMarkingDeque::StartUsing() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (in_use_) {
+ // This can happen in mark-compact GC if the incremental marker already
+ // started using the marking deque.
+ return;
+ }
+ in_use_ = true;
+ EnsureCommitted();
+ array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
+ size_t size = FLAG_force_marking_deque_overflows
+ ? 64 * kPointerSize
+ : backing_store_committed_size_;
+ DCHECK(
+ base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
+ mask_ = static_cast<int>((size / kPointerSize) - 1);
+ top_ = bottom_ = 0;
+ overflowed_ = false;
+}
+
+void SequentialMarkingDeque::StopUsing() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ if (!in_use_) return;
+ DCHECK(IsEmpty());
+ DCHECK(!overflowed_);
+ top_ = bottom_ = mask_ = 0;
+ in_use_ = false;
+ if (FLAG_concurrent_sweeping) {
+ StartUncommitTask();
+ } else {
+ Uncommit();
+ }
+}
+
+void SequentialMarkingDeque::Clear() {
+ DCHECK(in_use_);
+ top_ = bottom_ = 0;
+ overflowed_ = false;
+}
+
+void SequentialMarkingDeque::Uncommit() {
+ DCHECK(!in_use_);
+ bool success = backing_store_->Uncommit(backing_store_->address(),
+ backing_store_committed_size_);
+ backing_store_committed_size_ = 0;
+ CHECK(success);
+}
+
+void SequentialMarkingDeque::EnsureCommitted() {
+ DCHECK(in_use_);
+ if (backing_store_committed_size_ > 0) return;
+
+ for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
+ if (backing_store_->Commit(backing_store_->address(), size, false)) {
+ backing_store_committed_size_ = size;
+ break;
+ }
+ }
+ if (backing_store_committed_size_ == 0) {
+ V8::FatalProcessOutOfMemory("SequentialMarkingDeque::EnsureCommitted");
+ }
+}
+
+void SequentialMarkingDeque::StartUncommitTask() {
+ if (!uncommit_task_pending_) {
+ uncommit_task_pending_ = true;
+ UncommitTask* task = new UncommitTask(heap_->isolate(), this);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/heap/sequential-marking-deque.h b/deps/v8/src/heap/sequential-marking-deque.h
new file mode 100644
index 0000000000..86098dd730
--- /dev/null
+++ b/deps/v8/src/heap/sequential-marking-deque.h
@@ -0,0 +1,172 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
+#define V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
+
+#include <deque>
+
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/platform.h"
+#include "src/cancelable-task.h"
+
+namespace v8 {
+namespace internal {
+
+class Heap;
+class Isolate;
+class HeapObject;
+
+// ----------------------------------------------------------------------------
+// Marking deque for tracing live objects.
+class SequentialMarkingDeque {
+ public:
+ explicit SequentialMarkingDeque(Heap* heap)
+ : backing_store_(nullptr),
+ backing_store_committed_size_(0),
+ array_(nullptr),
+ top_(0),
+ bottom_(0),
+ mask_(0),
+ overflowed_(false),
+ in_use_(false),
+ uncommit_task_pending_(false),
+ heap_(heap) {}
+
+ void SetUp();
+ void TearDown();
+
+ // Ensures that the marking deque is committed and will stay committed until
+ // StopUsing() is called.
+ void StartUsing();
+ void StopUsing();
+ void Clear();
+
+ inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
+
+ inline bool IsEmpty() { return top_ == bottom_; }
+
+ int Size() {
+ // Return (top - bottom + capacity) % capacity, where capacity = mask + 1.
+ return (top_ - bottom_ + mask_ + 1) & mask_;
+ }
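+ // E.g. with capacity 8 (mask_ == 7), bottom_ == 6, and top_ == 2, the
+ // deque holds slots 6, 7, 0, 1: (2 - 6 + 7 + 1) & 7 == 4.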
+
+ bool overflowed() const { return overflowed_; }
+
+ void ClearOverflowed() { overflowed_ = false; }
+
+ void SetOverflowed() { overflowed_ = true; }
+
+ // Push the object on the marking stack if there is room, otherwise mark the
+ // deque as overflowed and wait for a rescan of the heap.
+ INLINE(bool Push(HeapObject* object)) {
+ if (IsFull()) {
+ SetOverflowed();
+ return false;
+ } else {
+ array_[top_] = object;
+ top_ = ((top_ + 1) & mask_);
+ return true;
+ }
+ }
+
+ INLINE(HeapObject* Pop()) {
+ DCHECK(!IsEmpty());
+ top_ = ((top_ - 1) & mask_);
+ HeapObject* object = array_[top_];
+ return object;
+ }
+
+ // Unshift the object into the marking stack if there is room, otherwise mark
+ // the deque as overflowed and wait for a rescan of the heap.
+ INLINE(bool Unshift(HeapObject* object)) {
+ if (IsFull()) {
+ SetOverflowed();
+ return false;
+ } else {
+ bottom_ = ((bottom_ - 1) & mask_);
+ array_[bottom_] = object;
+ return true;
+ }
+ }
+
+ // Calls the specified callback on each element of the deque and replaces
+ // the element with the result of the callback. If the callback returns
+ // nullptr then the element is removed from the deque.
+ // The callback must accept HeapObject* and return HeapObject*.
+ template <typename Callback>
+ void Update(Callback callback) {
+ int i = bottom_;
+ int new_top = bottom_;
+ while (i != top_) {
+ HeapObject* object = callback(array_[i]);
+ if (object) {
+ array_[new_top] = object;
+ new_top = (new_top + 1) & mask_;
+ }
+ i = (i + 1) & mask_;
+ }
+ top_ = new_top;
+ }
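+ // Usage sketch: updating entries after objects may have moved; the
+ // IsAlive() helper here is hypothetical.
+ //
+ //   deque.Update([](HeapObject* obj) -> HeapObject* {
+ //     MapWord map_word = obj->map_word();
+ //     if (map_word.IsForwardingAddress()) {
+ //       return map_word.ToForwardingAddress();
+ //     }
+ //     return IsAlive(obj) ? obj : nullptr;  // nullptr drops the entry.
+ //   });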
+
+ private:
+ // This task uncommits the marking_deque backing store if
+ // marking_deque->in_use_ is false.
+ class UncommitTask : public CancelableTask {
+ public:
+ explicit UncommitTask(Isolate* isolate,
+ SequentialMarkingDeque* marking_deque)
+ : CancelableTask(isolate), marking_deque_(marking_deque) {}
+
+ private:
+ // CancelableTask override.
+ void RunInternal() override {
+ base::LockGuard<base::Mutex> guard(&marking_deque_->mutex_);
+ if (!marking_deque_->in_use_) {
+ marking_deque_->Uncommit();
+ }
+ marking_deque_->uncommit_task_pending_ = false;
+ }
+
+ SequentialMarkingDeque* marking_deque_;
+ DISALLOW_COPY_AND_ASSIGN(UncommitTask);
+ };
+
+ static const size_t kMaxSize = 4 * MB;
+ static const size_t kMinSize = 256 * KB;
+
+ // Must be called with mutex lock.
+ void EnsureCommitted();
+
+ // Must be called with mutex lock.
+ void Uncommit();
+
+ // Must be called with mutex lock.
+ void StartUncommitTask();
+
+ base::Mutex mutex_;
+
+ base::VirtualMemory* backing_store_;
+ size_t backing_store_committed_size_;
+ HeapObject** array_;
+ // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
+ // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
+ // (mod mask_ + 1).
+ int top_;
+ int bottom_;
+ int mask_;
+ bool overflowed_;
+ // in_use_ == true after taking mutex lock implies that the marking deque is
+ // committed and will stay committed at least until in_use_ == false.
+ bool in_use_;
+ bool uncommit_task_pending_;
+ Heap* heap_;
+
+ DISALLOW_COPY_AND_ASSIGN(SequentialMarkingDeque);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif  // V8_HEAP_SEQUENTIAL_MARKING_DEQUE_
diff --git a/deps/v8/src/heap/spaces-inl.h b/deps/v8/src/heap/spaces-inl.h
index 52c572cc4a..5b44d1dc10 100644
--- a/deps/v8/src/heap/spaces-inl.h
+++ b/deps/v8/src/heap/spaces-inl.h
@@ -337,8 +337,13 @@ MemoryChunk* MemoryChunkIterator::next() {
return nullptr;
}
-Page* FreeListCategory::page() {
- return Page::FromAddress(reinterpret_cast<Address>(this));
+Page* FreeListCategory::page() const {
+ return Page::FromAddress(
+ reinterpret_cast<Address>(const_cast<FreeListCategory*>(this)));
+}
+
+Page* FreeList::GetPageForCategoryType(FreeListCategoryType type) {
+ return top(type) ? top(type)->page() : nullptr;
}
FreeList* FreeListCategory::owner() {
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 070b72c7bd..71e1b60be9 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -20,6 +20,7 @@
#include "src/objects-inl.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
+#include "src/vm-state-inl.h"
namespace v8 {
namespace internal {
@@ -55,10 +56,14 @@ bool HeapObjectIterator::AdvanceToNextPage() {
DCHECK_EQ(cur_addr_, cur_end_);
if (current_page_ == page_range_.end()) return false;
Page* cur_page = *(current_page_++);
- space_->heap()
- ->mark_compact_collector()
- ->sweeper()
- .SweepOrWaitUntilSweepingCompleted(cur_page);
+ Heap* heap = space_->heap();
+
+ heap->mark_compact_collector()->sweeper().SweepOrWaitUntilSweepingCompleted(
+ cur_page);
+ if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
+ heap->minor_mark_compact_collector()->MakeIterable(
+ cur_page, MarkingTreatmentMode::CLEAR,
+ FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->SweepingDone());
@@ -291,18 +296,14 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
: isolate_(isolate),
code_range_(nullptr),
capacity_(0),
- capacity_executable_(0),
size_(0),
size_executable_(0),
lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
highest_ever_allocated_(reinterpret_cast<void*>(0)),
unmapper_(this) {}
-bool MemoryAllocator::SetUp(size_t capacity, size_t capacity_executable,
- size_t code_range_size) {
+bool MemoryAllocator::SetUp(size_t capacity, size_t code_range_size) {
capacity_ = RoundUp(capacity, Page::kPageSize);
- capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
- DCHECK_GE(capacity_, capacity_executable_);
size_ = 0;
size_executable_ = 0;
@@ -322,7 +323,6 @@ void MemoryAllocator::TearDown() {
// TODO(gc) this will be true again when we fix FreeMemory.
// DCHECK(size_executable_ == 0);
capacity_ = 0;
- capacity_executable_ = 0;
if (last_chunk_.IsReserved()) {
last_chunk_.Release();
@@ -698,13 +698,6 @@ MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
GetCommitPageSize()) +
CodePageGuardSize();
- // Check executable memory limit.
- if ((size_executable_.Value() + chunk_size) > capacity_executable_) {
- LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
- "V8 Executable Allocation capacity exceeded"));
- return NULL;
- }
-
// Size of header (not executable) plus area (executable).
size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
GetCommitPageSize());
@@ -861,6 +854,17 @@ void Page::CreateBlackArea(Address start, Address end) {
static_cast<int>(end - start));
}
+void Page::DestroyBlackArea(Address start, Address end) {
+ DCHECK(heap()->incremental_marking()->black_allocation());
+ DCHECK_EQ(Page::FromAddress(start), this);
+ DCHECK_NE(start, end);
+ DCHECK_EQ(Page::FromAddress(end - 1), this);
+ MarkingState::Internal(this).bitmap()->ClearRange(
+ AddressToMarkbitIndex(start), AddressToMarkbitIndex(end));
+ MarkingState::Internal(this).IncrementLiveBytes(
+ -static_cast<int>(end - start));
+}
+
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk,
Address start_free) {
// We do not allow partial shrink for code.
@@ -1356,6 +1360,39 @@ bool PagedSpace::ContainsSlow(Address addr) {
return false;
}
+Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
+ base::LockGuard<base::Mutex> guard(mutex());
+
+ // Check for pages that still contain free list entries. Bail out for smaller
+ // categories.
+ const int minimum_category =
+ static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
+ Page* page = free_list()->GetPageForCategoryType(kHuge);
+ if (!page && static_cast<int>(kLarge) >= minimum_category)
+ page = free_list()->GetPageForCategoryType(kLarge);
+ if (!page && static_cast<int>(kMedium) >= minimum_category)
+ page = free_list()->GetPageForCategoryType(kMedium);
+ if (!page && static_cast<int>(kSmall) >= minimum_category)
+ page = free_list()->GetPageForCategoryType(kSmall);
+ if (!page) return nullptr;
+
+ AccountUncommitted(page->size());
+ accounting_stats_.DeallocateBytes(page->LiveBytesFromFreeList());
+ accounting_stats_.DecreaseCapacity(page->area_size());
+ page->Unlink();
+ UnlinkFreeListCategories(page);
+ return page;
+}
+
+void PagedSpace::AddPage(Page* page) {
+ AccountCommitted(page->size());
+ accounting_stats_.IncreaseCapacity(page->area_size());
+ accounting_stats_.AllocateBytes(page->LiveBytesFromFreeList());
+ page->set_owner(this);
+ RelinkFreeListCategories(page);
+ page->InsertAfter(anchor()->prev_page());
+}
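+// Together, RemovePageSafe() and AddPage() implement the page handoff used
+// when a CompactionSpace runs dry while evacuating the young generation (see
+// RawSlowAllocateRaw() below), roughly:
+//
+//   Page* page = main_space->RemovePageSafe(size_in_bytes);
+//   if (page != nullptr) compaction_space->AddPage(page);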
+
void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(!heap()->deserialization_complete());
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -1366,11 +1403,17 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
size_t unused = page->ShrinkToHighWaterMark();
accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
- AccountUncommitted(unused);
+ // Do not account for the unused space as uncommitted because the counter
+ // is kept in sync with page size which is also not adjusted for those
+ // chunks.
}
}
bool PagedSpace::Expand() {
+ // Always lock against the main space as we can only adjust capacity and
+ // pages concurrently for the main paged space.
+ base::LockGuard<base::Mutex> guard(heap()->paged_space(identity())->mutex());
+
const int size = AreaSize();
if (!heap()->CanExpandOldGeneration(size)) return false;
@@ -1425,6 +1468,15 @@ void PagedSpace::MarkAllocationInfoBlack() {
}
}
+void PagedSpace::UnmarkAllocationInfo() {
+ Address current_top = top();
+ Address current_limit = limit();
+ if (current_top != nullptr && current_top != current_limit) {
+ Page::FromAllocationAreaAddress(current_top)
+ ->DestroyBlackArea(current_top, current_limit);
+ }
+}
+
// Empty space allocation info, returning unused area to free list.
void PagedSpace::EmptyAllocationInfo() {
// Mark the old linear allocation area with a free space map so it can be
@@ -1517,6 +1569,10 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// The object itself should look OK.
object->ObjectVerify();
+ if (!FLAG_verify_heap_skip_remembered_set) {
+ heap()->VerifyRememberedSetFor(object);
+ }
+
// All the interior pointers should be contained in the heap.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
@@ -2888,10 +2944,20 @@ HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
}
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
- DCHECK_GE(size_in_bytes, 0);
- const int kMaxPagesToSweep = 1;
+ VMState<GC> state(heap()->isolate());
+ RuntimeCallTimerScope runtime_timer(heap()->isolate(),
+ &RuntimeCallStats::GC_SlowAllocateRaw);
+ return RawSlowAllocateRaw(size_in_bytes);
+}
+HeapObject* CompactionSpace::SlowAllocateRaw(int size_in_bytes) {
+ return RawSlowAllocateRaw(size_in_bytes);
+}
+
+HeapObject* PagedSpace::RawSlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
+ DCHECK_GE(size_in_bytes, 0);
+ const int kMaxPagesToSweep = 1;
MarkCompactCollector* collector = heap()->mark_compact_collector();
// Sweeping is still in progress.
@@ -2918,6 +2984,17 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
object = free_list_.Allocate(static_cast<size_t>(size_in_bytes));
if (object != nullptr) return object;
}
+ } else if (is_local()) {
+ // Sweeping not in progress and we are on a {CompactionSpace}. This can
+ // only happen when we are evacuating for the young generation.
+ PagedSpace* main_space = heap()->paged_space(identity());
+ Page* page = main_space->RemovePageSafe(size_in_bytes);
+ if (page != nullptr) {
+ AddPage(page);
+ HeapObject* object =
+ free_list_.Allocate(static_cast<size_t>(size_in_bytes));
+ if (object != nullptr) return object;
+ }
}
if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
@@ -3248,6 +3325,10 @@ void LargeObjectSpace::Verify() {
// The object itself should look OK.
object->ObjectVerify();
+ if (!FLAG_verify_heap_skip_remembered_set) {
+ heap()->VerifyRememberedSetFor(object);
+ }
+
// Byte arrays and strings don't have interior pointers.
if (object->IsAbstractCode()) {
VerifyPointersVisitor code_visitor;
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index ff27d09c3a..dc49f3d4a0 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -21,6 +21,7 @@
#include "src/heap/marking.h"
#include "src/list.h"
#include "src/objects.h"
+#include "src/objects/map.h"
#include "src/utils.h"
namespace v8 {
@@ -190,6 +191,7 @@ class FreeListCategory {
FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
inline FreeList* owner();
+ inline Page* page() const;
inline bool is_linked();
bool is_empty() { return top() == nullptr; }
size_t available() const { return available_; }
@@ -204,8 +206,6 @@ class FreeListCategory {
// {kVeryLongFreeList} by manually walking the list.
static const int kVeryLongFreeList = 500;
- inline Page* page();
-
FreeSpace* top() { return top_; }
void set_top(FreeSpace* top) { top_ = top; }
FreeListCategory* prev() { return prev_; }
@@ -290,6 +290,10 @@ class MemoryChunk {
// |ANCHOR|: Flag is set if page is an anchor.
ANCHOR = 1u << 17,
+
+ // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
+ // to iterate the page.
+ SWEEP_TO_ITERATE = 1u << 18,
};
typedef base::Flags<Flag, uintptr_t> Flags;
@@ -670,9 +674,9 @@ class MarkingState {
MarkingState(Bitmap* bitmap, intptr_t* live_bytes)
: bitmap_(bitmap), live_bytes_(live_bytes) {}
- void IncrementLiveBytes(intptr_t by) const {
- *live_bytes_ += static_cast<int>(by);
- }
+ template <MarkBit::AccessMode mode = MarkBit::NON_ATOMIC>
+ inline void IncrementLiveBytes(intptr_t by) const;
+
void SetLiveBytes(intptr_t value) const {
*live_bytes_ = static_cast<int>(value);
}
@@ -690,6 +694,18 @@ class MarkingState {
intptr_t* live_bytes_;
};
+template <>
+inline void MarkingState::IncrementLiveBytes<MarkBit::NON_ATOMIC>(
+ intptr_t by) const {
+ *live_bytes_ += by;
+}
+
+template <>
+inline void MarkingState::IncrementLiveBytes<MarkBit::ATOMIC>(
+ intptr_t by) const {
+ reinterpret_cast<base::AtomicNumber<intptr_t>*>(live_bytes_)->Increment(by);
+}
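+// A concurrent marker can use the atomic flavor, e.g.:
+//
+//   MarkingState::Internal(chunk).IncrementLiveBytes<MarkBit::ATOMIC>(size);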
+
// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
@@ -808,6 +824,7 @@ class Page : public MemoryChunk {
size_t ShrinkToHighWaterMark();
V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
+ void DestroyBlackArea(Address start, Address end);
#ifdef DEBUG
void Print();
@@ -1261,8 +1278,7 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// Initializes its internal bookkeeping structures.
// Max capacity of the total space and executable memory limit.
- bool SetUp(size_t max_capacity, size_t capacity_executable,
- size_t code_range_size);
+ bool SetUp(size_t max_capacity, size_t code_range_size);
void TearDown();
@@ -1293,13 +1309,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
return capacity_ < size ? 0 : capacity_ - size;
}
- // Returns the maximum available executable bytes of heaps.
- size_t AvailableExecutable() {
- const size_t executable_size = SizeExecutable();
- if (capacity_executable_ < executable_size) return 0;
- return capacity_executable_ - executable_size;
- }
-
// Returns maximum available bytes that the old space can have.
size_t MaxAvailable() {
return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
@@ -1398,8 +1407,6 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
// Maximum space size in bytes.
size_t capacity_;
- // Maximum subset of capacity_ that can be executable
- size_t capacity_executable_;
// Allocated space size in bytes.
base::AtomicNumber<size_t> size_;
@@ -1719,6 +1726,21 @@ class V8_EXPORT_PRIVATE FreeList {
return maximum_freed;
}
+ static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
+ if (size_in_bytes <= kTiniestListMax) {
+ return kTiniest;
+ } else if (size_in_bytes <= kTinyListMax) {
+ return kTiny;
+ } else if (size_in_bytes <= kSmallListMax) {
+ return kSmall;
+ } else if (size_in_bytes <= kMediumListMax) {
+ return kMedium;
+ } else if (size_in_bytes <= kLargeListMax) {
+ return kLarge;
+ }
+ return kHuge;
+ }
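+ // Now that this helper is static, callers outside the free list, e.g.
+ // PagedSpace::RemovePageSafe() in spaces.cc, can map a requested size to
+ // the smallest category worth scanning:
+ //
+ //   FreeListCategoryType type =
+ //       FreeList::SelectFreeListCategoryType(size_in_bytes);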
+
explicit FreeList(PagedSpace* owner);
// Adds a node on the free list. The block of size {size_in_bytes} starting
@@ -1790,6 +1812,9 @@ class V8_EXPORT_PRIVATE FreeList {
void RemoveCategory(FreeListCategory* category);
void PrintCategories(FreeListCategoryType type);
+ // Returns a page containing an entry for a given type, or nullptr otherwise.
+ inline Page* GetPageForCategoryType(FreeListCategoryType type);
+
#ifdef DEBUG
size_t SumFreeLists();
bool IsVeryLong();
@@ -1843,21 +1868,6 @@ class V8_EXPORT_PRIVATE FreeList {
FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
size_t minimum_size);
- FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
- if (size_in_bytes <= kTiniestListMax) {
- return kTiniest;
- } else if (size_in_bytes <= kTinyListMax) {
- return kTiny;
- } else if (size_in_bytes <= kSmallListMax) {
- return kSmall;
- } else if (size_in_bytes <= kMediumListMax) {
- return kMedium;
- } else if (size_in_bytes <= kLargeListMax) {
- return kLarge;
- }
- return kHuge;
- }
-
// The tiny categories are not used for fast allocation.
FreeListCategoryType SelectFastAllocationFreeListCategoryType(
size_t size_in_bytes) {
@@ -1871,7 +1881,9 @@ class V8_EXPORT_PRIVATE FreeList {
return kHuge;
}
- FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
+ FreeListCategory* top(FreeListCategoryType type) const {
+ return categories_[type];
+ }
PagedSpace* owner_;
base::AtomicNumber<size_t> wasted_bytes_;
@@ -2074,6 +2086,7 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
void EmptyAllocationInfo();
void MarkAllocationInfoBlack();
+ void UnmarkAllocationInfo();
void AccountAllocatedBytes(size_t bytes) {
accounting_stats_.AllocateBytes(bytes);
@@ -2146,6 +2159,11 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
std::unique_ptr<ObjectIterator> GetObjectIterator() override;
+ // Remove a page if it has at least |size_in_bytes| bytes available that can
+ // be used for allocation.
+ Page* RemovePageSafe(int size_in_bytes);
+ void AddPage(Page* page);
+
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
@@ -2179,7 +2197,9 @@ class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent.
- MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+ MUST_USE_RESULT HeapObject* RawSlowAllocateRaw(int size_in_bytes);
size_t area_size_;
@@ -2738,6 +2758,8 @@ class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
int size_in_bytes) override;
+
+ MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes) override;
};
diff --git a/deps/v8/src/heap/workstealing-marking-deque.h b/deps/v8/src/heap/workstealing-marking-deque.h
new file mode 100644
index 0000000000..1a3dc865e4
--- /dev/null
+++ b/deps/v8/src/heap/workstealing-marking-deque.h
@@ -0,0 +1,167 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_WORKSTEALING_MARKING_DEQUE_
+#define V8_HEAP_WORKSTEALING_MARKING_DEQUE_
+
+#include <cstddef>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+class HeapObject;
+
+class StackSegment {
+ public:
+ static const int kNumEntries = 64;
+
+ StackSegment(StackSegment* next, StackSegment* prev)
+ : next_(next), prev_(prev), index_(0) {}
+
+ bool Push(HeapObject* object) {
+ if (IsFull()) return false;
+
+ objects_[index_++] = object;
+ return true;
+ }
+
+ bool Pop(HeapObject** object) {
+ if (IsEmpty()) return false;
+
+ *object = objects_[--index_];
+ return true;
+ }
+
+ size_t Size() { return index_; }
+ bool IsEmpty() { return index_ == 0; }
+ bool IsFull() { return index_ == kNumEntries; }
+ void Clear() { index_ = 0; }
+
+ StackSegment* next() { return next_; }
+ StackSegment* prev() { return prev_; }
+ void set_next(StackSegment* next) { next_ = next; }
+ void set_prev(StackSegment* prev) { prev_ = prev; }
+
+ void Unlink() {
+ if (next() != nullptr) next()->set_prev(prev());
+ if (prev() != nullptr) prev()->set_next(next());
+ }
+
+ private:
+ StackSegment* next_;
+ StackSegment* prev_;
+ size_t index_;
+ HeapObject* objects_[kNumEntries];
+};
+
+class SegmentedStack {
+ public:
+ SegmentedStack()
+ : front_(new StackSegment(nullptr, nullptr)), back_(front_) {}
+
+ ~SegmentedStack() {
+ CHECK(IsEmpty());
+ delete front_;
+ }
+
+ bool Push(HeapObject* object) {
+ if (!front_->Push(object)) {
+ NewFront();
+ bool success = front_->Push(object);
+ USE(success);
+ DCHECK(success);
+ }
+ return true;
+ }
+
+ bool Pop(HeapObject** object) {
+ if (!front_->Pop(object)) {
+ if (IsEmpty()) return false;
+ DeleteFront();
+ bool success = front_->Pop(object);
+ USE(success);
+ DCHECK(success);
+ }
+ return true;
+ }
+
+ bool IsEmpty() { return front_ == back_ && front_->IsEmpty(); }
+
+ private:
+ void NewFront() {
+ StackSegment* s = new StackSegment(front_, nullptr);
+ front_->set_prev(s);
+ front_ = s;
+ }
+
+ void DeleteFront() { delete Unlink(front_); }
+
+ StackSegment* Unlink(StackSegment* segment) {
+ CHECK_NE(front_, back_);
+ if (segment == front_) front_ = front_->next();
+ if (segment == back_) back_ = back_->prev();
+ segment->Unlink();
+ return segment;
+ }
+
+ StackSegment* front_;
+ StackSegment* back_;
+};
+
+// TODO(mlippautz): Implement actual work stealing.
+class WorkStealingMarkingDeque {
+ public:
+ static const int kMaxNumTasks = 4;
+
+ bool Push(int task_id, HeapObject* object) {
+ DCHECK_LT(task_id, kMaxNumTasks);
+ return private_stacks_[task_id].Push(object);
+ }
+
+ bool Pop(int task_id, HeapObject** object) {
+ DCHECK_LT(task_id, kMaxNumTasks);
+ return private_stacks_[task_id].Pop(object);
+ }
+
+ bool IsLocalEmpty(int task_id) { return private_stacks_[task_id].IsEmpty(); }
+
+ private:
+ SegmentedStack private_stacks_[kMaxNumTasks];
+};
+
+class LocalWorkStealingMarkingDeque {
+ public:
+ LocalWorkStealingMarkingDeque(WorkStealingMarkingDeque* deque, int task_id)
+ : deque_(deque), task_id_(task_id) {}
+
+ // Pushes an object onto the marking deque.
+ bool Push(HeapObject* object) { return deque_->Push(task_id_, object); }
+
+ // Pops an object from the marking deque.
+ bool Pop(HeapObject** object) { return deque_->Pop(task_id_, object); }
+
+ // Returns true if the local portion of the marking deque is empty.
+ bool IsEmpty() { return deque_->IsLocalEmpty(task_id_); }
+
+ // Blocks if there are no more objects available. Returns |true| once new
+ // objects are available and |false| otherwise.
+ bool WaitForMoreObjects() {
+ // Return false once the local portion of the marking deque is drained.
+ // TODO(mlippautz): Implement a barrier that can be used to synchronize
+ // work stealing and emptiness.
+ return !IsEmpty();
+ }
+
+ private:
+ WorkStealingMarkingDeque* deque_;
+ int task_id_;
+};
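+// Usage sketch (task id 0 is illustrative): a marking task drains its local
+// view of the shared deque.
+//
+//   WorkStealingMarkingDeque shared;
+//   LocalWorkStealingMarkingDeque local(&shared, 0);
+//   local.Push(object);
+//   HeapObject* current;
+//   while (local.Pop(&current)) {
+//     // ... visit |current| and push its unmarked fields ...
+//   }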
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_WORKSTEALING_MARKING_DEQUE_
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index f9f1dc920d..d83e02522a 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -248,23 +248,23 @@ template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitEmbeddedPointer(host(), this);
Assembler::FlushICache(isolate, pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
+ visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
+ visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
+ visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(this);
+ visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
+ visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(this);
+ visitor->VisitDebugTarget(host(), this);
} else if (IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
+ visitor->VisitRuntimeEntry(host(), this);
}
}
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index 11750a395d..5ef07489e9 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -2162,6 +2162,20 @@ void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
+void Assembler::cvtdq2ps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x5B);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::cvttps2dq(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0xF3);
+ EMIT(0x0F);
+ EMIT(0x5B);
+ emit_sse_operand(dst, src);
+}
void Assembler::addsd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
@@ -2263,6 +2277,20 @@ void Assembler::divps(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
+void Assembler::rcpps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x53);
+ emit_sse_operand(dst, src);
+}
+
+void Assembler::rsqrtps(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0x52);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::minps(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
EMIT(0x0F);
@@ -2277,6 +2305,14 @@ void Assembler::maxps(XMMRegister dst, const Operand& src) {
emit_sse_operand(dst, src);
}
+void Assembler::cmpps(XMMRegister dst, const Operand& src, int8_t cmp) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x0F);
+ EMIT(0xC2);
+ emit_sse_operand(dst, src);
+ EMIT(cmp);
+}
+
void Assembler::sqrtsd(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
EMIT(0xF2);
@@ -2795,6 +2831,12 @@ void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1,
vinstr(op, dst, src1, src2, k66, k0F, kWIG);
}
+void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t cmp) {
+ vps(0xC2, dst, src1, src2);
+ EMIT(cmp);
+}
+
void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) {
XMMRegister iop = {6};
vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG);
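The new emitters follow the SSE pattern used throughout this file: an optional mandatory prefix, the 0x0F escape byte, the opcode, then the ModR/M-encoded operand (plus an imm8 for cmpps). Concretely, rsqrtps is 0F 52, rcpps is 0F 53, cvtdq2ps is 0F 5B, cvttps2dq is the same 5B opcode behind an F3 prefix, and cmpps is 0F C2 followed by the predicate byte. A freestanding sketch of the same byte sequences for register-register forms (not the V8 Assembler API):

#include <cstdint>
#include <vector>

// ModR/M for a register-register operand: mod = 11b, then reg and rm fields.
static uint8_t ModRM(int reg, int rm) {
  return static_cast<uint8_t>(0xC0 | (reg << 3) | rm);
}

std::vector<uint8_t> EncodeCvtdq2ps(int reg, int rm) {
  return {0x0F, 0x5B, ModRM(reg, rm)};
}

std::vector<uint8_t> EncodeCvttps2dq(int reg, int rm) {
  // Same opcode as cvtdq2ps, distinguished only by the mandatory F3 prefix.
  return {0xF3, 0x0F, 0x5B, ModRM(reg, rm)};
}

std::vector<uint8_t> EncodeCmpps(int reg, int rm, uint8_t predicate) {
  return {0x0F, 0xC2, ModRM(reg, rm), predicate};
}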
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index e47ad086bc..cbb8ba2761 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -980,12 +980,30 @@ class Assembler : public AssemblerBase {
void mulps(XMMRegister dst, XMMRegister src) { mulps(dst, Operand(src)); }
void divps(XMMRegister dst, const Operand& src);
void divps(XMMRegister dst, XMMRegister src) { divps(dst, Operand(src)); }
+ void rcpps(XMMRegister dst, const Operand& src);
+ void rcpps(XMMRegister dst, XMMRegister src) { rcpps(dst, Operand(src)); }
+ void rsqrtps(XMMRegister dst, const Operand& src);
+ void rsqrtps(XMMRegister dst, XMMRegister src) { rsqrtps(dst, Operand(src)); }
void minps(XMMRegister dst, const Operand& src);
void minps(XMMRegister dst, XMMRegister src) { minps(dst, Operand(src)); }
void maxps(XMMRegister dst, const Operand& src);
void maxps(XMMRegister dst, XMMRegister src) { maxps(dst, Operand(src)); }
+ void cmpps(XMMRegister dst, const Operand& src, int8_t cmp);
+#define SSE_CMP_P(instr, imm8) \
+ void instr##ps(XMMRegister dst, XMMRegister src) { \
+ cmpps(dst, Operand(src), imm8); \
+ } \
+ void instr##ps(XMMRegister dst, const Operand& src) { cmpps(dst, src, imm8); }
+
+ SSE_CMP_P(cmpeq, 0x0);
+ SSE_CMP_P(cmplt, 0x1);
+ SSE_CMP_P(cmple, 0x2);
+ SSE_CMP_P(cmpneq, 0x4);
+
+#undef SSE_CMP_P
+
// SSE2 instructions
void cvttss2si(Register dst, const Operand& src);
void cvttss2si(Register dst, XMMRegister src) {
@@ -1009,6 +1027,15 @@ class Assembler : public AssemblerBase {
void cvtsd2ss(XMMRegister dst, XMMRegister src) {
cvtsd2ss(dst, Operand(src));
}
+ void cvtdq2ps(XMMRegister dst, XMMRegister src) {
+ cvtdq2ps(dst, Operand(src));
+ }
+ void cvtdq2ps(XMMRegister dst, const Operand& src);
+ void cvttps2dq(XMMRegister dst, XMMRegister src) {
+ cvttps2dq(dst, Operand(src));
+ }
+ void cvttps2dq(XMMRegister dst, const Operand& src);
+
void addsd(XMMRegister dst, XMMRegister src) { addsd(dst, Operand(src)); }
void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src) { subsd(dst, Operand(src)); }
@@ -1317,6 +1344,17 @@ class Assembler : public AssemblerBase {
}
void vss(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vrcpps(XMMRegister dst, XMMRegister src) { vrcpps(dst, Operand(src)); }
+ void vrcpps(XMMRegister dst, const Operand& src) {
+ vinstr(0x53, dst, xmm0, src, kNone, k0F, kWIG);
+ }
+ void vrsqrtps(XMMRegister dst, XMMRegister src) {
+ vrsqrtps(dst, Operand(src));
+ }
+ void vrsqrtps(XMMRegister dst, const Operand& src) {
+ vinstr(0x52, dst, xmm0, src, kNone, k0F, kWIG);
+ }
+
void vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8);
void vpslld(XMMRegister dst, XMMRegister src, int8_t imm8);
void vpsrlw(XMMRegister dst, XMMRegister src, int8_t imm8);
@@ -1324,6 +1362,19 @@ class Assembler : public AssemblerBase {
void vpsraw(XMMRegister dst, XMMRegister src, int8_t imm8);
void vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8);
+ void vcvtdq2ps(XMMRegister dst, XMMRegister src) {
+ vcvtdq2ps(dst, Operand(src));
+ }
+ void vcvtdq2ps(XMMRegister dst, const Operand& src) {
+ vinstr(0x5B, dst, xmm0, src, kNone, k0F, kWIG);
+ }
+ void vcvttps2dq(XMMRegister dst, XMMRegister src) {
+ vcvttps2dq(dst, Operand(src));
+ }
+ void vcvttps2dq(XMMRegister dst, const Operand& src) {
+ vinstr(0x5B, dst, xmm0, src, kF3, k0F, kWIG);
+ }
+
// BMI instruction
void andn(Register dst, Register src1, Register src2) {
andn(dst, src1, Operand(src2));
@@ -1438,6 +1489,23 @@ class Assembler : public AssemblerBase {
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
void vpd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+ void vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2,
+ int8_t cmp);
+#define AVX_CMP_P(instr, imm8) \
+ void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
+ vcmpps(dst, src1, Operand(src2), imm8); \
+ } \
+ void instr##ps(XMMRegister dst, XMMRegister src1, const Operand& src2) { \
+ vcmpps(dst, src1, src2, imm8); \
+ }
+
+ AVX_CMP_P(vcmpeq, 0x0);
+ AVX_CMP_P(vcmplt, 0x1);
+ AVX_CMP_P(vcmple, 0x2);
+ AVX_CMP_P(vcmpneq, 0x4);
+
+#undef AVX_CMP_P
+
// Other SSE and AVX instructions
#define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
void instruction(XMMRegister dst, XMMRegister src) { \
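The SSE_CMP_P (and, further down, AVX_CMP_P) macro stamps out thin wrappers that bind a fixed comparison predicate to cmpps/vcmpps. For example, SSE_CMP_P(cmpeq, 0x0) expands to roughly the following pair of members (whitespace aside):

// Approximate expansion of SSE_CMP_P(cmpeq, 0x0) inside class Assembler.
void cmpeqps(XMMRegister dst, XMMRegister src) {
  cmpps(dst, Operand(src), 0x0);
}
void cmpeqps(XMMRegister dst, const Operand& src) { cmpps(dst, src, 0x0); }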
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 56ce2c5213..6550d6e016 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -462,69 +462,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- // This case is handled prior to the RegExpExecStub call.
- __ Abort(kUnexpectedRegExpExecCall);
-#else // V8_INTERPRETED_REGEXP
- // Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 9;
- __ EnterApiExitFrame(kRegExpExecuteArguments);
-
- // Argument 9: Pass current isolate address.
- __ mov(Operand(esp, 8 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
- __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ mov(Operand(esp, 6 * kPointerSize), esi);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
-
- // Argument 5: static offsets vector buffer.
- __ mov(Operand(esp, 4 * kPointerSize),
- Immediate(ExternalReference::address_of_static_offsets_vector(
- isolate())));
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- __ mov(Operand(esp, 3 * kPointerSize),
- RegExpExecDescriptor::StringEndRegister());
- __ mov(Operand(esp, 2 * kPointerSize),
- RegExpExecDescriptor::StringStartRegister());
-
- // Argument 2: Previous index.
- __ mov(Operand(esp, 1 * kPointerSize),
- RegExpExecDescriptor::LastIndexRegister());
-
- // Argument 1: Original subject string.
- __ mov(Operand(esp, 0 * kPointerSize),
- RegExpExecDescriptor::StringRegister());
-
- // Locate the code entry and call it.
- __ add(RegExpExecDescriptor::CodeRegister(),
- Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(RegExpExecDescriptor::CodeRegister());
-
- // Drop arguments and come back to JS mode.
- __ LeaveApiExitFrame(true);
-
- // TODO(jgruber): Don't tag return value once this is supported by stubs.
- __ SmiTag(eax);
- __ ret(0 * kPointerSize);
-#endif // V8_INTERPRETED_REGEXP
-}
-
static int NegativeComparisonResult(Condition cc) {
DCHECK(cc != equal);
@@ -2843,23 +2780,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// call data
__ push(call_data);
- Register scratch = call_data;
- if (!call_data_undefined()) {
- // return value
- __ push(Immediate(masm->isolate()->factory()->undefined_value()));
- // return value default
- __ push(Immediate(masm->isolate()->factory()->undefined_value()));
- } else {
- // return value
- __ push(scratch);
- // return value default
- __ push(scratch);
- }
+ // return value
+ __ push(Immediate(masm->isolate()->factory()->undefined_value()));
+ // return value default
+ __ push(Immediate(masm->isolate()->factory()->undefined_value()));
// isolate
__ push(Immediate(reinterpret_cast<int>(masm->isolate())));
// holder
__ push(holder);
+ Register scratch = call_data;
__ mov(scratch, esp);
// push return address
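With the call_data_undefined special case gone, the stub builds the same implicit-argument block for every API call: call data, two always-undefined return-value slots, the isolate, and the holder, after which scratch captures esp as the base of that block. A toy model of the push order, with labels that are illustrative rather than V8 types:

#include <cassert>
#include <string>
#include <vector>

int main() {
  // push_back models an x86 push; the last element is the top of stack.
  std::vector<std::string> stack;
  stack.push_back("call data");
  stack.push_back("undefined (return value)");          // now unconditional
  stack.push_back("undefined (return value default)");  // now unconditional
  stack.push_back("isolate");
  stack.push_back("holder");
  size_t scratch = stack.size() - 1;  // mov(scratch, esp)
  assert(stack[scratch] == "holder");
  return 0;
}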
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 25c76885c1..1f6785352a 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -94,25 +94,22 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
- if (FLAG_zap_code_space) {
- // Fail hard and early if we enter this code object again.
- byte* pointer = code->FindCodeAgeSequence();
- if (pointer != NULL) {
- pointer += kNoCodeAgeSequenceLength;
- } else {
- pointer = code->instruction_start();
- }
- CodePatcher patcher(isolate, pointer, 1);
- patcher.masm()->int3();
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int osr_offset = data->OsrPcOffset()->value();
- if (osr_offset > 0) {
- CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
- 1);
- osr_patcher.masm()->int3();
- }
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(isolate, pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 1);
+ osr_patcher.masm()->int3();
}
// We will overwrite the code's relocation info in-place. Relocation info
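The deoptimizer change makes the zap unconditional rather than gated on FLAG_zap_code_space: the entry (and, if present, the OSR entry) of the dead code object is overwritten with int3 so any re-entry traps immediately instead of executing stale code. The patch itself is a single byte, as this toy demonstrates:

#include <cassert>
#include <cstdint>

int main() {
  uint8_t code[16] = {0x90};   // stand-in for the old instruction stream
  const uint8_t kInt3 = 0xCC;  // x86 breakpoint opcode
  code[0] = kInt3;             // "fail hard and early" on re-entry
  assert(code[0] == 0xCC);
  return 0;
}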
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 789e7ba9fe..36acd1e05d 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -868,6 +868,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5b:
+ AppendToBuffer("vcvttps2dq %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5c:
AppendToBuffer("vsubss %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -988,6 +992,14 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
int mod, regop, rm, vvvv = vex_vreg();
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
+ case 0x52:
+ AppendToBuffer("vrsqrtps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
+ case 0x53:
+ AppendToBuffer("vrcpps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x54:
AppendToBuffer("vandps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1008,6 +1020,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0x5B:
+ AppendToBuffer("vcvtdq2ps %s,", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ break;
case 0x5C:
AppendToBuffer("vsubps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
@@ -1028,6 +1044,16 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
+ case 0xC2: {
+ const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
+ "neq", "nlt", "nle", "ord"};
+ AppendToBuffer("vcmpps %s,%s,", NameOfXMMRegister(regop),
+ NameOfXMMRegister(vvvv));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", (%s)", pseudo_op[*current]);
+ current++;
+ break;
+ }
default:
UnimplementedInstruction();
}
@@ -1537,28 +1563,17 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
get_modrm(*data, &mod, &regop, &rm);
AppendToBuffer("ucomiss %s,", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- } else if (f0byte >= 0x53 && f0byte <= 0x5F) {
+ } else if (f0byte >= 0x52 && f0byte <= 0x5F) {
const char* const pseudo_op[] = {
- "rcpps",
- "andps",
- "andnps",
- "orps",
- "xorps",
- "addps",
- "mulps",
- "cvtps2pd",
- "cvtdq2ps",
- "subps",
- "minps",
- "divps",
- "maxps",
+ "rsqrtps", "rcpps", "andps", "andnps", "orps",
+ "xorps", "addps", "mulps", "cvtps2pd", "cvtdq2ps",
+ "subps", "minps", "divps", "maxps",
};
data += 2;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("%s %s,",
- pseudo_op[f0byte - 0x53],
+ AppendToBuffer("%s %s,", pseudo_op[f0byte - 0x52],
NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
} else if (f0byte == 0x50) {
@@ -1569,6 +1584,16 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfCPURegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (f0byte == 0xC2) {
+ data += 2;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
+ "neq", "nlt", "nle", "ord"};
+ AppendToBuffer("cmpps %s, ", NameOfXMMRegister(regop));
+ data += PrintRightXMMOperand(data);
+ AppendToBuffer(", (%s)", pseudo_op[*data]);
+ data++;
} else if (f0byte == 0xC6) {
// shufps xmm, xmm/m128, imm8
data += 2;
@@ -2246,6 +2271,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x59:
mnem = "mulss";
break;
+ case 0x5B:
+ mnem = "cvttps2dq";
+ break;
case 0x5C:
mnem = "subss";
break;
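Both new C2 cases decode the trailing imm8 into a comparison pseudo-op name; the mapping is fixed by the SSE/AVX encoding, with eq/lt/le/unord/neq/nlt/nle/ord for values 0 through 7. A standalone helper mirroring the pseudo_op tables above:

#include <cstdint>

// Maps the (v)cmpps imm8 predicate to its mnemonic suffix; values >= 8 are
// not produced by the assembler changes in this patch.
const char* CmppsPredicateName(uint8_t imm8) {
  static const char* const kNames[] = {"eq",  "lt",  "le",  "unord",
                                       "neq", "nlt", "nle", "ord"};
  return imm8 < 8 ? kNames[imm8] : "invalid";
}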
diff --git a/deps/v8/src/ia32/interface-descriptors-ia32.cc b/deps/v8/src/ia32/interface-descriptors-ia32.cc
index 304191bbd2..b8547d0194 100644
--- a/deps/v8/src/ia32/interface-descriptors-ia32.cc
+++ b/deps/v8/src/ia32/interface-descriptors-ia32.cc
@@ -56,11 +56,6 @@ const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
-const Register RegExpExecDescriptor::StringRegister() { return eax; }
-const Register RegExpExecDescriptor::LastIndexRegister() { return ecx; }
-const Register RegExpExecDescriptor::StringStartRegister() { return edx; }
-const Register RegExpExecDescriptor::StringEndRegister() { return ebx; }
-const Register RegExpExecDescriptor::CodeRegister() { return edi; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
@@ -161,9 +156,20 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // ecx : start index (to support rest parameters)
+ // edi : the target to call
+ Register registers[] = {edi, eax, ecx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // eax : number of arguments
+ // edx : the new target
// ecx : start index (to support rest parameters)
// edi : the target to call
- Register registers[] = {edi, ecx};
+ Register registers[] = {edi, edx, eax, ecx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 137c34c15b..a87b2425fb 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -420,7 +420,7 @@ void MacroAssembler::RecordWriteField(
lea(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
- test_b(dst, Immediate((1 << kPointerSizeLog2) - 1));
+ test_b(dst, Immediate(kPointerSize - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
@@ -2480,6 +2480,7 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function,
int num_arguments) {
+ DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
// Check stack alignment.
if (emit_debug_code()) {
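The RecordWriteField tweak is an identity: kPointerSize is a power of two, so (1 << kPointerSizeLog2) - 1 and kPointerSize - 1 denote the same alignment mask, and the new form simply reads more directly. A quick check with the ia32 values:

#include <cassert>
#include <cstdint>

int main() {
  const int kPointerSizeLog2 = 2;
  const int kPointerSize = 1 << kPointerSizeLog2;  // 4 on ia32
  assert(((1 << kPointerSizeLog2) - 1) == (kPointerSize - 1));
  // An address is pointer-aligned iff its low bits under the mask are zero.
  uintptr_t aligned = 0x1000, unaligned = 0x1002;
  assert((aligned & (kPointerSize - 1)) == 0);
  assert((unaligned & (kPointerSize - 1)) != 0);
  return 0;
}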
diff --git a/deps/v8/src/ic/accessor-assembler.cc b/deps/v8/src/ic/accessor-assembler.cc
index eb3971c54f..6508169558 100644
--- a/deps/v8/src/ic/accessor-assembler.cc
+++ b/deps/v8/src/ic/accessor-assembler.cc
@@ -984,15 +984,7 @@ void AccessorAssembler::HandleStoreFieldAndReturn(Node* handler_word,
BIND(&if_out_of_object);
{
if (transition_to_field) {
- Label storage_extended(this);
- GotoIfNot(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word),
- &storage_extended);
- Comment("[ Extend storage");
- ExtendPropertiesBackingStore(holder);
- Comment("] Extend storage");
- Goto(&storage_extended);
-
- BIND(&storage_extended);
+ ExtendPropertiesBackingStore(holder, handler_word);
}
StoreNamedField(handler_word, holder, false, representation, prepared_value,
@@ -1053,7 +1045,12 @@ Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
return value;
}
-void AccessorAssembler::ExtendPropertiesBackingStore(Node* object) {
+void AccessorAssembler::ExtendPropertiesBackingStore(Node* object,
+ Node* handler_word) {
+ Label done(this);
+ GotoIfNot(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word), &done);
+ Comment("[ Extend storage");
+
ParameterMode mode = OptimalParameterMode();
Node* properties = LoadProperties(object);
@@ -1061,6 +1058,14 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object) {
? LoadAndUntagFixedArrayBaseLength(properties)
: LoadFixedArrayBaseLength(properties);
+ // Previous property deletion could have left behind unused backing store
+ // capacity even for a map that think it doesn't have any unused fields.
+ // Perform a bounds check to see if we actually have to grow the array.
+ Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
+ Node* size = ElementOffsetFromIndex(length, FAST_ELEMENTS, mode,
+ FixedArray::kHeaderSize);
+ GotoIf(UintPtrLessThan(offset, size), &done);
+
Node* delta = IntPtrOrSmiConstant(JSObject::kFieldsAdded, mode);
Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
@@ -1088,6 +1093,10 @@ void AccessorAssembler::ExtendPropertiesBackingStore(Node* object) {
SKIP_WRITE_BARRIER, mode);
StoreObjectField(object, JSObject::kPropertiesOffset, new_properties);
+ Comment("] Extend storage");
+ Goto(&done);
+
+ BIND(&done);
}
void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
@@ -1512,7 +1521,10 @@ void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
TryProbeStubCache(isolate()->load_stub_cache(), receiver, key,
&found_handler, &var_handler, &stub_cache_miss);
BIND(&found_handler);
- { HandleLoadICHandlerCase(p, var_handler.value(), slow, &direct_exit); }
+ {
+ HandleLoadICHandlerCase(p, var_handler.value(), &stub_cache_miss,
+ &direct_exit);
+ }
BIND(&stub_cache_miss);
{
@@ -1865,33 +1877,19 @@ void AccessorAssembler::LoadIC_Uninitialized(const LoadICParameters* p) {
LoadRoot(Heap::kpremonomorphic_symbolRootIndex),
SKIP_WRITE_BARRIER, 0, SMI_PARAMETERS);
- Label not_function_prototype(this);
- GotoIf(Word32NotEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE)),
- &not_function_prototype);
- GotoIfNot(WordEqual(p->name, LoadRoot(Heap::kprototype_stringRootIndex)),
- &not_function_prototype);
- Node* bit_field = LoadMapBitField(receiver_map);
- GotoIf(IsSetWord32(bit_field, 1 << Map::kHasNonInstancePrototype),
- &not_function_prototype);
- // Function.prototype load.
- {
- // TODO(jkummerow): Unify with LoadIC_FunctionPrototype builtin
- // (when we have a shared CSA base class for all builtins).
- Node* proto_or_map =
- LoadObjectField(receiver, JSFunction::kPrototypeOrInitialMapOffset);
- GotoIf(IsTheHole(proto_or_map), &miss);
-
- VARIABLE(var_result, MachineRepresentation::kTagged, proto_or_map);
- Label done(this, &var_result);
- GotoIfNot(IsMap(proto_or_map), &done);
-
- var_result.Bind(LoadMapPrototype(proto_or_map));
- Goto(&done);
-
- BIND(&done);
- Return(var_result.value());
+ {
+ // Special case for Function.prototype load, because it's very common
+ // for ICs that are only executed once (MyFunc.prototype.foo = ...).
+ Label not_function_prototype(this);
+ GotoIf(Word32NotEqual(instance_type, Int32Constant(JS_FUNCTION_TYPE)),
+ &not_function_prototype);
+ GotoIfNot(IsPrototypeString(p->name), &not_function_prototype);
+ Node* bit_field = LoadMapBitField(receiver_map);
+ GotoIf(IsSetWord32(bit_field, 1 << Map::kHasNonInstancePrototype),
+ &not_function_prototype);
+ Return(LoadJSFunctionPrototype(receiver, &miss));
+ BIND(&not_function_prototype);
}
- BIND(&not_function_prototype);
GenericPropertyLoad(receiver, receiver_map, instance_type, p->name, p, &miss,
kDontUseStubCache);
@@ -2090,15 +2088,15 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
VARIABLE(var_index, MachineType::PointerRepresentation());
VARIABLE(var_unique, MachineRepresentation::kTagged);
var_unique.Bind(p->name); // Dummy initialization.
- Label if_index(this), if_unique_name(this), slow(this);
+ Label if_index(this), if_unique_name(this), if_notunique(this), slow(this);
Node* receiver = p->receiver;
GotoIf(TaggedIsSmi(receiver), &slow);
Node* receiver_map = LoadMap(receiver);
Node* instance_type = LoadMapInstanceType(receiver_map);
- TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
- &slow);
+ TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique, &slow,
+ &if_notunique);
BIND(&if_index);
{
@@ -2112,6 +2110,22 @@ void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
var_unique.value(), p, &slow);
}
+ BIND(&if_notunique);
+ {
+ if (FLAG_internalize_on_the_fly) {
+ Label not_in_string_table(this);
+ TryInternalizeString(p->name, &if_index, &var_index, &if_unique_name,
+ &var_unique, &not_in_string_table, &slow);
+
+ BIND(&not_in_string_table);
+ // If the string was not found in the string table, then no object can
+ // have a property with that name.
+ Return(UndefinedConstant());
+ } else {
+ Goto(&slow);
+ }
+ }
+
BIND(&slow);
{
Comment("KeyedLoadGeneric_slow");
diff --git a/deps/v8/src/ic/accessor-assembler.h b/deps/v8/src/ic/accessor-assembler.h
index f2cafdb128..5644fa8ae8 100644
--- a/deps/v8/src/ic/accessor-assembler.h
+++ b/deps/v8/src/ic/accessor-assembler.h
@@ -193,7 +193,7 @@ class AccessorAssembler : public CodeStubAssembler {
Node* value, Label* bailout);
// Extends properties backing store by JSObject::kFieldsAdded elements.
- void ExtendPropertiesBackingStore(Node* object);
+ void ExtendPropertiesBackingStore(Node* object, Node* handler_word);
void StoreNamedField(Node* handler_word, Node* object, bool is_inobject,
Representation representation, Node* value,
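The reshaped ExtendPropertiesBackingStore now owns both guards that its caller used to interleave: it returns early unless the handler's ExtendStorage bit is set, and it also returns early when a previous property deletion left enough spare backing-store capacity for the target field. A scalar sketch of that control flow, with a std::vector standing in for the properties FixedArray and kFieldsAdded mirroring JSObject::kFieldsAdded (treat the value as illustrative):

#include <cstddef>
#include <vector>

constexpr size_t kFieldsAdded = 3;  // assumed value of JSObject::kFieldsAdded

void ExtendPropertiesBackingStore(std::vector<void*>* properties,
                                  size_t field_index, bool extend_storage) {
  if (!extend_storage) return;  // Handler did not request growth.
  // Deletions can leave spare capacity behind, so growing may be
  // unnecessary even when the map thinks all fields are in use.
  if (field_index < properties->size()) return;
  properties->resize(properties->size() + kFieldsAdded, nullptr);
}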
diff --git a/deps/v8/src/ic/arm/handler-compiler-arm.cc b/deps/v8/src/ic/arm/handler-compiler-arm.cc
index dd99a21219..317a95146f 100644
--- a/deps/v8/src/ic/arm/handler-compiler-arm.cc
+++ b/deps/v8/src/ic/arm/handler-compiler-arm.cc
@@ -221,10 +221,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Isolate* isolate = masm->isolate();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- bool call_data_undefined = false;
// Put call data in place.
if (api_call_info->data()->IsUndefined(isolate)) {
- call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
if (optimization.is_constant_call()) {
@@ -249,8 +247,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
- !optimization.is_constant_call());
+ CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
__ TailCallStub(&stub);
}
diff --git a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
index 68bd393cab..db6dc639a1 100644
--- a/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
+++ b/deps/v8/src/ic/arm64/handler-compiler-arm64.cc
@@ -156,10 +156,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Isolate* isolate = masm->isolate();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- bool call_data_undefined = false;
// Put call data in place.
if (api_call_info->data()->IsUndefined(isolate)) {
- call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
if (optimization.is_constant_call()) {
@@ -184,8 +182,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ Mov(api_function_address, ref);
// Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
- !optimization.is_constant_call());
+ CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
__ TailCallStub(&stub);
}
diff --git a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
index 324dc10d03..2cfa49b15b 100644
--- a/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
+++ b/deps/v8/src/ic/ia32/handler-compiler-ia32.cc
@@ -161,10 +161,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Isolate* isolate = masm->isolate();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- bool call_data_undefined = false;
// Put call data in place.
if (api_call_info->data()->IsUndefined(isolate)) {
- call_data_undefined = true;
__ mov(data, Immediate(isolate->factory()->undefined_value()));
} else {
if (optimization.is_constant_call()) {
@@ -182,8 +180,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(api_function_address, Immediate(function_address));
// Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
- !optimization.is_constant_call());
+ CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
__ TailCallStub(&stub);
}
diff --git a/deps/v8/src/ic/ic.cc b/deps/v8/src/ic/ic.cc
index ca3f70df2a..5dca55ed3e 100644
--- a/deps/v8/src/ic/ic.cc
+++ b/deps/v8/src/ic/ic.cc
@@ -549,7 +549,7 @@ void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
OnFeedbackChanged(isolate(), GetHostFunction());
}
-void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
+void IC::ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
List<Handle<Object>>* handlers) {
DCHECK(!IsLoadGlobalIC());
// Non-keyed ICs don't track the name explicitly.
@@ -636,16 +636,15 @@ MaybeHandle<Object> LoadGlobalIC::Load(Handle<Name> name) {
return LoadIC::Load(global, name);
}
-static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
+static bool AddOneReceiverMapIfMissing(MapHandles* receiver_maps,
Handle<Map> new_receiver_map) {
DCHECK(!new_receiver_map.is_null());
- for (int current = 0; current < receiver_maps->length(); ++current) {
- if (!receiver_maps->at(current).is_null() &&
- receiver_maps->at(current).is_identical_to(new_receiver_map)) {
+ for (Handle<Map> map : *receiver_maps) {
+ if (!map.is_null() && map.is_identical_to(new_receiver_map)) {
return false;
}
}
- receiver_maps->Add(new_receiver_map);
+ receiver_maps->push_back(new_receiver_map);
return true;
}
@@ -653,11 +652,11 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> handler) {
DCHECK(IsHandler(*handler));
if (is_keyed() && state() != RECOMPUTE_HANDLER) return false;
Handle<Map> map = receiver_map();
- MapHandleList maps;
+ MapHandles maps;
List<Handle<Object>> handlers;
TargetMaps(&maps);
- int number_of_maps = maps.length();
+ int number_of_maps = static_cast<int>(maps.size());
int deprecated_maps = 0;
int handler_to_overwrite = -1;
@@ -684,7 +683,9 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> handler) {
if (number_of_maps == 0 && state() != MONOMORPHIC && state() != POLYMORPHIC) {
return false;
}
- if (!nexus()->FindHandlers(&handlers, maps.length())) return false;
+ if (!nexus()->FindHandlers(&handlers, static_cast<int>(maps.size()))) {
+ return false;
+ }
number_of_valid_maps++;
if (number_of_valid_maps > 1 && is_keyed()) return false;
@@ -694,14 +695,14 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Object> handler) {
if (handler_to_overwrite >= 0) {
handlers.Set(handler_to_overwrite, handler);
if (!map.is_identical_to(maps.at(handler_to_overwrite))) {
- maps.Set(handler_to_overwrite, map);
+ maps[handler_to_overwrite] = map;
}
} else {
- maps.Add(map);
+ maps.push_back(map);
handlers.Add(handler);
}
- ConfigureVectorState(name, &maps, &handlers);
+ ConfigureVectorState(name, maps, &handlers);
}
return true;
@@ -714,11 +715,11 @@ void IC::UpdateMonomorphicIC(Handle<Object> handler, Handle<Name> name) {
void IC::CopyICToMegamorphicCache(Handle<Name> name) {
- MapHandleList maps;
+ MapHandles maps;
List<Handle<Object>> handlers;
TargetMaps(&maps);
- if (!nexus()->FindHandlers(&handlers, maps.length())) return;
- for (int i = 0; i < maps.length(); i++) {
+ if (!nexus()->FindHandlers(&handlers, static_cast<int>(maps.size()))) return;
+ for (int i = 0; i < static_cast<int>(maps.size()); i++) {
UpdateMegamorphicCache(*maps.at(i), *name, *handlers.at(i));
}
}
@@ -732,9 +733,9 @@ bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
source_map->elements_kind(), target_elements_kind);
Map* transitioned_map = nullptr;
if (more_general_transition) {
- MapHandleList map_list;
- map_list.Add(handle(target_map));
- transitioned_map = source_map->FindElementsKindTransitionedMap(&map_list);
+ MapHandles map_list;
+ map_list.push_back(handle(target_map));
+ transitioned_map = source_map->FindElementsKindTransitionedMap(map_list);
}
return transitioned_map == target_map;
}
@@ -1334,16 +1335,15 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
Handle<Map> receiver_map(receiver->map(), isolate());
DCHECK(receiver_map->instance_type() != JS_VALUE_TYPE &&
receiver_map->instance_type() != JS_PROXY_TYPE); // Checked by caller.
- MapHandleList target_receiver_maps;
+ MapHandles target_receiver_maps;
TargetMaps(&target_receiver_maps);
- if (target_receiver_maps.length() == 0) {
+ if (target_receiver_maps.empty()) {
Handle<Object> handler = LoadElementHandler(receiver_map);
return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
}
- for (int i = 0; i < target_receiver_maps.length(); i++) {
- Handle<Map> map = target_receiver_maps.at(i);
+ for (Handle<Map> map : target_receiver_maps) {
if (map.is_null()) continue;
if (map->instance_type() == JS_VALUE_TYPE) {
TRACE_GENERIC_IC("JSValue");
@@ -1383,14 +1383,20 @@ void KeyedLoadIC::UpdateLoadElement(Handle<HeapObject> receiver) {
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
- if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+ if (target_receiver_maps.size() > kMaxKeyedPolymorphism) {
TRACE_GENERIC_IC("max polymorph exceeded");
return;
}
- List<Handle<Object>> handlers(target_receiver_maps.length());
+ List<Handle<Object>> handlers(static_cast<int>(target_receiver_maps.size()));
LoadElementPolymorphicHandlers(&target_receiver_maps, &handlers);
- ConfigureVectorState(Handle<Name>(), &target_receiver_maps, &handlers);
+ DCHECK_LE(1, target_receiver_maps.size());
+ if (target_receiver_maps.size() == 1) {
+ ConfigureVectorState(Handle<Name>(), target_receiver_maps[0],
+ handlers.at(0));
+ } else {
+ ConfigureVectorState(Handle<Name>(), target_receiver_maps, &handlers);
+ }
}
Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map) {
@@ -1434,15 +1440,20 @@ Handle<Object> KeyedLoadIC::LoadElementHandler(Handle<Map> receiver_map) {
}
void KeyedLoadIC::LoadElementPolymorphicHandlers(
- MapHandleList* receiver_maps, List<Handle<Object>>* handlers) {
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map(receiver_maps->at(i));
-
+ MapHandles* receiver_maps, List<Handle<Object>>* handlers) {
+ // Filter out deprecated maps to ensure their instances get migrated.
+ receiver_maps->erase(
+ std::remove_if(
+ receiver_maps->begin(), receiver_maps->end(),
+ [](const Handle<Map>& map) { return map->is_deprecated(); }),
+ receiver_maps->end());
+
+ for (Handle<Map> receiver_map : *receiver_maps) {
// Mark all stable receiver maps that have elements kind transition map
// among receiver_maps as unstable because the optimizing compilers may
// generate an elements kind transition for this kind of receivers.
if (receiver_map->is_stable()) {
- Map* tmap = receiver_map->FindElementsKindTransitionedMap(receiver_maps);
+ Map* tmap = receiver_map->FindElementsKindTransitionedMap(*receiver_maps);
if (tmap != nullptr) {
receiver_map->NotifyLeafMapLayoutChange();
}
@@ -1925,9 +1936,9 @@ Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup) {
void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode) {
- MapHandleList target_receiver_maps;
+ MapHandles target_receiver_maps;
TargetMaps(&target_receiver_maps);
- if (target_receiver_maps.length() == 0) {
+ if (target_receiver_maps.empty()) {
Handle<Map> monomorphic_map =
ComputeTransitionedMap(receiver_map, store_mode);
store_mode = GetNonTransitioningStoreMode(store_mode);
@@ -1935,9 +1946,8 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
return ConfigureVectorState(Handle<Name>(), monomorphic_map, handler);
}
- for (int i = 0; i < target_receiver_maps.length(); i++) {
- if (!target_receiver_maps.at(i).is_null() &&
- target_receiver_maps.at(i)->instance_type() == JS_VALUE_TYPE) {
+ for (Handle<Map> map : target_receiver_maps) {
+ if (!map.is_null() && map->instance_type() == JS_VALUE_TYPE) {
TRACE_GENERIC_IC("JSValue");
return;
}
@@ -2002,7 +2012,7 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
// If the maximum number of receiver maps has been exceeded, use the
// megamorphic version of the IC.
- if (target_receiver_maps.length() > kMaxKeyedPolymorphism) return;
+ if (target_receiver_maps.size() > kMaxKeyedPolymorphism) return;
// Make sure all polymorphic handlers have the same store mode, otherwise the
// megamorphic stub must be used.
@@ -2020,22 +2030,28 @@ void KeyedStoreIC::UpdateStoreElement(Handle<Map> receiver_map,
// receivers are either external arrays, or all "normal" arrays. Otherwise,
// use the megamorphic stub.
if (store_mode != STANDARD_STORE) {
- int external_arrays = 0;
- for (int i = 0; i < target_receiver_maps.length(); ++i) {
- if (target_receiver_maps[i]->has_fixed_typed_array_elements()) {
+ size_t external_arrays = 0;
+ for (Handle<Map> map : target_receiver_maps) {
+ if (map->has_fixed_typed_array_elements()) {
external_arrays++;
}
}
if (external_arrays != 0 &&
- external_arrays != target_receiver_maps.length()) {
+ external_arrays != target_receiver_maps.size()) {
TRACE_GENERIC_IC("unsupported combination of external and normal arrays");
return;
}
}
- List<Handle<Object>> handlers(target_receiver_maps.length());
+ List<Handle<Object>> handlers(static_cast<int>(target_receiver_maps.size()));
StoreElementPolymorphicHandlers(&target_receiver_maps, &handlers, store_mode);
- ConfigureVectorState(Handle<Name>(), &target_receiver_maps, &handlers);
+ DCHECK_LE(1, target_receiver_maps.size());
+ if (target_receiver_maps.size() == 1) {
+ ConfigureVectorState(Handle<Name>(), target_receiver_maps[0],
+ handlers.at(0));
+ } else {
+ ConfigureVectorState(Handle<Name>(), target_receiver_maps, &handlers);
+ }
}
@@ -2100,15 +2116,21 @@ Handle<Object> KeyedStoreIC::StoreElementHandler(
}
void KeyedStoreIC::StoreElementPolymorphicHandlers(
- MapHandleList* receiver_maps, List<Handle<Object>>* handlers,
+ MapHandles* receiver_maps, List<Handle<Object>>* handlers,
KeyedAccessStoreMode store_mode) {
DCHECK(store_mode == STANDARD_STORE ||
store_mode == STORE_AND_GROW_NO_TRANSITION ||
store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
store_mode == STORE_NO_TRANSITION_HANDLE_COW);
- for (int i = 0; i < receiver_maps->length(); ++i) {
- Handle<Map> receiver_map(receiver_maps->at(i));
+ // Filter out deprecated maps to ensure their instances get migrated.
+ receiver_maps->erase(
+ std::remove_if(
+ receiver_maps->begin(), receiver_maps->end(),
+ [](const Handle<Map>& map) { return map->is_deprecated(); }),
+ receiver_maps->end());
+
+ for (Handle<Map> receiver_map : *receiver_maps) {
Handle<Object> handler;
Handle<Map> transitioned_map;
@@ -2122,7 +2144,7 @@ void KeyedStoreIC::StoreElementPolymorphicHandlers(
} else {
{
Map* tmap =
- receiver_map->FindElementsKindTransitionedMap(receiver_maps);
+ receiver_map->FindElementsKindTransitionedMap(*receiver_maps);
if (tmap != nullptr) {
if (receiver_map->is_stable()) {
receiver_map->NotifyLeafMapLayoutChange();
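Most of this ic.cc churn is mechanical: MapHandleList (V8's List) gives way to MapHandles, a std::vector of map handles, which is what lets the new deprecated-map filtering use the standard erase-remove idiom. A freestanding illustration of that pattern, with int standing in for Handle<Map> and odd values playing deprecated maps:

#include <algorithm>
#include <vector>

void RemoveDeprecated(std::vector<int>* maps) {
  maps->erase(std::remove_if(maps->begin(), maps->end(),
                             [](int m) { return m % 2 != 0; }),
              maps->end());
}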
diff --git a/deps/v8/src/ic/ic.h b/deps/v8/src/ic/ic.h
index 4649bc0b0e..9ea8905757 100644
--- a/deps/v8/src/ic/ic.h
+++ b/deps/v8/src/ic/ic.h
@@ -10,6 +10,7 @@
#include "src/ic/ic-state.h"
#include "src/macro-assembler.h"
#include "src/messages.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
@@ -96,7 +97,7 @@ class IC {
void ConfigureVectorState(Handle<Name> name, Handle<Map> map,
Handle<Object> handler);
// Configure the vector for POLYMORPHIC.
- void ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
+ void ConfigureVectorState(Handle<Name> name, MapHandles const& maps,
List<Handle<Object>>* handlers);
char TransitionMarkFromState(IC::State state);
@@ -164,16 +165,16 @@ class IC {
}
}
- void TargetMaps(MapHandleList* list) {
+ void TargetMaps(MapHandles* list) {
FindTargetMaps();
- for (int i = 0; i < target_maps_.length(); i++) {
- list->Add(target_maps_.at(i));
+ for (Handle<Map> map : target_maps_) {
+ list->push_back(map);
}
}
Map* FirstTargetMap() {
FindTargetMaps();
- return target_maps_.length() > 0 ? *target_maps_.at(0) : NULL;
+ return !target_maps_.empty() ? *target_maps_[0] : NULL;
}
Handle<FeedbackVector> vector() const { return nexus()->vector_handle(); }
@@ -223,7 +224,7 @@ class IC {
MaybeHandle<Object> maybe_handler_;
ExtraICState extra_ic_state_;
- MapHandleList target_maps_;
+ MapHandles target_maps_;
bool target_maps_set_;
const char* slow_stub_reason_;
@@ -328,7 +329,7 @@ class KeyedLoadIC : public LoadIC {
Handle<Object> LoadElementHandler(Handle<Map> receiver_map);
- void LoadElementPolymorphicHandlers(MapHandleList* receiver_maps,
+ void LoadElementPolymorphicHandlers(MapHandles* receiver_maps,
List<Handle<Object>>* handlers);
};
@@ -414,7 +415,7 @@ class KeyedStoreIC : public StoreIC {
Handle<Object> StoreElementHandler(Handle<Map> receiver_map,
KeyedAccessStoreMode store_mode);
- void StoreElementPolymorphicHandlers(MapHandleList* receiver_maps,
+ void StoreElementPolymorphicHandlers(MapHandles* receiver_maps,
List<Handle<Object>>* handlers,
KeyedAccessStoreMode store_mode);
diff --git a/deps/v8/src/ic/keyed-store-generic.cc b/deps/v8/src/ic/keyed-store-generic.cc
index 29d666d620..19c7e47caa 100644
--- a/deps/v8/src/ic/keyed-store-generic.cc
+++ b/deps/v8/src/ic/keyed-store-generic.cc
@@ -697,10 +697,9 @@ void KeyedStoreGenericAssembler::OverwriteExistingFastProperty(
BIND(&inobject);
{
- Node* field_offset =
- IntPtrMul(IntPtrSub(LoadMapInstanceSize(object_map),
- IntPtrSub(inobject_properties, field_index)),
- IntPtrConstant(kPointerSize));
+ Node* field_offset = TimesPointerSize(IntPtrAdd(
+ IntPtrSub(LoadMapInstanceSize(object_map), inobject_properties),
+ field_index));
Label tagged_rep(this), double_rep(this);
Branch(Word32Equal(representation, Int32Constant(Representation::kDouble)),
&double_rep, &tagged_rep);
@@ -789,6 +788,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&data_property);
{
+ CheckForAssociatedProtector(p->name, slow);
OverwriteExistingFastProperty(receiver, receiver_map, properties,
descriptors, name_index, details,
p->value, slow);
@@ -822,6 +822,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&overwrite);
{
+ CheckForAssociatedProtector(p->name, slow);
StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
p->value);
Return(p->value);
@@ -830,6 +831,7 @@ void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
BIND(&not_found);
{
+ CheckForAssociatedProtector(p->name, slow);
Label extensible(this);
GotoIf(IsPrivateSymbol(p->name), &extensible);
Node* bitfield2 = LoadMapBitField2(receiver_map);
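The field_offset rewrite above is pure algebra: instance_size - (inobject_properties - field_index) equals instance_size - inobject_properties + field_index, and TimesPointerSize folds in the multiply by kPointerSize. A quick check of the identity:

#include <cassert>

int main() {
  const int kPointerSize = 4;  // ia32
  int instance_size = 10, inobject_properties = 4, field_index = 2;
  int old_form =
      (instance_size - (inobject_properties - field_index)) * kPointerSize;
  int new_form =
      (instance_size - inobject_properties + field_index) * kPointerSize;
  assert(old_form == new_form);
  return 0;
}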
diff --git a/deps/v8/src/ic/mips/OWNERS b/deps/v8/src/ic/mips/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/ic/mips/OWNERS
+++ b/deps/v8/src/ic/mips/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/ic/mips/handler-compiler-mips.cc b/deps/v8/src/ic/mips/handler-compiler-mips.cc
index 6e581a5bf2..9f0174f44d 100644
--- a/deps/v8/src/ic/mips/handler-compiler-mips.cc
+++ b/deps/v8/src/ic/mips/handler-compiler-mips.cc
@@ -211,10 +211,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Isolate* isolate = masm->isolate();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- bool call_data_undefined = false;
// Put call data in place.
if (api_call_info->data()->IsUndefined(isolate)) {
- call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
if (optimization.is_constant_call()) {
@@ -238,8 +236,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ li(api_function_address, Operand(ref));
// Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
- !optimization.is_constant_call());
+ CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
__ TailCallStub(&stub);
}
diff --git a/deps/v8/src/ic/mips64/OWNERS b/deps/v8/src/ic/mips64/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/ic/mips64/OWNERS
+++ b/deps/v8/src/ic/mips64/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
index 99ca45a136..99638f5493 100644
--- a/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
+++ b/deps/v8/src/ic/mips64/handler-compiler-mips64.cc
@@ -50,7 +50,7 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
// Call the JavaScript setter with receiver and value on the stack.
if (map->IsJSGlobalObjectMap()) {
// Swap in the global receiver.
- __ ld(scratch,
+ __ Ld(scratch,
FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
receiver = scratch;
}
@@ -115,27 +115,26 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
// Bail out if the receiver has a named interceptor or requires access checks.
Register map = scratch1;
- __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ Ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
__ Branch(miss_label, ne, scratch0, Operand(zero_reg));
// Check that receiver is a JSObject.
- __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(miss_label, lt, scratch0, Operand(FIRST_JS_RECEIVER_TYPE));
// Load properties array.
Register properties = scratch0;
- __ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
// Check that the properties array is a dictionary.
- __ ld(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+ __ Ld(map, FieldMemOperand(properties, HeapObject::kMapOffset));
Register tmp = properties;
__ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
__ Branch(miss_label, ne, map, Operand(tmp));
// Restore the temporarily used register.
- __ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
+ __ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
NameDictionaryLookupStub::GenerateNegativeLookup(
masm, miss_label, &done, receiver, properties, name, scratch1);
@@ -155,7 +154,7 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell(
DCHECK(cell->value()->IsTheHole(isolate));
Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(cell);
__ LoadWeakValue(scratch, weak_cell, miss);
- __ ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
+ __ Ld(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Branch(miss, ne, scratch, Operand(at));
}
@@ -197,11 +196,11 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ Move(holder, receiver);
break;
case CallOptimization::kHolderFound:
- __ ld(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ __ Ld(holder, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ Ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
for (int i = 1; i < holder_depth; i++) {
- __ ld(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
- __ ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
+ __ Ld(holder, FieldMemOperand(holder, HeapObject::kMapOffset));
+ __ Ld(holder, FieldMemOperand(holder, Map::kPrototypeOffset));
}
break;
case CallOptimization::kHolderNotFound:
@@ -211,23 +210,21 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Isolate* isolate = masm->isolate();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- bool call_data_undefined = false;
// Put call data in place.
if (api_call_info->data()->IsUndefined(isolate)) {
- call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
if (optimization.is_constant_call()) {
- __ ld(data,
+ __ Ld(data,
FieldMemOperand(callee, JSFunction::kSharedFunctionInfoOffset));
- __ ld(data,
+ __ Ld(data,
FieldMemOperand(data, SharedFunctionInfo::kFunctionDataOffset));
- __ ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
+ __ Ld(data, FieldMemOperand(data, FunctionTemplateInfo::kCallCodeOffset));
} else {
- __ ld(data,
+ __ Ld(data,
FieldMemOperand(callee, FunctionTemplateInfo::kCallCodeOffset));
}
- __ ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
+ __ Ld(data, FieldMemOperand(data, CallHandlerInfo::kDataOffset));
}
// Put api_function_address in place.
@@ -238,8 +235,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ li(api_function_address, Operand(ref));
// Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
- !optimization.is_constant_call());
+ CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
__ TailCallStub(&stub);
}
@@ -260,7 +256,7 @@ void PropertyHandlerCompiler::GenerateAccessCheck(
Label* miss, bool compare_native_contexts_only) {
Label done;
// Load current native context.
- __ ld(scratch1, NativeContextMemOperand());
+ __ Ld(scratch1, NativeContextMemOperand());
// Load expected native context.
__ LoadWeakValue(scratch2, native_context_cell, miss);
@@ -268,8 +264,8 @@ void PropertyHandlerCompiler::GenerateAccessCheck(
__ Branch(&done, eq, scratch1, Operand(scratch2));
// Compare security tokens of current and expected native contexts.
- __ ld(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
- __ ld(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
+ __ Ld(scratch1, ContextMemOperand(scratch1, Context::SECURITY_TOKEN_INDEX));
+ __ Ld(scratch2, ContextMemOperand(scratch2, Context::SECURITY_TOKEN_INDEX));
}
__ Branch(miss, ne, scratch1, Operand(scratch2));
@@ -291,7 +287,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
if (!validity_cell.is_null()) {
DCHECK_EQ(Smi::FromInt(Map::kPrototypeChainValid), validity_cell->value());
__ li(scratch1, Operand(validity_cell));
- __ ld(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
+ __ Ld(scratch1, FieldMemOperand(scratch1, Cell::kValueOffset));
__ Branch(miss, ne, scratch1,
Operand(Smi::FromInt(Map::kPrototypeChainValid)));
}
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
index 5736c12ffc..877e3996e0 100644
--- a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
+++ b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
@@ -216,10 +216,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Isolate* isolate = masm->isolate();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- bool call_data_undefined = false;
// Put call data in place.
if (api_call_info->data()->IsUndefined(isolate)) {
- call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
if (optimization.is_constant_call()) {
@@ -244,8 +242,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
- !optimization.is_constant_call());
+ CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
__ TailCallStub(&stub);
}
diff --git a/deps/v8/src/ic/s390/handler-compiler-s390.cc b/deps/v8/src/ic/s390/handler-compiler-s390.cc
index bfca871bab..718b24d608 100644
--- a/deps/v8/src/ic/s390/handler-compiler-s390.cc
+++ b/deps/v8/src/ic/s390/handler-compiler-s390.cc
@@ -208,10 +208,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Isolate* isolate = masm->isolate();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- bool call_data_undefined = false;
// Put call data in place.
if (api_call_info->data()->IsUndefined(isolate)) {
- call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
if (optimization.is_constant_call()) {
@@ -236,8 +234,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
__ mov(api_function_address, Operand(ref));
// Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
- !optimization.is_constant_call());
+ CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
__ TailCallStub(&stub);
}
diff --git a/deps/v8/src/ic/stub-cache.cc b/deps/v8/src/ic/stub-cache.cc
index d62aceec96..6396c57061 100644
--- a/deps/v8/src/ic/stub-cache.cc
+++ b/deps/v8/src/ic/stub-cache.cc
@@ -111,43 +111,5 @@ void StubCache::Clear() {
}
}
-
-void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
- Handle<Context> native_context,
- Zone* zone) {
- for (int i = 0; i < kPrimaryTableSize; i++) {
- if (primary_[i].key == *name) {
- Map* map = primary_[i].map;
- // Map can be nullptr, if the stub is constant function call
- // with a primitive receiver.
- if (map == nullptr) continue;
-
- int offset = PrimaryOffset(*name, map);
- if (entry(primary_, offset) == &primary_[i] &&
- TypeFeedbackOracle::IsRelevantFeedback(map, *native_context)) {
- types->AddMapIfMissing(Handle<Map>(map), zone);
- }
- }
- }
-
- for (int i = 0; i < kSecondaryTableSize; i++) {
- if (secondary_[i].key == *name) {
- Map* map = secondary_[i].map;
- // Map can be nullptr, if the stub is constant function call
- // with a primitive receiver.
- if (map == nullptr) continue;
-
- // Lookup in primary table and skip duplicates.
- int primary_offset = PrimaryOffset(*name, map);
-
- // Lookup in secondary table and add matches.
- int offset = SecondaryOffset(*name, primary_offset);
- if (entry(secondary_, offset) == &secondary_[i] &&
- TypeFeedbackOracle::IsRelevantFeedback(map, *native_context)) {
- types->AddMapIfMissing(Handle<Map>(map), zone);
- }
- }
- }
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/ic/stub-cache.h b/deps/v8/src/ic/stub-cache.h
index 4054b329d3..ffb0a398ad 100644
--- a/deps/v8/src/ic/stub-cache.h
+++ b/deps/v8/src/ic/stub-cache.h
@@ -45,9 +45,6 @@ class StubCache {
Object* Get(Name* name, Map* map);
// Clear the lookup table (@ mark compact collection).
void Clear();
- // Collect all maps that match the name.
- void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
- Handle<Context> native_context, Zone* zone);
enum Table { kPrimary, kSecondary };
diff --git a/deps/v8/src/ic/x64/handler-compiler-x64.cc b/deps/v8/src/ic/x64/handler-compiler-x64.cc
index cd831c8b5f..eeddd55a7b 100644
--- a/deps/v8/src/ic/x64/handler-compiler-x64.cc
+++ b/deps/v8/src/ic/x64/handler-compiler-x64.cc
@@ -139,10 +139,8 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
Isolate* isolate = masm->isolate();
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
- bool call_data_undefined = false;
// Put call data in place.
if (api_call_info->data()->IsUndefined(isolate)) {
- call_data_undefined = true;
__ LoadRoot(data, Heap::kUndefinedValueRootIndex);
} else {
if (optimization.is_constant_call()) {
@@ -164,8 +162,7 @@ void PropertyHandlerCompiler::GenerateApiAccessorCall(
RelocInfo::EXTERNAL_REFERENCE);
// Jump to stub.
- CallApiCallbackStub stub(isolate, is_store, call_data_undefined,
- !optimization.is_constant_call());
+ CallApiCallbackStub stub(isolate, is_store, !optimization.is_constant_call());
__ TailCallStub(&stub);
}
diff --git a/deps/v8/src/icu_util.cc b/deps/v8/src/icu_util.cc
index 4b511d96f3..371d09046d 100644
--- a/deps/v8/src/icu_util.cc
+++ b/deps/v8/src/icu_util.cc
@@ -8,7 +8,7 @@
#include <windows.h>
#endif
-#if defined(V8_I18N_SUPPORT)
+#if defined(V8_INTL_SUPPORT)
#include <stdio.h>
#include <stdlib.h>
@@ -30,7 +30,7 @@ namespace v8 {
namespace internal {
-#if defined(V8_I18N_SUPPORT) && (ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE)
+#if defined(V8_INTL_SUPPORT) && (ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE)
namespace {
char* g_icu_data_ptr = NULL;
@@ -43,7 +43,7 @@ void free_icu_data_ptr() {
bool InitializeICUDefaultLocation(const char* exec_path,
const char* icu_data_file) {
-#if !defined(V8_I18N_SUPPORT)
+#if !defined(V8_INTL_SUPPORT)
return true;
#else
#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
@@ -68,7 +68,7 @@ bool InitializeICUDefaultLocation(const char* exec_path,
}
bool InitializeICU(const char* icu_data_file) {
-#if !defined(V8_I18N_SUPPORT)
+#if !defined(V8_INTL_SUPPORT)
return true;
#else
#if ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_SHARED
diff --git a/deps/v8/src/identity-map.cc b/deps/v8/src/identity-map.cc
index 14d4a850e1..5633347292 100644
--- a/deps/v8/src/identity-map.cc
+++ b/deps/v8/src/identity-map.cc
@@ -66,6 +66,8 @@ int IdentityMapBase::InsertKey(Object* address) {
for (int index = start; --limit > 0; index = (index + 1) & mask_) {
if (keys_[index] == address) return index; // Found.
if (keys_[index] == not_mapped) { // Free entry.
+ size_++;
+ DCHECK_LE(size_, capacity_);
keys_[index] = address;
return index;
}
@@ -133,8 +135,6 @@ int IdentityMapBase::LookupOrInsert(Object* key) {
// Miss; rehash if there was a GC, then insert.
if (gc_counter_ != heap_->gc_count()) Rehash();
index = InsertKey(key);
- size_++;
- DCHECK_LE(size_, capacity_);
}
DCHECK_GE(index, 0);
return index;
@@ -237,6 +237,7 @@ void IdentityMapBase::Rehash() {
keys_[i] = not_mapped;
values_[i] = nullptr;
last_empty = i;
+ size_--;
}
}
}
@@ -259,6 +260,7 @@ void IdentityMapBase::Resize(int new_capacity) {
capacity_ = new_capacity;
mask_ = capacity_ - 1;
gc_counter_ = heap_->gc_count();
+ size_ = 0;
keys_ = reinterpret_cast<Object**>(NewPointerArray(capacity_));
Object* not_mapped = heap_->not_mapped_symbol();
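
The three hunks above preserve one invariant: size_ counts occupied slots, and only the code path that actually claims a free slot may increment it. InsertKey now does the counting itself (so Rehash and Resize, which reinsert through it, are counted automatically), Rehash decrements for each slot it frees, and Resize zeroes the count before reinsertion. A simplified, self-contained sketch of the pattern; the real IdentityMapBase also stores values, probes with a bounded limit, and rehashes on GC:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Assumes a power-of-two capacity that is never completely full.
    struct IdentitySet {
      std::vector<const void*> keys;  // nullptr marks a free slot
      int size = 0;

      explicit IdentitySet(int capacity) : keys(capacity, nullptr) {}

      int InsertKey(const void* k) {
        int mask = static_cast<int>(keys.size()) - 1;
        int i = static_cast<int>(reinterpret_cast<uintptr_t>(k)) & mask;
        for (;;) {
          if (keys[i] == k) return i;  // found: size unchanged
          if (keys[i] == nullptr) {    // free entry: claim it and count it
            ++size;
            assert(size <= static_cast<int>(keys.size()));
            keys[i] = k;
            return i;
          }
          i = (i + 1) & mask;  // linear probing, as in the hunk above
        }
      }
    };
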
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 2d77fb7aa7..b69626cdb8 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -6,6 +6,7 @@ include_rules = [
"+src/base/logging.h",
"+src/base/platform/platform.h",
"+src/conversions.h",
+ "+src/unicode-cache.h",
"+src/inspector",
"+src/tracing",
"+src/debug/debug-interface.h",
diff --git a/deps/v8/src/inspector/debugger-script.js b/deps/v8/src/inspector/debugger-script.js
index d9cb12a09a..89f0d75903 100644
--- a/deps/v8/src/inspector/debugger-script.js
+++ b/deps/v8/src/inspector/debugger-script.js
@@ -402,10 +402,9 @@ DebuggerScript._frameMirrorToJSCallFrame = function(frameMirror)
*/
function contextId()
{
- var mirror = ensureFuncMirror();
- var context = mirror.context();
- if (context && context.data())
- return Number(context.data());
+ var context =
+ ensureFuncMirror().context() || ensureScriptMirror().context();
+ if (context && context.data()) return Number(context.data());
return 0;
}
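
The rewritten contextId() falls back from the function mirror's context to the script mirror's context, which is what the new ScriptMirror.prototype.context extern in the next file declares; frames whose function mirror carries no context data (top-level script frames, for example) previously reported 0 unconditionally.
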
diff --git a/deps/v8/src/inspector/debugger_script_externs.js b/deps/v8/src/inspector/debugger_script_externs.js
index 6f36fb9c41..656bada862 100644
--- a/deps/v8/src/inspector/debugger_script_externs.js
+++ b/deps/v8/src/inspector/debugger_script_externs.js
@@ -355,6 +355,9 @@ ScriptMirror.prototype.value = function() {}
/** @return {number} */
ScriptMirror.prototype.id = function() {}
+/** @return {ContextMirror} */
+ScriptMirror.prototype.context = function() {}
+
/**
* @param {number} position
* @param {boolean=} includeResourceOffset
diff --git a/deps/v8/src/inspector/inspected-context.cc b/deps/v8/src/inspector/inspected-context.cc
index 0509683789..27766f200a 100644
--- a/deps/v8/src/inspector/inspected-context.cc
+++ b/deps/v8/src/inspector/inspected-context.cc
@@ -4,6 +4,7 @@
#include "src/inspector/inspected-context.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-console.h"
@@ -24,20 +25,16 @@ InspectedContext::InspectedContext(V8InspectorImpl* inspector,
m_humanReadableName(toString16(info.humanReadableName)),
m_auxData(toString16(info.auxData)),
m_reported(false) {
- v8::Isolate* isolate = m_inspector->isolate();
- info.context->SetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex),
- v8::Int32::New(isolate, contextId));
+ v8::debug::SetContextId(info.context, contextId);
+ if (!info.hasMemoryOnConsole) return;
+ v8::Context::Scope contextScope(info.context);
v8::Local<v8::Object> global = info.context->Global();
- v8::Local<v8::Object> console =
- m_inspector->console()->createConsole(info.context);
- if (info.hasMemoryOnConsole) {
- m_inspector->console()->installMemoryGetter(info.context, console);
- }
- if (!global
- ->Set(info.context, toV8StringInternalized(isolate, "console"),
- console)
- .FromMaybe(false)) {
- return;
+ v8::Local<v8::Value> console;
+ if (global->Get(info.context, toV8String(m_inspector->isolate(), "console"))
+ .ToLocal(&console) &&
+ console->IsObject()) {
+ m_inspector->console()->installMemoryGetter(
+ info.context, v8::Local<v8::Object>::Cast(console));
}
}
@@ -46,10 +43,7 @@ InspectedContext::~InspectedContext() {
// static
int InspectedContext::contextId(v8::Local<v8::Context> context) {
- v8::Local<v8::Value> data =
- context->GetEmbedderData(static_cast<int>(v8::Context::kDebugIdIndex));
- if (data.IsEmpty() || !data->IsInt32()) return 0;
- return static_cast<int>(data.As<v8::Int32>()->Value());
+ return v8::debug::GetContextId(context);
}
v8::Local<v8::Context> InspectedContext::context() const {
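
Two embedder-visible shifts land in this file: the context id moves from a v8::Context embedder-data slot into a dedicated debug API, and the inspector stops building its own console object, only decorating the one V8 now provides (the memory getter). The id round-trip, quoting the calls from the hunks above:

    v8::debug::SetContextId(info.context, contextId);  // at InspectedContext creation
    int id = v8::debug::GetContextId(context);         // presumably 0 when never set,
                                                       // matching the removed path
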
diff --git a/deps/v8/src/inspector/inspector.gyp b/deps/v8/src/inspector/inspector.gyp
index 91507bd579..bedab694a8 100644
--- a/deps/v8/src/inspector/inspector.gyp
+++ b/deps/v8/src/inspector/inspector.gyp
@@ -13,13 +13,7 @@
'targets': [
{ 'target_name': 'inspector_injected_script',
'type': 'none',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }]
- ],
+ 'toolsets': ['target'],
'actions': [
{
'action_name': 'convert_js_to_cpp_char_array',
@@ -44,13 +38,7 @@
},
{ 'target_name': 'inspector_debugger_script',
'type': 'none',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }]
- ],
+ 'toolsets': ['target'],
'actions': [
{
'action_name': 'convert_js_to_cpp_char_array',
@@ -75,13 +63,7 @@
},
{ 'target_name': 'protocol_compatibility',
'type': 'none',
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }]
- ],
+ 'toolsets': ['target'],
'actions': [
{
'action_name': 'protocol_compatibility',
@@ -104,13 +86,7 @@
{ 'target_name': 'protocol_generated_sources',
'type': 'none',
'dependencies': [ 'protocol_compatibility' ],
- 'conditions': [
- ['want_separate_host_toolset==1', {
- 'toolsets': ['host', 'target'],
- }, {
- 'toolsets': ['target'],
- }]
- ],
+ 'toolsets': ['target'],
'actions': [
{
'action_name': 'protocol_generated_sources',
diff --git a/deps/v8/src/inspector/js_protocol.json b/deps/v8/src/inspector/js_protocol.json
index 7384835fdc..62545cd80d 100644
--- a/deps/v8/src/inspector/js_protocol.json
+++ b/deps/v8/src/inspector/js_protocol.json
@@ -349,7 +349,7 @@
{
"name": "executionContextCreated",
"parameters": [
- { "name": "context", "$ref": "ExecutionContextDescription", "description": "A newly created execution contex." }
+ { "name": "context", "$ref": "ExecutionContextDescription", "description": "A newly created execution context." }
],
"description": "Issued when new execution context is created."
},
@@ -545,7 +545,7 @@
"name": "getPossibleBreakpoints",
"parameters": [
{ "name": "start", "$ref": "Location", "description": "Start of range to search possible breakpoint locations in." },
- { "name": "end", "$ref": "Location", "optional": true, "description": "End of range to search possible breakpoint locations in (excluding). When not specifed, end of scripts is used as end of range." },
+ { "name": "end", "$ref": "Location", "optional": true, "description": "End of range to search possible breakpoint locations in (excluding). When not specified, end of scripts is used as end of range." },
{ "name": "restrictToFunction", "type": "boolean", "optional": true, "description": "Only consider locations which are in the same (non-nested) function as start." }
],
"returns": [
@@ -557,7 +557,8 @@
{
"name": "continueToLocation",
"parameters": [
- { "name": "location", "$ref": "Location", "description": "Location to continue to." }
+ { "name": "location", "$ref": "Location", "description": "Location to continue to." },
+ { "name": "targetCallFrames", "type": "string", "enum": ["any", "current"], "optional": true, "experimental": true }
],
"description": "Continues execution until specific location is reached."
},
@@ -938,7 +939,7 @@
{ "name": "location", "$ref": "Debugger.Location", "description": "Location of console.profile()." },
{ "name": "title", "type": "string", "optional": true, "description": "Profile title passed as an argument to console.profile()." }
],
- "description": "Sent when new profile recodring is started using console.profile() call."
+ "description": "Sent when new profile recording is started using console.profile() call."
},
{
"name": "consoleProfileFinished",
@@ -1067,7 +1068,7 @@
},
{
"name": "lastSeenObjectId",
- "description": "If heap objects tracking has been started then backend regulary sends a current value for last seen object id and corresponding timestamp. If the were changes in the heap since last event then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
+ "description": "If heap objects tracking has been started then backend regularly sends a current value for last seen object id and corresponding timestamp. If the were changes in the heap since last event then one or more heapStatsUpdate events will be sent before a new lastSeenObjectId event.",
"parameters": [
{ "name": "lastSeenObjectId", "type": "integer" },
{ "name": "timestamp", "type": "number" }
diff --git a/deps/v8/src/inspector/string-util.cc b/deps/v8/src/inspector/string-util.cc
index 31b2db572d..95d4247d14 100644
--- a/deps/v8/src/inspector/string-util.cc
+++ b/deps/v8/src/inspector/string-util.cc
@@ -4,7 +4,9 @@
#include "src/inspector/string-util.h"
+#include "src/conversions.h"
#include "src/inspector/protocol/Protocol.h"
+#include "src/unicode-cache.h"
namespace v8_inspector {
@@ -92,6 +94,16 @@ bool stringViewStartsWith(const StringView& string, const char* prefix) {
namespace protocol {
+// static
+double StringUtil::toDouble(const char* s, size_t len, bool* isOk) {
+ v8::internal::UnicodeCache unicode_cache;
+ int flags = v8::internal::ALLOW_HEX | v8::internal::ALLOW_OCTAL |
+ v8::internal::ALLOW_BINARY;
+ double result = StringToDouble(&unicode_cache, s, flags);
+ *isOk = !std::isnan(result);
+ return result;
+}
+
std::unique_ptr<protocol::Value> StringUtil::parseJSON(
const StringView& string) {
if (!string.length()) return nullptr;
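
Worth noting in the new StringUtil::toDouble: the len argument is never consulted, so the implementation shown relies on s being NUL-terminated, and a NaN result is the only failure signal. A usage sketch under those assumptions (the caller here is hypothetical):

    bool ok = false;
    double d = v8_inspector::protocol::StringUtil::toDouble("0x10", 4, &ok);
    // With ALLOW_HEX set, "0x10" parses as 16.0 and ok becomes true;
    // unparseable input yields NaN, which is reported as ok == false.
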
diff --git a/deps/v8/src/inspector/string-util.h b/deps/v8/src/inspector/string-util.h
index 6f0e3d5ff5..134ff425e1 100644
--- a/deps/v8/src/inspector/string-util.h
+++ b/deps/v8/src/inspector/string-util.h
@@ -32,6 +32,7 @@ class StringUtil {
return String::fromInteger(number);
}
static String fromDouble(double number) { return String::fromDouble(number); }
+ static double toDouble(const char* s, size_t len, bool* isOk);
static size_t find(const String& s, const char* needle) {
return s.find(needle);
}
diff --git a/deps/v8/src/inspector/test-interface.cc b/deps/v8/src/inspector/test-interface.cc
index ead1dc3b81..6167f8be27 100644
--- a/deps/v8/src/inspector/test-interface.cc
+++ b/deps/v8/src/inspector/test-interface.cc
@@ -15,4 +15,10 @@ void SetMaxAsyncTaskStacksForTest(V8Inspector* inspector, int limit) {
->setMaxAsyncTaskStacksForTest(limit);
}
-} // v8_inspector
+void DumpAsyncTaskStacksStateForTest(V8Inspector* inspector) {
+ static_cast<V8InspectorImpl*>(inspector)
+ ->debugger()
+ ->dumpAsyncTaskStacksStateForTest();
+}
+
+} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/test-interface.h b/deps/v8/src/inspector/test-interface.h
index 98bedc2786..70fbca186f 100644
--- a/deps/v8/src/inspector/test-interface.h
+++ b/deps/v8/src/inspector/test-interface.h
@@ -12,6 +12,7 @@ namespace v8_inspector {
class V8Inspector;
V8_EXPORT void SetMaxAsyncTaskStacksForTest(V8Inspector* inspector, int limit);
+V8_EXPORT void DumpAsyncTaskStacksStateForTest(V8Inspector* inspector);
} // v8_inspector
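
The test-only surface grows a second hook next to SetMaxAsyncTaskStacksForTest; both cast the public V8Inspector down to V8InspectorImpl, so they are only meaningful on inspectors created by this implementation. Illustrative test usage:

    v8_inspector::SetMaxAsyncTaskStacksForTest(inspector, 1024);
    v8_inspector::DumpAsyncTaskStacksStateForTest(inspector);
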
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 69e58dfabd..0d3c03a4da 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -25,11 +25,11 @@ namespace {
class ConsoleHelper {
public:
- explicit ConsoleHelper(const v8::FunctionCallbackInfo<v8::Value>& info,
- V8InspectorImpl* inspector)
+ ConsoleHelper(const v8::debug::ConsoleCallArguments& info,
+ V8InspectorImpl* inspector)
: m_info(info),
- m_isolate(info.GetIsolate()),
- m_context(info.GetIsolate()->GetCurrentContext()),
+ m_isolate(inspector->isolate()),
+ m_context(m_isolate->GetCurrentContext()),
m_inspector(inspector),
m_contextId(InspectedContext::contextId(m_context)),
m_groupId(m_inspector->contextGroupId(m_contextId)) {}
@@ -145,7 +145,7 @@ class ConsoleHelper {
}
private:
- const v8::FunctionCallbackInfo<v8::Value>& m_info;
+ const v8::debug::ConsoleCallArguments& m_info;
v8::Isolate* m_isolate;
v8::Local<v8::Context> m_context;
V8InspectorImpl* m_inspector = nullptr;
@@ -190,66 +190,63 @@ void createBoundFunctionProperty(v8::Local<v8::Context> context,
V8Console::V8Console(V8InspectorImpl* inspector) : m_inspector(inspector) {}
-void V8Console::debugCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Debug(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kDebug);
}
-void V8Console::errorCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Error(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kError);
}
-void V8Console::infoCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Info(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kInfo);
}
-void V8Console::logCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Log(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kLog);
}
-void V8Console::warnCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Warn(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kWarning);
}
-void V8Console::dirCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Dir(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kDir);
}
-void V8Console::dirxmlCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::DirXml(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kDirXML);
}
-void V8Console::tableCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Table(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector).reportCall(ConsoleAPIType::kTable);
}
-void V8Console::traceCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Trace(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector)
.reportCallWithDefaultArgument(ConsoleAPIType::kTrace,
String16("console.trace"));
}
-void V8Console::groupCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Group(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector)
.reportCallWithDefaultArgument(ConsoleAPIType::kStartGroup,
String16("console.group"));
}
-void V8Console::groupCollapsedCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::GroupCollapsed(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector)
.reportCallWithDefaultArgument(ConsoleAPIType::kStartGroupCollapsed,
String16("console.groupCollapsed"));
}
-void V8Console::groupEndCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::GroupEnd(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector)
.reportCallWithDefaultArgument(ConsoleAPIType::kEndGroup,
String16("console.groupEnd"));
}
-void V8Console::clearCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Clear(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper helper(info, m_inspector);
if (!helper.groupId()) return;
m_inspector->client()->consoleClear(helper.groupId());
@@ -257,13 +254,13 @@ void V8Console::clearCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
String16("console.clear"));
}
-void V8Console::countCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Count(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper helper(info, m_inspector);
String16 title = helper.firstArgToString(String16());
String16 identifier;
if (title.isEmpty()) {
std::unique_ptr<V8StackTraceImpl> stackTrace =
- V8StackTraceImpl::capture(m_inspector->debugger(), 0, 1);
+ V8StackTraceImpl::capture(m_inspector->debugger(), helper.groupId(), 1);
if (stackTrace && !stackTrace->isEmpty()) {
identifier = toString16(stackTrace->topSourceURL()) + ":" +
String16::fromInteger(stackTrace->topLineNumber());
@@ -280,16 +277,15 @@ void V8Console::countCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
title.isEmpty() ? countString : (title + ": " + countString));
}
-void V8Console::assertCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Assert(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper helper(info, m_inspector);
- if (helper.firstArgToBoolean(false)) return;
+ DCHECK(!helper.firstArgToBoolean(false));
std::vector<v8::Local<v8::Value>> arguments;
for (int i = 1; i < info.Length(); ++i) arguments.push_back(info[i]);
if (info.Length() < 2)
arguments.push_back(
- toV8String(info.GetIsolate(), String16("console.assert")));
+ toV8String(m_inspector->isolate(), String16("console.assert")));
helper.reportCall(ConsoleAPIType::kAssert, arguments);
if (V8DebuggerAgentImpl* debuggerAgent = helper.debuggerAgent())
@@ -297,31 +293,28 @@ void V8Console::assertCallback(
protocol::Debugger::Paused::ReasonEnum::Assert, nullptr);
}
-void V8Console::markTimelineCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::MarkTimeline(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector)
.reportDeprecatedCall("V8Console#markTimelineDeprecated",
"'console.markTimeline' is "
"deprecated. Please use "
"'console.timeStamp' instead.");
- timeStampCallback(info);
+ TimeStamp(info);
}
-void V8Console::profileCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Profile(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper helper(info, m_inspector);
if (V8ProfilerAgentImpl* profilerAgent = helper.profilerAgent())
profilerAgent->consoleProfile(helper.firstArgToString(String16()));
}
-void V8Console::profileEndCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::ProfileEnd(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper helper(info, m_inspector);
if (V8ProfilerAgentImpl* profilerAgent = helper.profilerAgent())
profilerAgent->consoleProfileEnd(helper.firstArgToString(String16()));
}
-static void timeFunction(const v8::FunctionCallbackInfo<v8::Value>& info,
+static void timeFunction(const v8::debug::ConsoleCallArguments& info,
bool timelinePrefix, V8InspectorImpl* inspector) {
ConsoleHelper helper(info, inspector);
String16 protocolTitle = helper.firstArgToString("default");
@@ -330,7 +323,7 @@ static void timeFunction(const v8::FunctionCallbackInfo<v8::Value>& info,
helper.consoleMessageStorage()->time(helper.contextId(), protocolTitle);
}
-static void timeEndFunction(const v8::FunctionCallbackInfo<v8::Value>& info,
+static void timeEndFunction(const v8::debug::ConsoleCallArguments& info,
bool timelinePrefix, V8InspectorImpl* inspector) {
ConsoleHelper helper(info, inspector);
String16 protocolTitle = helper.firstArgToString("default");
@@ -343,8 +336,7 @@ static void timeEndFunction(const v8::FunctionCallbackInfo<v8::Value>& info,
helper.reportCallWithArgument(ConsoleAPIType::kTimeEnd, message);
}
-void V8Console::timelineCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Timeline(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector)
.reportDeprecatedCall("V8Console#timeline",
"'console.timeline' is deprecated. Please use "
@@ -352,8 +344,7 @@ void V8Console::timelineCallback(
timeFunction(info, true, m_inspector);
}
-void V8Console::timelineEndCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::TimelineEnd(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper(info, m_inspector)
.reportDeprecatedCall("V8Console#timelineEnd",
"'console.timelineEnd' is "
@@ -362,17 +353,15 @@ void V8Console::timelineEndCallback(
timeEndFunction(info, true, m_inspector);
}
-void V8Console::timeCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::Time(const v8::debug::ConsoleCallArguments& info) {
timeFunction(info, false, m_inspector);
}
-void V8Console::timeEndCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::TimeEnd(const v8::debug::ConsoleCallArguments& info) {
timeEndFunction(info, false, m_inspector);
}
-void V8Console::timeStampCallback(
- const v8::FunctionCallbackInfo<v8::Value>& info) {
+void V8Console::TimeStamp(const v8::debug::ConsoleCallArguments& info) {
ConsoleHelper helper(info, m_inspector);
String16 title = helper.firstArgToString(String16());
m_inspector->client()->consoleTimeStamp(toStringView(title));
@@ -400,7 +389,8 @@ void V8Console::keysCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
v8::Isolate* isolate = info.GetIsolate();
info.GetReturnValue().Set(v8::Array::New(isolate));
- ConsoleHelper helper(info, m_inspector);
+ v8::debug::ConsoleCallArguments args(info);
+ ConsoleHelper helper(args, m_inspector);
v8::Local<v8::Object> obj;
if (!helper.firstArgAsObject().ToLocal(&obj)) return;
v8::Local<v8::Array> names;
@@ -414,7 +404,8 @@ void V8Console::valuesCallback(
v8::Isolate* isolate = info.GetIsolate();
info.GetReturnValue().Set(v8::Array::New(isolate));
- ConsoleHelper helper(info, m_inspector);
+ v8::debug::ConsoleCallArguments args(info);
+ ConsoleHelper helper(args, m_inspector);
v8::Local<v8::Object> obj;
if (!helper.firstArgAsObject().ToLocal(&obj)) return;
v8::Local<v8::Array> names;
@@ -453,7 +444,8 @@ static void setFunctionBreakpoint(ConsoleHelper& helper,
void V8Console::debugFunctionCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- ConsoleHelper helper(info, m_inspector);
+ v8::debug::ConsoleCallArguments args(info);
+ ConsoleHelper helper(args, m_inspector);
v8::Local<v8::Function> function;
if (!helper.firstArgAsFunction().ToLocal(&function)) return;
setFunctionBreakpoint(helper, function,
@@ -463,7 +455,8 @@ void V8Console::debugFunctionCallback(
void V8Console::undebugFunctionCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- ConsoleHelper helper(info, m_inspector);
+ v8::debug::ConsoleCallArguments args(info);
+ ConsoleHelper helper(args, m_inspector);
v8::Local<v8::Function> function;
if (!helper.firstArgAsFunction().ToLocal(&function)) return;
setFunctionBreakpoint(helper, function,
@@ -473,7 +466,8 @@ void V8Console::undebugFunctionCallback(
void V8Console::monitorFunctionCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- ConsoleHelper helper(info, m_inspector);
+ v8::debug::ConsoleCallArguments args(info);
+ ConsoleHelper helper(args, m_inspector);
v8::Local<v8::Function> function;
if (!helper.firstArgAsFunction().ToLocal(&function)) return;
v8::Local<v8::Value> name = function->GetName();
@@ -496,7 +490,8 @@ void V8Console::monitorFunctionCallback(
void V8Console::unmonitorFunctionCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- ConsoleHelper helper(info, m_inspector);
+ v8::debug::ConsoleCallArguments args(info);
+ ConsoleHelper helper(args, m_inspector);
v8::Local<v8::Function> function;
if (!helper.firstArgAsFunction().ToLocal(&function)) return;
setFunctionBreakpoint(helper, function,
@@ -506,7 +501,8 @@ void V8Console::unmonitorFunctionCallback(
void V8Console::lastEvaluationResultCallback(
const v8::FunctionCallbackInfo<v8::Value>& info) {
- ConsoleHelper helper(info, m_inspector);
+ v8::debug::ConsoleCallArguments args(info);
+ ConsoleHelper helper(args, m_inspector);
InjectedScript* injectedScript = helper.injectedScript();
if (!injectedScript) return;
info.GetReturnValue().Set(injectedScript->lastEvaluationResult());
@@ -517,7 +513,8 @@ static void inspectImpl(const v8::FunctionCallbackInfo<v8::Value>& info,
if (info.Length() < 1) return;
if (!copyToClipboard) info.GetReturnValue().Set(info[0]);
- ConsoleHelper helper(info, inspector);
+ v8::debug::ConsoleCallArguments args(info);
+ ConsoleHelper helper(args, inspector);
InjectedScript* injectedScript = helper.injectedScript();
if (!injectedScript) return;
std::unique_ptr<protocol::Runtime::RemoteObject> wrappedObject;
@@ -547,7 +544,8 @@ void V8Console::copyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
void V8Console::inspectedObject(const v8::FunctionCallbackInfo<v8::Value>& info,
unsigned num) {
DCHECK(num < V8InspectorSessionImpl::kInspectedObjectBufferSize);
- ConsoleHelper helper(info, m_inspector);
+ v8::debug::ConsoleCallArguments args(info);
+ ConsoleHelper helper(args, m_inspector);
if (V8InspectorSessionImpl* session = helper.currentSession()) {
V8InspectorSession::Inspectable* object = session->inspectedObject(num);
v8::Isolate* isolate = info.GetIsolate();
@@ -558,71 +556,6 @@ void V8Console::inspectedObject(const v8::FunctionCallbackInfo<v8::Value>& info,
}
}
-v8::Local<v8::Object> V8Console::createConsole(v8::Local<v8::Context> context) {
- v8::Context::Scope contextScope(context);
- v8::Isolate* isolate = context->GetIsolate();
- v8::MicrotasksScope microtasksScope(isolate,
- v8::MicrotasksScope::kDoNotRunMicrotasks);
-
- v8::Local<v8::Object> console = v8::Object::New(isolate);
- bool success =
- console->SetPrototype(context, v8::Object::New(isolate)).FromMaybe(false);
- DCHECK(success);
- USE(success);
-
- v8::Local<v8::External> data = v8::External::New(isolate, this);
- createBoundFunctionProperty(context, console, data, "debug",
- &V8Console::call<&V8Console::debugCallback>);
- createBoundFunctionProperty(context, console, data, "error",
- &V8Console::call<&V8Console::errorCallback>);
- createBoundFunctionProperty(context, console, data, "info",
- &V8Console::call<&V8Console::infoCallback>);
- createBoundFunctionProperty(context, console, data, "log",
- &V8Console::call<&V8Console::logCallback>);
- createBoundFunctionProperty(context, console, data, "warn",
- &V8Console::call<&V8Console::warnCallback>);
- createBoundFunctionProperty(context, console, data, "dir",
- &V8Console::call<&V8Console::dirCallback>);
- createBoundFunctionProperty(context, console, data, "dirxml",
- &V8Console::call<&V8Console::dirxmlCallback>);
- createBoundFunctionProperty(context, console, data, "table",
- &V8Console::call<&V8Console::tableCallback>);
- createBoundFunctionProperty(context, console, data, "trace",
- &V8Console::call<&V8Console::traceCallback>);
- createBoundFunctionProperty(context, console, data, "group",
- &V8Console::call<&V8Console::groupCallback>);
- createBoundFunctionProperty(
- context, console, data, "groupCollapsed",
- &V8Console::call<&V8Console::groupCollapsedCallback>);
- createBoundFunctionProperty(context, console, data, "groupEnd",
- &V8Console::call<&V8Console::groupEndCallback>);
- createBoundFunctionProperty(context, console, data, "clear",
- &V8Console::call<&V8Console::clearCallback>);
- createBoundFunctionProperty(context, console, data, "count",
- &V8Console::call<&V8Console::countCallback>);
- createBoundFunctionProperty(context, console, data, "assert",
- &V8Console::call<&V8Console::assertCallback>);
- createBoundFunctionProperty(
- context, console, data, "markTimeline",
- &V8Console::call<&V8Console::markTimelineCallback>);
- createBoundFunctionProperty(context, console, data, "profile",
- &V8Console::call<&V8Console::profileCallback>);
- createBoundFunctionProperty(context, console, data, "profileEnd",
- &V8Console::call<&V8Console::profileEndCallback>);
- createBoundFunctionProperty(context, console, data, "timeline",
- &V8Console::call<&V8Console::timelineCallback>);
- createBoundFunctionProperty(
- context, console, data, "timelineEnd",
- &V8Console::call<&V8Console::timelineEndCallback>);
- createBoundFunctionProperty(context, console, data, "time",
- &V8Console::call<&V8Console::timeCallback>);
- createBoundFunctionProperty(context, console, data, "timeEnd",
- &V8Console::call<&V8Console::timeEndCallback>);
- createBoundFunctionProperty(context, console, data, "timeStamp",
- &V8Console::call<&V8Console::timeStampCallback>);
- return console;
-}
-
void V8Console::installMemoryGetter(v8::Local<v8::Context> context,
v8::Local<v8::Object> console) {
v8::Isolate* isolate = context->GetIsolate();
@@ -654,24 +587,24 @@ v8::Local<v8::Object> V8Console::createCommandLineAPI(
v8::Local<v8::External> data = v8::External::New(isolate, this);
createBoundFunctionProperty(context, commandLineAPI, data, "dir",
- &V8Console::call<&V8Console::dirCallback>,
+ &V8Console::call<&V8Console::Dir>,
"function dir(value) { [Command Line API] }");
createBoundFunctionProperty(context, commandLineAPI, data, "dirxml",
- &V8Console::call<&V8Console::dirxmlCallback>,
+ &V8Console::call<&V8Console::DirXml>,
"function dirxml(value) { [Command Line API] }");
createBoundFunctionProperty(context, commandLineAPI, data, "profile",
- &V8Console::call<&V8Console::profileCallback>,
+ &V8Console::call<&V8Console::Profile>,
"function profile(title) { [Command Line API] }");
createBoundFunctionProperty(
context, commandLineAPI, data, "profileEnd",
- &V8Console::call<&V8Console::profileEndCallback>,
+ &V8Console::call<&V8Console::ProfileEnd>,
"function profileEnd(title) { [Command Line API] }");
createBoundFunctionProperty(context, commandLineAPI, data, "clear",
- &V8Console::call<&V8Console::clearCallback>,
+ &V8Console::call<&V8Console::Clear>,
"function clear() { [Command Line API] }");
createBoundFunctionProperty(
context, commandLineAPI, data, "table",
- &V8Console::call<&V8Console::tableCallback>,
+ &V8Console::call<&V8Console::Table>,
"function table(data, [columns]) { [Command Line API] }");
createBoundFunctionProperty(context, commandLineAPI, data, "keys",
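
The console implementation is rewritten against v8::debug::ConsoleCallArguments: V8 now owns the console object and calls back into a delegate, which is why createConsole disappears above and why Assert can DCHECK that its first argument is falsy (the DCHECK suggests V8 only invokes the delegate once the assertion has already failed). Callbacks that still arrive as FunctionCallbackInfo (keys, values, and the breakpoint helpers) wrap their arguments first, exactly as the hunks show:

    // The wrap pattern repeated by keys/values and the breakpoint helpers.
    v8::debug::ConsoleCallArguments args(info);
    ConsoleHelper helper(args, m_inspector);
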
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
index 0b833b3c94..e31133c4e1 100644
--- a/deps/v8/src/inspector/v8-console.h
+++ b/deps/v8/src/inspector/v8-console.h
@@ -8,6 +8,7 @@
#include "src/base/macros.h"
#include "include/v8.h"
+#include "src/debug/interface-types.h"
namespace v8_inspector {
@@ -16,9 +17,8 @@ class V8InspectorImpl;
// Console API
// https://console.spec.whatwg.org/#console-interface
-class V8Console {
+class V8Console : public v8::debug::ConsoleDelegate {
public:
- v8::Local<v8::Object> createConsole(v8::Local<v8::Context> context);
v8::Local<v8::Object> createCommandLineAPI(v8::Local<v8::Context> context);
void installMemoryGetter(v8::Local<v8::Context> context,
v8::Local<v8::Object> console);
@@ -49,29 +49,29 @@ class V8Console {
explicit V8Console(V8InspectorImpl* inspector);
private:
- void debugCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void errorCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void infoCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void logCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void warnCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void dirCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void dirxmlCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void tableCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void traceCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void groupCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void groupCollapsedCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void groupEndCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void clearCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void countCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void assertCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void markTimelineCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void profileCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void profileEndCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void timelineCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void timelineEndCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void timeCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void timeEndCallback(const v8::FunctionCallbackInfo<v8::Value>&);
- void timeStampCallback(const v8::FunctionCallbackInfo<v8::Value>&);
+ void Debug(const v8::debug::ConsoleCallArguments&) override;
+ void Error(const v8::debug::ConsoleCallArguments&) override;
+ void Info(const v8::debug::ConsoleCallArguments&) override;
+ void Log(const v8::debug::ConsoleCallArguments&) override;
+ void Warn(const v8::debug::ConsoleCallArguments&) override;
+ void Dir(const v8::debug::ConsoleCallArguments&) override;
+ void DirXml(const v8::debug::ConsoleCallArguments&) override;
+ void Table(const v8::debug::ConsoleCallArguments&) override;
+ void Trace(const v8::debug::ConsoleCallArguments&) override;
+ void Group(const v8::debug::ConsoleCallArguments&) override;
+ void GroupCollapsed(const v8::debug::ConsoleCallArguments&) override;
+ void GroupEnd(const v8::debug::ConsoleCallArguments&) override;
+ void Clear(const v8::debug::ConsoleCallArguments&) override;
+ void Count(const v8::debug::ConsoleCallArguments&) override;
+ void Assert(const v8::debug::ConsoleCallArguments&) override;
+ void MarkTimeline(const v8::debug::ConsoleCallArguments&) override;
+ void Profile(const v8::debug::ConsoleCallArguments&) override;
+ void ProfileEnd(const v8::debug::ConsoleCallArguments&) override;
+ void Timeline(const v8::debug::ConsoleCallArguments&) override;
+ void TimelineEnd(const v8::debug::ConsoleCallArguments&) override;
+ void Time(const v8::debug::ConsoleCallArguments&) override;
+ void TimeEnd(const v8::debug::ConsoleCallArguments&) override;
+ void TimeStamp(const v8::debug::ConsoleCallArguments&) override;
template <void (V8Console::*func)(const v8::FunctionCallbackInfo<v8::Value>&)>
static void call(const v8::FunctionCallbackInfo<v8::Value>& info) {
@@ -79,6 +79,13 @@ class V8Console {
static_cast<V8Console*>(info.Data().As<v8::External>()->Value());
(console->*func)(info);
}
+ template <void (V8Console::*func)(const v8::debug::ConsoleCallArguments&)>
+ static void call(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ V8Console* console =
+ static_cast<V8Console*>(info.Data().As<v8::External>()->Value());
+ v8::debug::ConsoleCallArguments args(info);
+ (console->*func)(args);
+ }
// TODO(foolip): There is no spec for the Memory Info API, see blink-dev:
// https://groups.google.com/a/chromium.org/d/msg/blink-dev/g5YRCGpC9vs/b4OJz71NmPwJ
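
The second call<> overload above is what bridges the two worlds: the member-function pointer is baked into the template, the V8Console* travels through the bound function's External data, and the FunctionCallbackInfo is converted to ConsoleCallArguments at the boundary. Registration then reads as in the command-line-API hunk of v8-console.cc:

    createBoundFunctionProperty(context, commandLineAPI, data, "dir",
                                &V8Console::call<&V8Console::Dir>,
                                "function dir(value) { [Command Line API] }");
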
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index 293c2d43c7..7b03c96c0a 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -34,7 +34,6 @@ using protocol::Debugger::BreakpointId;
using protocol::Debugger::CallFrame;
using protocol::Runtime::ExceptionDetails;
using protocol::Runtime::ScriptId;
-using protocol::Runtime::StackTrace;
using protocol::Runtime::RemoteObject;
namespace DebuggerAgentState {
@@ -238,7 +237,7 @@ Response V8DebuggerAgentImpl::disable() {
v8::debug::NoBreakOnException);
m_state->setInteger(DebuggerAgentState::asyncCallStackDepth, 0);
- if (isPaused()) m_debugger->continueProgram();
+ if (isPaused()) m_debugger->continueProgram(m_session->contextGroupId());
m_debugger->disable();
JavaScriptCallFrames emptyCallFrames;
m_pausedCallFrames.swap(emptyCallFrames);
@@ -248,7 +247,6 @@ Response V8DebuggerAgentImpl::disable() {
m_scripts.clear();
m_breakpointIdToDebuggerBreakpointIds.clear();
m_debugger->setAsyncCallStackDepth(this, 0);
- m_continueToLocationBreakpointId = String16();
clearBreakDetails();
m_skipAllPauses = false;
m_state->setBoolean(DebuggerAgentState::skipAllPauses, false);
@@ -457,11 +455,15 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
}
auto it = m_scripts.find(scriptId);
if (it == m_scripts.end()) return Response::Error("Script not found");
-
std::vector<v8::debug::BreakLocation> v8Locations;
- if (!it->second->getPossibleBreakpoints(
- v8Start, v8End, restrictToFunction.fromMaybe(false), &v8Locations)) {
- return Response::InternalError();
+ {
+ v8::HandleScope handleScope(m_isolate);
+ v8::Local<v8::Context> debuggerContext =
+ v8::debug::GetDebugContext(m_isolate);
+ v8::Context::Scope contextScope(debuggerContext);
+ v8::TryCatch tryCatch(m_isolate);
+ it->second->getPossibleBreakpoints(
+ v8Start, v8End, restrictToFunction.fromMaybe(false), &v8Locations);
}
*locations = protocol::Array<protocol::Debugger::BreakLocation>::create();
@@ -481,21 +483,14 @@ Response V8DebuggerAgentImpl::getPossibleBreakpoints(
}
Response V8DebuggerAgentImpl::continueToLocation(
- std::unique_ptr<protocol::Debugger::Location> location) {
+ std::unique_ptr<protocol::Debugger::Location> location,
+ Maybe<String16> targetCallFrames) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
- if (!m_continueToLocationBreakpointId.isEmpty()) {
- m_debugger->removeBreakpoint(m_continueToLocationBreakpointId);
- m_continueToLocationBreakpointId = "";
- }
-
- ScriptBreakpoint breakpoint(location->getScriptId(),
- location->getLineNumber(),
- location->getColumnNumber(0), String16());
-
- m_continueToLocationBreakpointId = m_debugger->setBreakpoint(
- breakpoint, &breakpoint.line_number, &breakpoint.column_number);
- // TODO(kozyatinskiy): Return actual line and column number.
- return resume();
+ if (!isPaused()) return Response::Error(kDebuggerNotPaused);
+ return m_debugger->continueToLocation(
+ m_session->contextGroupId(), std::move(location),
+ targetCallFrames.fromMaybe(
+ protocol::Debugger::ContinueToLocation::TargetCallFramesEnum::Any));
}
bool V8DebuggerAgentImpl::isFunctionBlackboxed(const String16& scriptId,
@@ -598,7 +593,8 @@ Response V8DebuggerAgentImpl::searchInContent(
Response V8DebuggerAgentImpl::setScriptSource(
const String16& scriptId, const String16& newContent, Maybe<bool> dryRun,
Maybe<protocol::Array<protocol::Debugger::CallFrame>>* newCallFrames,
- Maybe<bool>* stackChanged, Maybe<StackTrace>* asyncStackTrace,
+ Maybe<bool>* stackChanged,
+ Maybe<protocol::Runtime::StackTrace>* asyncStackTrace,
Maybe<protocol::Runtime::ExceptionDetails>* optOutCompileError) {
if (!enabled()) return Response::Error(kDebuggerNotEnabled);
@@ -631,7 +627,7 @@ Response V8DebuggerAgentImpl::setScriptSource(
Response V8DebuggerAgentImpl::restartFrame(
const String16& callFrameId,
std::unique_ptr<Array<CallFrame>>* newCallFrames,
- Maybe<StackTrace>* asyncStackTrace) {
+ Maybe<protocol::Runtime::StackTrace>* asyncStackTrace) {
if (!isPaused()) return Response::Error(kDebuggerNotPaused);
InjectedScript::CallFrameScope scope(m_inspector, m_session->contextGroupId(),
callFrameId);
@@ -715,7 +711,7 @@ Response V8DebuggerAgentImpl::pause() {
Response V8DebuggerAgentImpl::resume() {
if (!isPaused()) return Response::Error(kDebuggerNotPaused);
m_session->releaseObjectGroup(kBacktraceObjectGroup);
- m_debugger->continueProgram();
+ m_debugger->continueProgram(m_session->contextGroupId());
return Response::OK();
}
@@ -1028,11 +1024,14 @@ Response V8DebuggerAgentImpl::currentCallFrames(
return Response::OK();
}
-std::unique_ptr<StackTrace> V8DebuggerAgentImpl::currentAsyncStackTrace() {
- if (!isPaused()) return nullptr;
- V8StackTraceImpl* stackTrace = m_debugger->currentAsyncCallChain();
- return stackTrace ? stackTrace->buildInspectorObjectForTail(m_debugger)
- : nullptr;
+std::unique_ptr<protocol::Runtime::StackTrace>
+V8DebuggerAgentImpl::currentAsyncStackTrace() {
+ std::shared_ptr<AsyncStackTrace> asyncParent =
+ m_debugger->currentAsyncParent();
+ if (!asyncParent) return nullptr;
+ return asyncParent->buildInspectorObject(
+ m_debugger->currentAsyncCreation().get(),
+ m_debugger->maxAsyncCallChainDepth() - 1);
}
bool V8DebuggerAgentImpl::isPaused() const { return m_debugger->isPaused(); }
@@ -1226,11 +1225,6 @@ void V8DebuggerAgentImpl::didPause(int contextId,
m_frontend.paused(std::move(protocolCallFrames), breakReason,
std::move(breakAuxData), std::move(hitBreakpointIds),
currentAsyncStackTrace());
-
- if (!m_continueToLocationBreakpointId.isEmpty()) {
- m_debugger->removeBreakpoint(m_continueToLocationBreakpointId);
- m_continueToLocationBreakpointId = "";
- }
}
void V8DebuggerAgentImpl::didContinue() {
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.h b/deps/v8/src/inspector/v8-debugger-agent-impl.h
index 5fa251ca98..c9433e20f6 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.h
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.h
@@ -23,7 +23,6 @@ class V8DebuggerScript;
class V8InspectorImpl;
class V8InspectorSessionImpl;
class V8Regex;
-class V8StackTraceImpl;
using protocol::Maybe;
using protocol::Response;
@@ -57,8 +56,8 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
Maybe<String16> optionalCondition, String16*,
std::unique_ptr<protocol::Debugger::Location>* actualLocation) override;
Response removeBreakpoint(const String16& breakpointId) override;
- Response continueToLocation(
- std::unique_ptr<protocol::Debugger::Location>) override;
+ Response continueToLocation(std::unique_ptr<protocol::Debugger::Location>,
+ Maybe<String16> targetCallFrames) override;
Response searchInContent(
const String16& scriptId, const String16& query,
Maybe<bool> optionalCaseSensitive, Maybe<bool> optionalIsRegex,
@@ -185,7 +184,6 @@ class V8DebuggerAgentImpl : public protocol::Debugger::Backend {
ScriptsMap m_scripts;
BreakpointIdToDebuggerBreakpointIdsMap m_breakpointIdToDebuggerBreakpointIds;
DebugServerBreakpointToBreakpointIdAndSourceMap m_serverBreakpoints;
- String16 m_continueToLocationBreakpointId;
using BreakReason =
std::pair<String16, std::unique_ptr<protocol::DictionaryValue>>;
diff --git a/deps/v8/src/inspector/v8-debugger-script.cc b/deps/v8/src/inspector/v8-debugger-script.cc
index d151ab821f..e0aaceb888 100644
--- a/deps/v8/src/inspector/v8-debugger-script.cc
+++ b/deps/v8/src/inspector/v8-debugger-script.cc
@@ -138,11 +138,7 @@ class ActualScript : public V8DebuggerScript {
m_endColumn = m_startColumn;
}
- v8::Local<v8::Value> contextData;
- if (script->ContextData().ToLocal(&contextData) && contextData->IsInt32()) {
- m_executionContextId =
- static_cast<int>(contextData.As<v8::Int32>()->Value());
- }
+ USE(script->ContextId().To(&m_executionContextId));
if (script->Source().ToLocal(&tmp)) {
m_source = toProtocolString(tmp);
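
Script context ids follow the same migration as contexts: v8::debug::Script::ContextId() returns a v8::Maybe<int>, replacing the old dance through ContextData() and Int32 casts, and USE() merely swallows the warn-unused result where the id is optional. The checked form, as used in v8-debugger.cc below:

    int contextId;
    if (!script->ContextId().To(&contextId)) return nullptr;  // no context attached
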
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 87c864cd38..86a48401a6 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -21,9 +21,6 @@ namespace v8_inspector {
namespace {
-// Based on DevTools frontend measurement, with asyncCallStackDepth = 4,
-// average async call stack tail requires ~1 Kb. Let's reserve ~ 128 Mb
-// for async stacks.
static const int kMaxAsyncTaskStacks = 128 * 1024;
inline v8::Local<v8::Boolean> v8Boolean(bool value, v8::Isolate* isolate) {
@@ -32,11 +29,8 @@ inline v8::Local<v8::Boolean> v8Boolean(bool value, v8::Isolate* isolate) {
V8DebuggerAgentImpl* agentForScript(V8InspectorImpl* inspector,
v8::Local<v8::debug::Script> script) {
- v8::Local<v8::Value> contextData;
- if (!script->ContextData().ToLocal(&contextData) || !contextData->IsInt32()) {
- return nullptr;
- }
- int contextId = static_cast<int>(contextData.As<v8::Int32>()->Value());
+ int contextId;
+ if (!script->ContextId().To(&contextId)) return nullptr;
int contextGroupId = inspector->contextGroupId(contextId);
if (!contextGroupId) return nullptr;
return inspector->enabledDebuggerAgentForGroup(contextGroupId);
@@ -136,6 +130,17 @@ v8::MaybeLocal<v8::Object> generatorObjectLocation(
suspendedLocation.GetColumnNumber());
}
+template <typename Map>
+void cleanupExpiredWeakPointers(Map& map) {
+ for (auto it = map.begin(); it != map.end();) {
+ if (it->second.expired()) {
+ it = map.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
} // namespace
static bool inLiveEditScope = false;
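
cleanupExpiredWeakPointers is the generic half of the new ownership scheme in this file: maps hold std::weak_ptr observers and the helper erases entries whose owning std::shared_ptr has gone away. A self-contained sketch of its behavior, with the template copied in the same shape as the hunk above and illustrative key/value types:

    #include <cassert>
    #include <map>
    #include <memory>

    template <typename Map>
    void cleanupExpiredWeakPointers(Map& map) {
      for (auto it = map.begin(); it != map.end();) {
        if (it->second.expired()) {
          it = map.erase(it);
        } else {
          ++it;
        }
      }
    }

    int main() {
      std::map<int, std::weak_ptr<int>> observers;
      auto keep = std::make_shared<int>(1);
      observers[1] = keep;                      // survives: owner still alive
      observers[2] = std::make_shared<int>(2);  // expires: temporary owner dies
      cleanupExpiredWeakPointers(observers);
      assert(observers.size() == 1 && observers.count(1) == 1);
      return 0;
    }
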
@@ -164,10 +169,8 @@ V8Debugger::V8Debugger(v8::Isolate* isolate, V8InspectorImpl* inspector)
m_inspector(inspector),
m_enableCount(0),
m_breakpointsActivated(true),
- m_runningNestedMessageLoop(false),
m_ignoreScriptParsedEventsCounter(0),
m_maxAsyncCallStacks(kMaxAsyncTaskStacks),
- m_lastTaskId(0),
m_maxAsyncCallStackDepth(0),
m_pauseOnExceptionsState(v8::debug::NoBreakOnException),
m_wasmTranslation(isolate) {}
@@ -191,6 +194,7 @@ void V8Debugger::disable() {
if (--m_enableCount) return;
DCHECK(enabled());
clearBreakpoints();
+ clearContinueToLocation();
m_debuggerScript.Reset();
m_debuggerContext.Reset();
allAsyncTasksCanceled();
@@ -212,10 +216,12 @@ void V8Debugger::getCompiledScripts(
for (size_t i = 0; i < scripts.Size(); ++i) {
v8::Local<v8::debug::Script> script = scripts.Get(i);
if (!script->WasCompiled()) continue;
- v8::Local<v8::Value> contextData;
- if (!script->ContextData().ToLocal(&contextData) || !contextData->IsInt32())
+ if (script->IsEmbedded()) {
+ result.push_back(V8DebuggerScript::Create(m_isolate, script, false));
continue;
- int contextId = static_cast<int>(contextData.As<v8::Int32>()->Value());
+ }
+ int contextId;
+ if (!script->ContextId().To(&contextId)) continue;
if (m_inspector->contextGroupId(contextId) != contextGroupId) continue;
result.push_back(V8DebuggerScript::Create(m_isolate, script, false));
}
@@ -358,7 +364,8 @@ bool V8Debugger::breakProgram(int targetContextGroupId) {
return m_inspector->enabledDebuggerAgentForGroup(targetContextGroupId);
}
-void V8Debugger::continueProgram() {
+void V8Debugger::continueProgram(int targetContextGroupId) {
+ if (m_pausedContextGroupId != targetContextGroupId) return;
if (isPaused()) m_inspector->client()->quitMessageLoopOnPause();
m_pausedContext.Clear();
m_executionState.Clear();
@@ -370,7 +377,7 @@ void V8Debugger::stepIntoStatement(int targetContextGroupId) {
DCHECK(targetContextGroupId);
m_targetContextGroupId = targetContextGroupId;
v8::debug::PrepareStep(m_isolate, v8::debug::StepIn);
- continueProgram();
+ continueProgram(targetContextGroupId);
}
void V8Debugger::stepOverStatement(int targetContextGroupId) {
@@ -379,7 +386,7 @@ void V8Debugger::stepOverStatement(int targetContextGroupId) {
DCHECK(targetContextGroupId);
m_targetContextGroupId = targetContextGroupId;
v8::debug::PrepareStep(m_isolate, v8::debug::StepNext);
- continueProgram();
+ continueProgram(targetContextGroupId);
}
void V8Debugger::stepOutOfFunction(int targetContextGroupId) {
@@ -388,7 +395,7 @@ void V8Debugger::stepOutOfFunction(int targetContextGroupId) {
DCHECK(targetContextGroupId);
m_targetContextGroupId = targetContextGroupId;
v8::debug::PrepareStep(m_isolate, v8::debug::StepOut);
- continueProgram();
+ continueProgram(targetContextGroupId);
}
void V8Debugger::scheduleStepIntoAsync(
@@ -405,6 +412,58 @@ void V8Debugger::scheduleStepIntoAsync(
m_stepIntoAsyncCallback = std::move(callback);
}
+Response V8Debugger::continueToLocation(
+ int targetContextGroupId,
+ std::unique_ptr<protocol::Debugger::Location> location,
+ const String16& targetCallFrames) {
+ DCHECK(isPaused());
+ DCHECK(!m_executionState.IsEmpty());
+ DCHECK(targetContextGroupId);
+ m_targetContextGroupId = targetContextGroupId;
+ ScriptBreakpoint breakpoint(location->getScriptId(),
+ location->getLineNumber(),
+ location->getColumnNumber(0), String16());
+ int lineNumber = 0;
+ int columnNumber = 0;
+ m_continueToLocationBreakpointId =
+ setBreakpoint(breakpoint, &lineNumber, &columnNumber);
+ if (!m_continueToLocationBreakpointId.isEmpty()) {
+ m_continueToLocationTargetCallFrames = targetCallFrames;
+ if (m_continueToLocationTargetCallFrames !=
+ protocol::Debugger::ContinueToLocation::TargetCallFramesEnum::Any) {
+ m_continueToLocationStack = captureStackTrace(true);
+ DCHECK(m_continueToLocationStack);
+ }
+ continueProgram(targetContextGroupId);
+ // TODO(kozyatinskiy): Return actual line and column number.
+ return Response::OK();
+ } else {
+ return Response::Error("Cannot continue to specified location");
+ }
+}
+
+bool V8Debugger::shouldContinueToCurrentLocation() {
+ if (m_continueToLocationTargetCallFrames ==
+ protocol::Debugger::ContinueToLocation::TargetCallFramesEnum::Any) {
+ return true;
+ }
+ std::unique_ptr<V8StackTraceImpl> currentStack = captureStackTrace(true);
+ if (m_continueToLocationTargetCallFrames ==
+ protocol::Debugger::ContinueToLocation::TargetCallFramesEnum::Current) {
+ return m_continueToLocationStack->isEqualIgnoringTopFrame(
+ currentStack.get());
+ }
+ return true;
+}
+
+void V8Debugger::clearContinueToLocation() {
+ if (m_continueToLocationBreakpointId.isEmpty()) return;
+ removeBreakpoint(m_continueToLocationBreakpointId);
+ m_continueToLocationBreakpointId = String16();
+ m_continueToLocationTargetCallFrames = String16();
+ m_continueToLocationStack.reset();
+}
+
Response V8Debugger::setScriptSource(
const String16& sourceID, v8::Local<v8::String> newSource, bool dryRun,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails,
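
continueToLocation now lives in V8Debugger and composes three pieces visible above: a temporary breakpoint at the target, an optional stack snapshot when targetCallFrames is "current", and a top-frame-ignoring comparison when the breakpoint fires, so "continue in this frame only" resumes past hits in recursive or unrelated activations; handleProgramBreak (next hunk) swallows the pause when shouldContinueToCurrentLocation() says the frames do not match. The decision, condensed into illustrative shapes rather than the real signatures:

    #include <string>

    bool ShouldStopAtContinueToLocation(const std::string& targetCallFrames,
                                        bool stacksEqualIgnoringTopFrame) {
      if (targetCallFrames == "any") return true;  // plain continue-to-location
      return stacksEqualIgnoringTopFrame;          // "current": same frame only
    }
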
@@ -560,11 +619,17 @@ void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
breakpointIds.push_back(String16::fromInteger(
hitBreakpointNumber->Int32Value(debuggerContext()).FromJust()));
}
+ if (breakpointIds.size() == 1 &&
+ breakpointIds[0] == m_continueToLocationBreakpointId) {
+ v8::Context::Scope contextScope(pausedContext);
+ if (!shouldContinueToCurrentLocation()) return;
+ }
}
+ clearContinueToLocation();
m_pausedContext = pausedContext;
m_executionState = executionState;
- m_runningNestedMessageLoop = true;
+ m_pausedContextGroupId = contextGroupId;
agent->didPause(InspectedContext::contextId(pausedContext), exception,
breakpointIds, isPromiseRejection, isUncaught,
m_scheduledOOMBreak);
@@ -576,7 +641,7 @@ void V8Debugger::handleProgramBreak(v8::Local<v8::Context> pausedContext,
CHECK(!context.IsEmpty() &&
context != v8::debug::GetDebugContext(m_isolate));
m_inspector->client()->runMessageLoopOnPause(groupId);
- m_runningNestedMessageLoop = false;
+ m_pausedContextGroupId = 0;
}
// The agent may have been removed in the nested loop.
agent = m_inspector->enabledDebuggerAgentForGroup(groupId);
@@ -643,8 +708,7 @@ bool V8Debugger::IsFunctionBlackboxed(v8::Local<v8::debug::Script> script,
end);
}
-void V8Debugger::PromiseEventOccurred(v8::Local<v8::Context> context,
- v8::debug::PromiseDebugActionType type,
+void V8Debugger::PromiseEventOccurred(v8::debug::PromiseDebugActionType type,
int id, int parentId,
bool createdByUser) {
// Async task events from Promises are given misaligned pointers to prevent
@@ -655,10 +719,7 @@ void V8Debugger::PromiseEventOccurred(v8::Local<v8::Context> context,
switch (type) {
case v8::debug::kDebugPromiseCreated:
asyncTaskCreatedForStack(task, parentTask);
- if (createdByUser && parentTask) {
- v8::Context::Scope contextScope(context);
- asyncTaskCandidateForStepping(task);
- }
+ if (createdByUser && parentTask) asyncTaskCandidateForStepping(task);
break;
case v8::debug::kDebugEnqueueAsyncFunction:
asyncTaskScheduledForStack("async function", task, true);
@@ -669,10 +730,6 @@ void V8Debugger::PromiseEventOccurred(v8::Local<v8::Context> context,
case v8::debug::kDebugEnqueuePromiseReject:
asyncTaskScheduledForStack("Promise.reject", task, true);
break;
- case v8::debug::kDebugPromiseCollected:
- asyncTaskCanceledForStack(task);
- asyncTaskCanceledForStepping(task);
- break;
case v8::debug::kDebugWillHandle:
asyncTaskStartedForStack(task);
asyncTaskStartedForStepping(task);
@@ -684,9 +741,16 @@ void V8Debugger::PromiseEventOccurred(v8::Local<v8::Context> context,
}
}
-V8StackTraceImpl* V8Debugger::currentAsyncCallChain() {
- if (!m_currentStacks.size()) return nullptr;
- return m_currentStacks.back().get();
+std::shared_ptr<AsyncStackTrace> V8Debugger::currentAsyncParent() {
+ // TODO(kozyatinskiy): implement creation chain as parent without hack.
+ if (!m_currentAsyncCreation.empty() && m_currentAsyncCreation.back()) {
+ return m_currentAsyncCreation.back();
+ }
+ return m_currentAsyncParent.empty() ? nullptr : m_currentAsyncParent.back();
+}
+
+std::shared_ptr<AsyncStackTrace> V8Debugger::currentAsyncCreation() {
+ return nullptr;
}
void V8Debugger::compileDebuggerScript() {
@@ -827,8 +891,8 @@ v8::MaybeLocal<v8::Array> V8Debugger::internalProperties(
}
std::unique_ptr<V8StackTraceImpl> V8Debugger::createStackTrace(
- v8::Local<v8::StackTrace> stackTrace) {
- return V8StackTraceImpl::create(this, currentContextGroupId(), stackTrace,
+ v8::Local<v8::StackTrace> v8StackTrace) {
+ return V8StackTraceImpl::create(this, currentContextGroupId(), v8StackTrace,
V8StackTraceImpl::maxCallStackSizeToCapture);
}
@@ -849,31 +913,19 @@ void V8Debugger::setAsyncCallStackDepth(V8DebuggerAgentImpl* agent, int depth) {
if (!maxAsyncCallStackDepth) allAsyncTasksCanceled();
}
-void V8Debugger::registerAsyncTaskIfNeeded(void* task) {
- if (m_taskToId.find(task) != m_taskToId.end()) return;
-
- int id = ++m_lastTaskId;
- m_taskToId[task] = id;
- m_idToTask[id] = task;
- if (static_cast<int>(m_idToTask.size()) > m_maxAsyncCallStacks) {
- void* taskToRemove = m_idToTask.begin()->second;
- asyncTaskCanceledForStack(taskToRemove);
- }
-}
-
void V8Debugger::asyncTaskCreatedForStack(void* task, void* parentTask) {
if (!m_maxAsyncCallStackDepth) return;
if (parentTask) m_parentTask[task] = parentTask;
v8::HandleScope scope(m_isolate);
- // We don't need to pass context group id here because we get this callback
- // from V8 for promise events only.
- // Passing one as maxStackSize forces no async chain for the new stack and
- // allows us to not grow exponentially.
- std::unique_ptr<V8StackTraceImpl> creationStack =
- V8StackTraceImpl::capture(this, 0, 1, String16());
- if (creationStack && !creationStack->isEmpty()) {
- m_asyncTaskCreationStacks[task] = std::move(creationStack);
- registerAsyncTaskIfNeeded(task);
+ std::shared_ptr<AsyncStackTrace> asyncCreation =
+ AsyncStackTrace::capture(this, currentContextGroupId(), String16(),
+ V8StackTraceImpl::maxCallStackSizeToCapture);
+ // Passing one as maxStackSize forces no async chain for the new stack.
+ if (asyncCreation && !asyncCreation->isEmpty()) {
+ m_asyncTaskCreationStacks[task] = asyncCreation;
+ m_allAsyncStacks.push_back(std::move(asyncCreation));
+ ++m_asyncStacksCount;
+ collectOldAsyncStacksIfNeeded();
}
}
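
The storage model changes from owning unique_ptr maps plus an id-based eviction scheme to a single owning deque with weak observers: every captured AsyncStackTrace is pushed into m_allAsyncStacks, the task-keyed maps hold weak_ptrs, and collectOldAsyncStacksIfNeeded (end of this file) trims the deque to roughly half the limit and then sweeps the expired observers. A minimal sketch of the shape, with member names taken from the hunks and a stub AsyncStackTrace:

    #include <deque>
    #include <map>
    #include <memory>

    struct AsyncStackTrace {};

    // Owner: bounded FIFO of every captured stack; trimming pops the front.
    std::deque<std::shared_ptr<AsyncStackTrace>> m_allAsyncStacks;
    // Observer: task -> stack; goes stale once the deque drops the owner and
    // is then swept by cleanupExpiredWeakPointers().
    std::map<void*, std::weak_ptr<AsyncStackTrace>> m_asyncTaskStacks;

    void RecordTaskStack(void* task) {
      auto stack = std::make_shared<AsyncStackTrace>();
      m_asyncTaskStacks[task] = stack;               // weak: never keeps it alive
      m_allAsyncStacks.push_back(std::move(stack));  // strong: sole owner
    }
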
@@ -902,13 +954,15 @@ void V8Debugger::asyncTaskScheduledForStack(const String16& taskName,
void* task, bool recurring) {
if (!m_maxAsyncCallStackDepth) return;
v8::HandleScope scope(m_isolate);
- std::unique_ptr<V8StackTraceImpl> chain = V8StackTraceImpl::capture(
- this, currentContextGroupId(),
- V8StackTraceImpl::maxCallStackSizeToCapture, taskName);
- if (chain) {
- m_asyncTaskStacks[task] = std::move(chain);
+ std::shared_ptr<AsyncStackTrace> asyncStack =
+ AsyncStackTrace::capture(this, currentContextGroupId(), taskName,
+ V8StackTraceImpl::maxCallStackSizeToCapture);
+ if (asyncStack) {
+ m_asyncTaskStacks[task] = asyncStack;
if (recurring) m_recurringTasks.insert(task);
- registerAsyncTaskIfNeeded(task);
+ m_allAsyncStacks.push_back(std::move(asyncStack));
+ ++m_asyncStacksCount;
+ collectOldAsyncStacksIfNeeded();
}
}
@@ -918,18 +972,10 @@ void V8Debugger::asyncTaskCanceledForStack(void* task) {
m_recurringTasks.erase(task);
m_parentTask.erase(task);
m_asyncTaskCreationStacks.erase(task);
- auto it = m_taskToId.find(task);
- if (it == m_taskToId.end()) return;
- m_idToTask.erase(it->second);
- m_taskToId.erase(it);
}
void V8Debugger::asyncTaskStartedForStack(void* task) {
if (!m_maxAsyncCallStackDepth) return;
- m_currentTasks.push_back(task);
- auto parentIt = m_parentTask.find(task);
- AsyncTaskToStackTrace::iterator stackIt = m_asyncTaskStacks.find(
- parentIt == m_parentTask.end() ? task : parentIt->second);
// Needs to support following order of events:
// - asyncTaskScheduled
// <-- attached here -->
@@ -937,25 +983,40 @@ void V8Debugger::asyncTaskStartedForStack(void* task) {
// - asyncTaskCanceled <-- canceled before finished
// <-- async stack requested here -->
// - asyncTaskFinished
- std::unique_ptr<V8StackTraceImpl> stack;
- if (stackIt != m_asyncTaskStacks.end() && stackIt->second)
- stack = stackIt->second->cloneImpl();
+ m_currentTasks.push_back(task);
+ auto parentIt = m_parentTask.find(task);
+ AsyncTaskToStackTrace::iterator stackIt = m_asyncTaskStacks.find(
+ parentIt == m_parentTask.end() ? task : parentIt->second);
+ if (stackIt != m_asyncTaskStacks.end()) {
+ m_currentAsyncParent.push_back(stackIt->second.lock());
+ } else {
+ m_currentAsyncParent.emplace_back();
+ }
auto itCreation = m_asyncTaskCreationStacks.find(task);
- if (stack && itCreation != m_asyncTaskCreationStacks.end()) {
- stack->setCreation(itCreation->second->cloneImpl());
+ if (itCreation != m_asyncTaskCreationStacks.end()) {
+ m_currentAsyncCreation.push_back(itCreation->second.lock());
+ // TODO(kozyatinskiy): implement it without hack.
+ if (m_currentAsyncParent.back()) {
+ m_currentAsyncCreation.back()->setDescription(
+ m_currentAsyncParent.back()->description());
+ m_currentAsyncParent.back().reset();
+ }
+ } else {
+ m_currentAsyncCreation.emplace_back();
}
- m_currentStacks.push_back(std::move(stack));
}
void V8Debugger::asyncTaskFinishedForStack(void* task) {
if (!m_maxAsyncCallStackDepth) return;
// We could start instrumenting halfway through, leaving the stack empty.
- if (!m_currentStacks.size()) return;
-
+ if (!m_currentTasks.size()) return;
DCHECK(m_currentTasks.back() == task);
m_currentTasks.pop_back();
- m_currentStacks.pop_back();
+ DCHECK(m_currentAsyncParent.size() == m_currentAsyncCreation.size());
+ m_currentAsyncParent.pop_back();
+ m_currentAsyncCreation.pop_back();
+
if (m_recurringTasks.find(task) == m_recurringTasks.end()) {
asyncTaskCanceledForStack(task);
}
@@ -992,13 +1053,15 @@ void V8Debugger::asyncTaskCanceledForStepping(void* task) {
void V8Debugger::allAsyncTasksCanceled() {
m_asyncTaskStacks.clear();
m_recurringTasks.clear();
- m_currentStacks.clear();
+ m_currentAsyncParent.clear();
+ m_currentAsyncCreation.clear();
m_currentTasks.clear();
m_parentTask.clear();
m_asyncTaskCreationStacks.clear();
- m_idToTask.clear();
- m_taskToId.clear();
- m_lastTaskId = 0;
+
+ m_framesCache.clear();
+ m_allAsyncStacks.clear();
+ m_asyncStacksCount = 0;
}
void V8Debugger::muteScriptParsedEvents() {
@@ -1018,11 +1081,10 @@ std::unique_ptr<V8StackTraceImpl> V8Debugger::captureStackTrace(
int contextGroupId = currentContextGroupId();
if (!contextGroupId) return nullptr;
- size_t stackSize =
- fullStack ? V8StackTraceImpl::maxCallStackSizeToCapture : 1;
- if (m_inspector->enabledRuntimeAgentForGroup(contextGroupId))
+ int stackSize = 1;
+ if (fullStack || m_inspector->enabledRuntimeAgentForGroup(contextGroupId)) {
stackSize = V8StackTraceImpl::maxCallStackSizeToCapture;
-
+ }
return V8StackTraceImpl::capture(this, contextGroupId, stackSize);
}
@@ -1031,4 +1093,68 @@ int V8Debugger::currentContextGroupId() {
return m_inspector->contextGroupId(m_isolate->GetCurrentContext());
}
+void V8Debugger::collectOldAsyncStacksIfNeeded() {
+ if (m_asyncStacksCount <= m_maxAsyncCallStacks) return;
+ int halfOfLimitRoundedUp =
+ m_maxAsyncCallStacks / 2 + m_maxAsyncCallStacks % 2;
+ while (m_asyncStacksCount > halfOfLimitRoundedUp) {
+ m_allAsyncStacks.pop_front();
+ --m_asyncStacksCount;
+ }
+ cleanupExpiredWeakPointers(m_asyncTaskStacks);
+ cleanupExpiredWeakPointers(m_asyncTaskCreationStacks);
+ for (auto it = m_recurringTasks.begin(); it != m_recurringTasks.end();) {
+ if (m_asyncTaskStacks.find(*it) == m_asyncTaskStacks.end()) {
+ it = m_recurringTasks.erase(it);
+ } else {
+ ++it;
+ }
+ }
+ for (auto it = m_parentTask.begin(); it != m_parentTask.end();) {
+ if (m_asyncTaskCreationStacks.find(it->second) ==
+ m_asyncTaskCreationStacks.end() &&
+ m_asyncTaskStacks.find(it->second) == m_asyncTaskStacks.end()) {
+ it = m_parentTask.erase(it);
+ } else {
+ ++it;
+ }
+ }
+ cleanupExpiredWeakPointers(m_framesCache);
+}
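
cleanupExpiredWeakPointers() is defined outside this hunk; presumably it sweeps a map and erases entries whose weak_ptr has expired, along these lines (a sketch, not the patch's code):

    template <typename Map>
    void cleanupExpiredWeakPointers(Map& map) {
      for (auto it = map.begin(); it != map.end();) {
        if (it->second.expired()) {
          it = map.erase(it);  // the stack was already collected
        } else {
          ++it;
        }
      }
    }
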
+
+std::shared_ptr<StackFrame> V8Debugger::symbolize(
+ v8::Local<v8::StackFrame> v8Frame) {
+ auto it = m_framesCache.end();
+ int frameId = 0;
+ if (m_maxAsyncCallStackDepth) {
+ frameId = v8::debug::GetStackFrameId(v8Frame);
+ it = m_framesCache.find(frameId);
+ }
+ if (it != m_framesCache.end() && it->second.lock()) return it->second.lock();
+ std::shared_ptr<StackFrame> frame(new StackFrame(v8Frame));
+ // TODO(clemensh): Figure out a way to do this translation only right before
+ // sending the stack trace over wire.
+ if (v8Frame->IsWasm()) frame->translate(&m_wasmTranslation);
+ if (m_maxAsyncCallStackDepth) {
+ m_framesCache[frameId] = frame;
+ }
+ return frame;
+}
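
symbolize() therefore interns frames: every stack trace that sees the same V8 frame id shares a single StackFrame object, and because m_framesCache stores weak_ptrs the cache itself never keeps a frame alive. The lookup shape, reduced to a self-contained sketch with stand-in types:

    #include <map>
    #include <memory>
    #include <string>

    std::map<int, std::weak_ptr<std::string>> cache;

    std::shared_ptr<std::string> intern(int id, const char* text) {
      auto it = cache.find(id);
      if (it != cache.end()) {
        if (auto hit = it->second.lock()) return hit;  // reuse a live entry
      }
      auto fresh = std::make_shared<std::string>(text);
      cache[id] = fresh;  // observe only; callers own the value
      return fresh;
    }
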
+
+void V8Debugger::setMaxAsyncTaskStacksForTest(int limit) {
+ m_maxAsyncCallStacks = 0;
+ collectOldAsyncStacksIfNeeded();
+ m_maxAsyncCallStacks = limit;
+}
+
+void V8Debugger::dumpAsyncTaskStacksStateForTest() {
+ fprintf(stdout, "Async stacks count: %d\n", m_asyncStacksCount);
+ fprintf(stdout, "Scheduled async tasks: %zu\n", m_asyncTaskStacks.size());
+ fprintf(stdout, "Created async tasks: %zu\n",
+ m_asyncTaskCreationStacks.size());
+ fprintf(stdout, "Async tasks with parent: %zu\n", m_parentTask.size());
+ fprintf(stdout, "Recurring async tasks: %zu\n", m_recurringTasks.size());
+ fprintf(stdout, "\n");
+}
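
The dump prints one counter per bookkeeping structure; sample output (values invented for illustration):

    Async stacks count: 2
    Scheduled async tasks: 1
    Created async tasks: 1
    Async tasks with parent: 0
    Recurring async tasks: 0
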
+
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-debugger.h b/deps/v8/src/inspector/v8-debugger.h
index 83cc8afb0d..a15c288c0d 100644
--- a/deps/v8/src/inspector/v8-debugger.h
+++ b/deps/v8/src/inspector/v8-debugger.h
@@ -5,6 +5,7 @@
#ifndef V8_INSPECTOR_V8DEBUGGER_H_
#define V8_INSPECTOR_V8DEBUGGER_H_
+#include <list>
#include <vector>
#include "src/base/macros.h"
@@ -20,7 +21,10 @@
namespace v8_inspector {
+class AsyncStackTrace;
struct ScriptBreakpoint;
+class StackFrame;
+class V8Debugger;
class V8DebuggerAgentImpl;
class V8InspectorImpl;
class V8StackTraceImpl;
@@ -35,6 +39,7 @@ class V8Debugger : public v8::debug::DebugDelegate {
~V8Debugger();
bool enabled() const;
+ v8::Isolate* isolate() const { return m_isolate; }
String16 setBreakpoint(const ScriptBreakpoint&, int* actualLineNumber,
int* actualColumnNumber);
@@ -46,7 +51,7 @@ class V8Debugger : public v8::debug::DebugDelegate {
void setPauseOnExceptionsState(v8::debug::ExceptionBreakState);
bool canBreakProgram();
bool breakProgram(int targetContextGroupId);
- void continueProgram();
+ void continueProgram(int targetContextGroupId);
void setPauseOnNextStatement(bool, int targetContextGroupId);
void stepIntoStatement(int targetContextGroupId);
@@ -56,6 +61,10 @@ class V8Debugger : public v8::debug::DebugDelegate {
std::unique_ptr<ScheduleStepIntoAsyncCallback> callback,
int targetContextGroupId);
+ Response continueToLocation(int targetContextGroupId,
+ std::unique_ptr<protocol::Debugger::Location>,
+ const String16& targetCallFrames);
+
Response setScriptSource(
const String16& sourceID, v8::Local<v8::String> newSource, bool dryRun,
protocol::Maybe<protocol::Runtime::ExceptionDetails>*,
@@ -72,12 +81,17 @@ class V8Debugger : public v8::debug::DebugDelegate {
void enable();
void disable();
- bool isPaused() const { return m_runningNestedMessageLoop; }
+ bool isPaused() const { return m_pausedContextGroupId; }
v8::Local<v8::Context> pausedContext() { return m_pausedContext; }
int maxAsyncCallChainDepth() { return m_maxAsyncCallStackDepth; }
- V8StackTraceImpl* currentAsyncCallChain();
void setAsyncCallStackDepth(V8DebuggerAgentImpl*, int);
+
+ std::shared_ptr<AsyncStackTrace> currentAsyncParent();
+ std::shared_ptr<AsyncStackTrace> currentAsyncCreation();
+
+ std::shared_ptr<StackFrame> symbolize(v8::Local<v8::StackFrame> v8Frame);
+
std::unique_ptr<V8StackTraceImpl> createStackTrace(v8::Local<v8::StackTrace>);
std::unique_ptr<V8StackTraceImpl> captureStackTrace(bool fullStack);
@@ -98,7 +112,8 @@ class V8Debugger : public v8::debug::DebugDelegate {
WasmTranslation* wasmTranslation() { return &m_wasmTranslation; }
- void setMaxAsyncTaskStacksForTest(int limit) { m_maxAsyncCallStacks = limit; }
+ void setMaxAsyncTaskStacksForTest(int limit);
+ void dumpAsyncTaskStacksStateForTest();
private:
void compileDebuggerScript();
@@ -108,6 +123,8 @@ class V8Debugger : public v8::debug::DebugDelegate {
bool catchExceptions);
v8::Local<v8::Context> debuggerContext() const;
void clearBreakpoints();
+ void clearContinueToLocation();
+ bool shouldContinueToCurrentLocation();
static void v8OOMCallback(void* data);
@@ -143,11 +160,8 @@ class V8Debugger : public v8::debug::DebugDelegate {
void asyncTaskFinishedForStepping(void* task);
void asyncTaskCanceledForStepping(void* task);
- void registerAsyncTaskIfNeeded(void* task);
-
// v8::debug::DebugEventListener implementation.
- void PromiseEventOccurred(v8::Local<v8::Context> context,
- v8::debug::PromiseDebugActionType type, int id,
+ void PromiseEventOccurred(v8::debug::PromiseDebugActionType type, int id,
int parentId, bool createdByUser) override;
void ScriptCompiled(v8::Local<v8::debug::Script> script,
bool has_compile_error) override;
@@ -172,26 +186,36 @@ class V8Debugger : public v8::debug::DebugDelegate {
v8::Global<v8::Context> m_debuggerContext;
v8::Local<v8::Object> m_executionState;
v8::Local<v8::Context> m_pausedContext;
- bool m_runningNestedMessageLoop;
int m_ignoreScriptParsedEventsCounter;
bool m_scheduledOOMBreak = false;
int m_targetContextGroupId = 0;
+ int m_pausedContextGroupId = 0;
+ String16 m_continueToLocationBreakpointId;
+ String16 m_continueToLocationTargetCallFrames;
+ std::unique_ptr<V8StackTraceImpl> m_continueToLocationStack;
using AsyncTaskToStackTrace =
- protocol::HashMap<void*, std::unique_ptr<V8StackTraceImpl>>;
+ protocol::HashMap<void*, std::weak_ptr<AsyncStackTrace>>;
AsyncTaskToStackTrace m_asyncTaskStacks;
AsyncTaskToStackTrace m_asyncTaskCreationStacks;
- int m_maxAsyncCallStacks;
- std::map<int, void*> m_idToTask;
- std::unordered_map<void*, int> m_taskToId;
- int m_lastTaskId;
protocol::HashSet<void*> m_recurringTasks;
+ protocol::HashMap<void*, void*> m_parentTask;
+
+ int m_maxAsyncCallStacks;
int m_maxAsyncCallStackDepth;
+
std::vector<void*> m_currentTasks;
- std::vector<std::unique_ptr<V8StackTraceImpl>> m_currentStacks;
+ std::vector<std::shared_ptr<AsyncStackTrace>> m_currentAsyncParent;
+ std::vector<std::shared_ptr<AsyncStackTrace>> m_currentAsyncCreation;
+
+ void collectOldAsyncStacksIfNeeded();
+ int m_asyncStacksCount = 0;
+ // V8Debugger owns all the async stacks, while most of the other references
+ // are weak, which allows to collect some stacks when there are too many.
+ std::list<std::shared_ptr<AsyncStackTrace>> m_allAsyncStacks;
+ std::map<int, std::weak_ptr<StackFrame>> m_framesCache;
+
protocol::HashMap<V8DebuggerAgentImpl*, int> m_maxAsyncCallStackDepthMap;
- protocol::HashMap<void*, void*> m_parentTask;
- protocol::HashMap<void*, void*> m_firstNextTask;
void* m_taskWithScheduledBreak = nullptr;
std::unique_ptr<ScheduleStepIntoAsyncCallback> m_stepIntoAsyncCallback;
diff --git a/deps/v8/src/inspector/v8-function-call.cc b/deps/v8/src/inspector/v8-function-call.cc
index b8c86d3da0..0fcca70cb7 100644
--- a/deps/v8/src/inspector/v8-function-call.cc
+++ b/deps/v8/src/inspector/v8-function-call.cc
@@ -75,6 +75,8 @@ v8::Local<v8::Value> V8FunctionCall::call(bool& hadException,
}
v8::Local<v8::Value> V8FunctionCall::callWithoutExceptionHandling() {
+ v8::Context::Scope contextScope(m_context);
+
v8::Local<v8::Object> thisObject = v8::Local<v8::Object>::Cast(m_value);
v8::Local<v8::Value> value;
if (!thisObject->Get(m_context, m_name).ToLocal(&value))
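
The added v8::Context::Scope is an RAII guard: it enters m_context for the duration of the call and restores the previous context on every return path, including the early ToLocal failure just above. The idiom in isolation (a sketch, not code from the patch):

    void runInContext(v8::Isolate* isolate, v8::Local<v8::Context> context) {
      v8::HandleScope handleScope(isolate);
      v8::Context::Scope contextScope(context);  // Enter() happens here
      // ... V8 API calls here observe `context` as the current context ...
    }                                            // Exit() as the scope unwinds
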
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index 705fd793de..3c55507c5a 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -56,9 +56,13 @@ V8InspectorImpl::V8InspectorImpl(v8::Isolate* isolate,
m_debugger(new V8Debugger(isolate, this)),
m_capturingStackTracesCount(0),
m_lastExceptionId(0),
- m_lastContextId(0) {}
+ m_lastContextId(0) {
+ v8::debug::SetConsoleDelegate(m_isolate, console());
+}
-V8InspectorImpl::~V8InspectorImpl() {}
+V8InspectorImpl::~V8InspectorImpl() {
+ v8::debug::SetConsoleDelegate(m_isolate, nullptr);
+}
int V8InspectorImpl::contextGroupId(v8::Local<v8::Context> context) {
return contextGroupId(InspectedContext::contextId(context));
diff --git a/deps/v8/src/inspector/v8-regex.cc b/deps/v8/src/inspector/v8-regex.cc
index 47af70d360..0bab4364c4 100644
--- a/deps/v8/src/inspector/v8-regex.cc
+++ b/deps/v8/src/inspector/v8-regex.cc
@@ -49,6 +49,7 @@ int V8Regex::match(const String16& string, int startFrom,
v8::Isolate* isolate = m_inspector->isolate();
v8::HandleScope handleScope(isolate);
v8::Local<v8::Context> context = m_inspector->regexContext();
+ v8::Context::Scope contextScope(context);
v8::MicrotasksScope microtasks(isolate,
v8::MicrotasksScope::kDoNotRunMicrotasks);
v8::TryCatch tryCatch(isolate);
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.cc b/deps/v8/src/inspector/v8-stack-trace-impl.cc
index dddad36202..9db6b47caf 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.cc
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.cc
@@ -4,12 +4,10 @@
#include "src/inspector/v8-stack-trace-impl.h"
-#include "src/inspector/string-util.h"
-#include "src/inspector/v8-debugger-agent-impl.h"
-#include "src/inspector/v8-debugger.h"
-#include "src/inspector/v8-inspector-impl.h"
+#include <algorithm>
-#include "include/v8-version.h"
+#include "src/inspector/v8-debugger.h"
+#include "src/inspector/wasm-translation.h"
namespace v8_inspector {
@@ -17,270 +15,212 @@ namespace {
static const v8::StackTrace::StackTraceOptions stackTraceOptions =
static_cast<v8::StackTrace::StackTraceOptions>(
- v8::StackTrace::kLineNumber | v8::StackTrace::kColumnOffset |
- v8::StackTrace::kScriptId | v8::StackTrace::kScriptNameOrSourceURL |
- v8::StackTrace::kFunctionName |
+ v8::StackTrace::kDetailed |
v8::StackTrace::kExposeFramesAcrossSecurityOrigins);
-V8StackTraceImpl::Frame toFrame(v8::Local<v8::StackFrame> frame,
- WasmTranslation* wasmTranslation,
- int contextGroupId) {
- String16 scriptId = String16::fromInteger(frame->GetScriptId());
- String16 sourceName;
- v8::Local<v8::String> sourceNameValue(frame->GetScriptNameOrSourceURL());
- if (!sourceNameValue.IsEmpty())
- sourceName = toProtocolString(sourceNameValue);
-
- String16 functionName;
- v8::Local<v8::String> functionNameValue(frame->GetFunctionName());
- if (!functionNameValue.IsEmpty())
- functionName = toProtocolString(functionNameValue);
-
- int sourceLineNumber = frame->GetLineNumber() - 1;
- int sourceColumn = frame->GetColumn() - 1;
- // TODO(clemensh): Figure out a way to do this translation only right before
- // sending the stack trace over wire.
- if (frame->IsWasm()) {
- wasmTranslation->TranslateWasmScriptLocationToProtocolLocation(
- &scriptId, &sourceLineNumber, &sourceColumn);
+std::vector<std::shared_ptr<StackFrame>> toFramesVector(
+ V8Debugger* debugger, v8::Local<v8::StackTrace> v8StackTrace,
+ int maxStackSize) {
+ DCHECK(debugger->isolate()->InContext());
+ int frameCount = std::min(v8StackTrace->GetFrameCount(), maxStackSize);
+ std::vector<std::shared_ptr<StackFrame>> frames;
+ for (int i = 0; i < frameCount; ++i) {
+ frames.push_back(debugger->symbolize(v8StackTrace->GetFrame(i)));
}
- return V8StackTraceImpl::Frame(functionName, scriptId, sourceName,
- sourceLineNumber + 1, sourceColumn + 1);
+ return frames;
}
-void toFramesVector(v8::Local<v8::StackTrace> stackTrace,
- std::vector<V8StackTraceImpl::Frame>& frames,
- size_t maxStackSize, v8::Isolate* isolate,
- V8Debugger* debugger, int contextGroupId) {
- DCHECK(isolate->InContext());
- int frameCount = stackTrace->GetFrameCount();
- if (frameCount > static_cast<int>(maxStackSize))
- frameCount = static_cast<int>(maxStackSize);
- WasmTranslation* wasmTranslation = debugger->wasmTranslation();
- for (int i = 0; i < frameCount; i++) {
- v8::Local<v8::StackFrame> stackFrame = stackTrace->GetFrame(i);
- frames.push_back(toFrame(stackFrame, wasmTranslation, contextGroupId));
+void calculateAsyncChain(V8Debugger* debugger, int contextGroupId,
+ std::shared_ptr<AsyncStackTrace>* asyncParent,
+ std::shared_ptr<AsyncStackTrace>* asyncCreation,
+ int* maxAsyncDepth) {
+ *asyncParent = debugger->currentAsyncParent();
+ *asyncCreation = debugger->currentAsyncCreation();
+ if (maxAsyncDepth) *maxAsyncDepth = debugger->maxAsyncCallChainDepth();
+
+ DCHECK(!*asyncParent || !*asyncCreation ||
+ (*asyncParent)->contextGroupId() ==
+ (*asyncCreation)->contextGroupId());
+ // Do not accidentally append an async call chain from another group. This
+ // should not happen with proper instrumentation, but let's double-check to
+ // be safe.
+ if (contextGroupId && *asyncParent &&
+ (*asyncParent)->contextGroupId() != contextGroupId) {
+ asyncParent->reset();
+ asyncCreation->reset();
+ if (maxAsyncDepth) *maxAsyncDepth = 0;
+ return;
+ }
+
+ // Only the top stack in the chain may be empty and lack a creation stack,
+ // so ensure that the second stack is non-empty (it's the top of the
+ // appended chain).
+ if (*asyncParent && !(*asyncCreation) && !(*asyncParent)->creation().lock() &&
+ (*asyncParent)->isEmpty()) {
+ *asyncParent = (*asyncParent)->parent().lock();
}
}
+std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectCommon(
+ const std::vector<std::shared_ptr<StackFrame>>& frames,
+ const String16& description,
+ const std::shared_ptr<AsyncStackTrace>& asyncParent,
+ const std::shared_ptr<AsyncStackTrace>& asyncCreation, int maxAsyncDepth) {
+ if (asyncParent && frames.empty() &&
+ description == asyncParent->description() && !asyncCreation) {
+ return asyncParent->buildInspectorObject(nullptr, maxAsyncDepth);
+ }
+
+ std::unique_ptr<protocol::Array<protocol::Runtime::CallFrame>>
+ inspectorFrames = protocol::Array<protocol::Runtime::CallFrame>::create();
+ for (size_t i = 0; i < frames.size(); i++) {
+ inspectorFrames->addItem(frames[i]->buildInspectorObject());
+ }
+ std::unique_ptr<protocol::Runtime::StackTrace> stackTrace =
+ protocol::Runtime::StackTrace::create()
+ .setCallFrames(std::move(inspectorFrames))
+ .build();
+ if (!description.isEmpty()) stackTrace->setDescription(description);
+ if (asyncParent && maxAsyncDepth > 0) {
+ stackTrace->setParent(asyncParent->buildInspectorObject(asyncCreation.get(),
+ maxAsyncDepth - 1));
+ }
+ return stackTrace;
+}
+
} // namespace
-V8StackTraceImpl::Frame::Frame()
- : m_functionName("undefined"),
- m_scriptId(""),
- m_scriptName("undefined"),
- m_lineNumber(0),
- m_columnNumber(0) {}
-
-V8StackTraceImpl::Frame::Frame(const String16& functionName,
- const String16& scriptId,
- const String16& scriptName, int lineNumber,
- int column)
- : m_functionName(functionName),
- m_scriptId(scriptId),
- m_scriptName(scriptName),
- m_lineNumber(lineNumber),
- m_columnNumber(column) {
- DCHECK(m_lineNumber != v8::Message::kNoLineNumberInfo);
- DCHECK(m_columnNumber != v8::Message::kNoColumnInfo);
+StackFrame::StackFrame(v8::Local<v8::StackFrame> v8Frame)
+ : m_functionName(toProtocolString(v8Frame->GetFunctionName())),
+ m_scriptId(String16::fromInteger(v8Frame->GetScriptId())),
+ m_sourceURL(toProtocolString(v8Frame->GetScriptNameOrSourceURL())),
+ m_lineNumber(v8Frame->GetLineNumber() - 1),
+ m_columnNumber(v8Frame->GetColumn() - 1) {
+ DCHECK(m_lineNumber + 1 != v8::Message::kNoLineNumberInfo);
+ DCHECK(m_columnNumber + 1 != v8::Message::kNoColumnInfo);
}
-V8StackTraceImpl::Frame::~Frame() {}
+void StackFrame::translate(WasmTranslation* wasmTranslation) {
+ wasmTranslation->TranslateWasmScriptLocationToProtocolLocation(
+ &m_scriptId, &m_lineNumber, &m_columnNumber);
+}
+
+const String16& StackFrame::functionName() const { return m_functionName; }
+
+const String16& StackFrame::scriptId() const { return m_scriptId; }
-// buildInspectorObject() and SourceLocation's toTracedValue() should set the
-// same fields.
-// If either of them is modified, the other should be also modified.
-std::unique_ptr<protocol::Runtime::CallFrame>
-V8StackTraceImpl::Frame::buildInspectorObject() const {
+const String16& StackFrame::sourceURL() const { return m_sourceURL; }
+
+int StackFrame::lineNumber() const { return m_lineNumber; }
+
+int StackFrame::columnNumber() const { return m_columnNumber; }
+
+std::unique_ptr<protocol::Runtime::CallFrame> StackFrame::buildInspectorObject()
+ const {
return protocol::Runtime::CallFrame::create()
.setFunctionName(m_functionName)
.setScriptId(m_scriptId)
- .setUrl(m_scriptName)
- .setLineNumber(m_lineNumber - 1)
- .setColumnNumber(m_columnNumber - 1)
+ .setUrl(m_sourceURL)
+ .setLineNumber(m_lineNumber)
+ .setColumnNumber(m_columnNumber)
.build();
}
-V8StackTraceImpl::Frame V8StackTraceImpl::Frame::clone() const {
- return Frame(m_functionName, m_scriptId, m_scriptName, m_lineNumber,
- m_columnNumber);
+bool StackFrame::isEqual(StackFrame* frame) const {
+ return m_scriptId == frame->m_scriptId &&
+ m_lineNumber == frame->m_lineNumber &&
+ m_columnNumber == frame->m_columnNumber;
}
// static
void V8StackTraceImpl::setCaptureStackTraceForUncaughtExceptions(
v8::Isolate* isolate, bool capture) {
isolate->SetCaptureStackTraceForUncaughtExceptions(
- capture, V8StackTraceImpl::maxCallStackSizeToCapture, stackTraceOptions);
+ capture, V8StackTraceImpl::maxCallStackSizeToCapture);
}
// static
std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::create(
V8Debugger* debugger, int contextGroupId,
- v8::Local<v8::StackTrace> stackTrace, size_t maxStackSize,
- const String16& description) {
+ v8::Local<v8::StackTrace> v8StackTrace, int maxStackSize) {
DCHECK(debugger);
- v8::Isolate* isolate = debugger->inspector()->isolate();
- v8::HandleScope scope(isolate);
- std::vector<V8StackTraceImpl::Frame> frames;
- if (!stackTrace.IsEmpty())
- toFramesVector(stackTrace, frames, maxStackSize, isolate, debugger,
- contextGroupId);
-
- int maxAsyncCallChainDepth = 1;
- V8StackTraceImpl* asyncCallChain = nullptr;
- if (maxStackSize > 1) {
- asyncCallChain = debugger->currentAsyncCallChain();
- maxAsyncCallChainDepth = debugger->maxAsyncCallChainDepth();
- }
- // Do not accidentally append async call chain from another group. This should
- // not
- // happen if we have proper instrumentation, but let's double-check to be
- // safe.
- if (contextGroupId && asyncCallChain && asyncCallChain->m_contextGroupId &&
- asyncCallChain->m_contextGroupId != contextGroupId) {
- asyncCallChain = nullptr;
- maxAsyncCallChainDepth = 1;
- }
-
- // Only the top stack in the chain may be empty and doesn't contain creation
- // stack , so ensure that second stack is non-empty (it's the top of appended
- // chain).
- if (asyncCallChain && asyncCallChain->isEmpty() &&
- !asyncCallChain->m_creation) {
- asyncCallChain = asyncCallChain->m_parent.get();
- }
-
- if (stackTrace.IsEmpty() && !asyncCallChain) return nullptr;
- std::unique_ptr<V8StackTraceImpl> result(new V8StackTraceImpl(
- contextGroupId, description, frames,
- asyncCallChain ? asyncCallChain->cloneImpl() : nullptr));
+ v8::Isolate* isolate = debugger->isolate();
+ v8::HandleScope scope(isolate);
- // Crop to not exceed maxAsyncCallChainDepth.
- V8StackTraceImpl* deepest = result.get();
- while (deepest && maxAsyncCallChainDepth) {
- deepest = deepest->m_parent.get();
- maxAsyncCallChainDepth--;
+ std::vector<std::shared_ptr<StackFrame>> frames;
+ if (!v8StackTrace.IsEmpty() && v8StackTrace->GetFrameCount()) {
+ frames = toFramesVector(debugger, v8StackTrace, maxStackSize);
}
- if (deepest) deepest->m_parent.reset();
- return result;
+ int maxAsyncDepth = 0;
+ std::shared_ptr<AsyncStackTrace> asyncParent;
+ std::shared_ptr<AsyncStackTrace> asyncCreation;
+ calculateAsyncChain(debugger, contextGroupId, &asyncParent, &asyncCreation,
+ &maxAsyncDepth);
+ if (frames.empty() && !asyncCreation && !asyncParent) return nullptr;
+ return std::unique_ptr<V8StackTraceImpl>(new V8StackTraceImpl(
+ std::move(frames), maxAsyncDepth, asyncParent, asyncCreation));
}
// static
std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::capture(
- V8Debugger* debugger, int contextGroupId, size_t maxStackSize,
- const String16& description) {
+ V8Debugger* debugger, int contextGroupId, int maxStackSize) {
DCHECK(debugger);
- v8::Isolate* isolate = debugger->inspector()->isolate();
+ v8::Isolate* isolate = debugger->isolate();
v8::HandleScope handleScope(isolate);
- v8::Local<v8::StackTrace> stackTrace;
+ v8::Local<v8::StackTrace> v8StackTrace;
if (isolate->InContext()) {
- stackTrace = v8::StackTrace::CurrentStackTrace(
- isolate, static_cast<int>(maxStackSize), stackTraceOptions);
+ v8StackTrace = v8::StackTrace::CurrentStackTrace(isolate, maxStackSize,
+ stackTraceOptions);
}
- return V8StackTraceImpl::create(debugger, contextGroupId, stackTrace,
- maxStackSize, description);
+ return V8StackTraceImpl::create(debugger, contextGroupId, v8StackTrace,
+ maxStackSize);
}
-std::unique_ptr<V8StackTraceImpl> V8StackTraceImpl::cloneImpl() {
- std::vector<Frame> framesCopy(m_frames);
- std::unique_ptr<V8StackTraceImpl> copy(
- new V8StackTraceImpl(m_contextGroupId, m_description, framesCopy,
- m_parent ? m_parent->cloneImpl() : nullptr));
- if (m_creation) copy->setCreation(m_creation->cloneImpl());
- return copy;
-}
-
-std::unique_ptr<V8StackTrace> V8StackTraceImpl::clone() {
- std::vector<Frame> frames;
- for (size_t i = 0; i < m_frames.size(); i++)
- frames.push_back(m_frames.at(i).clone());
- return std::unique_ptr<V8StackTraceImpl>(
- new V8StackTraceImpl(m_contextGroupId, m_description, frames, nullptr));
-}
-
-V8StackTraceImpl::V8StackTraceImpl(int contextGroupId,
- const String16& description,
- std::vector<Frame>& frames,
- std::unique_ptr<V8StackTraceImpl> parent)
- : m_contextGroupId(contextGroupId),
- m_description(description),
- m_parent(std::move(parent)) {
- m_frames.swap(frames);
-}
+V8StackTraceImpl::V8StackTraceImpl(
+ std::vector<std::shared_ptr<StackFrame>> frames, int maxAsyncDepth,
+ std::shared_ptr<AsyncStackTrace> asyncParent,
+ std::shared_ptr<AsyncStackTrace> asyncCreation)
+ : m_frames(std::move(frames)),
+ m_maxAsyncDepth(maxAsyncDepth),
+ m_asyncParent(asyncParent),
+ m_asyncCreation(asyncCreation) {}
V8StackTraceImpl::~V8StackTraceImpl() {}
-void V8StackTraceImpl::setCreation(std::unique_ptr<V8StackTraceImpl> creation) {
- m_creation = std::move(creation);
- // When async call chain is empty but doesn't contain useful schedule stack
- // and parent async call chain contains creationg stack but doesn't
- // synchronous we can merge them together.
- // e.g. Promise ThenableJob.
- if (m_parent && isEmpty() && m_description == m_parent->m_description &&
- !m_parent->m_creation) {
- m_frames.swap(m_parent->m_frames);
- m_parent = std::move(m_parent->m_parent);
- }
+std::unique_ptr<V8StackTrace> V8StackTraceImpl::clone() {
+ return std::unique_ptr<V8StackTrace>(
+ new V8StackTraceImpl(m_frames, 0, std::shared_ptr<AsyncStackTrace>(),
+ std::shared_ptr<AsyncStackTrace>()));
}
+bool V8StackTraceImpl::isEmpty() const { return m_frames.empty(); }
+
StringView V8StackTraceImpl::topSourceURL() const {
- DCHECK(m_frames.size());
- return toStringView(m_frames[0].m_scriptName);
+ return toStringView(m_frames[0]->sourceURL());
}
int V8StackTraceImpl::topLineNumber() const {
- DCHECK(m_frames.size());
- return m_frames[0].m_lineNumber;
+ return m_frames[0]->lineNumber() + 1;
}
int V8StackTraceImpl::topColumnNumber() const {
- DCHECK(m_frames.size());
- return m_frames[0].m_columnNumber;
-}
-
-StringView V8StackTraceImpl::topFunctionName() const {
- DCHECK(m_frames.size());
- return toStringView(m_frames[0].m_functionName);
+ return m_frames[0]->columnNumber() + 1;
}
StringView V8StackTraceImpl::topScriptId() const {
- DCHECK(m_frames.size());
- return toStringView(m_frames[0].m_scriptId);
+ return toStringView(m_frames[0]->scriptId());
}
-std::unique_ptr<protocol::Runtime::StackTrace>
-V8StackTraceImpl::buildInspectorObjectImpl() const {
- std::unique_ptr<protocol::Array<protocol::Runtime::CallFrame>> frames =
- protocol::Array<protocol::Runtime::CallFrame>::create();
- for (size_t i = 0; i < m_frames.size(); i++)
- frames->addItem(m_frames.at(i).buildInspectorObject());
-
- std::unique_ptr<protocol::Runtime::StackTrace> stackTrace =
- protocol::Runtime::StackTrace::create()
- .setCallFrames(std::move(frames))
- .build();
- if (!m_description.isEmpty()) stackTrace->setDescription(m_description);
- if (m_parent) stackTrace->setParent(m_parent->buildInspectorObjectImpl());
- if (m_creation && m_creation->m_frames.size()) {
- stackTrace->setPromiseCreationFrame(
- m_creation->m_frames[0].buildInspectorObject());
- }
- return stackTrace;
+StringView V8StackTraceImpl::topFunctionName() const {
+ return toStringView(m_frames[0]->functionName());
}
std::unique_ptr<protocol::Runtime::StackTrace>
-V8StackTraceImpl::buildInspectorObjectForTail(V8Debugger* debugger) const {
- DCHECK(debugger);
- v8::HandleScope handleScope(debugger->inspector()->isolate());
- // Next call collapses possible empty stack and ensures
- // maxAsyncCallChainDepth.
- std::unique_ptr<V8StackTraceImpl> fullChain = V8StackTraceImpl::create(
- debugger, m_contextGroupId, v8::Local<v8::StackTrace>(),
- V8StackTraceImpl::maxCallStackSizeToCapture);
- if (!fullChain || !fullChain->m_parent) return nullptr;
- return fullChain->m_parent->buildInspectorObjectImpl();
+V8StackTraceImpl::buildInspectorObjectImpl() const {
+ return buildInspectorObjectCommon(m_frames, String16(), m_asyncParent.lock(),
+ m_asyncCreation.lock(), m_maxAsyncDepth);
}
std::unique_ptr<protocol::Runtime::API::StackTrace>
@@ -291,20 +231,139 @@ V8StackTraceImpl::buildInspectorObject() const {
std::unique_ptr<StringBuffer> V8StackTraceImpl::toString() const {
String16Builder stackTrace;
for (size_t i = 0; i < m_frames.size(); ++i) {
- const Frame& frame = m_frames[i];
+ const StackFrame& frame = *m_frames[i];
stackTrace.append("\n at " + (frame.functionName().length()
? frame.functionName()
: "(anonymous function)"));
stackTrace.append(" (");
stackTrace.append(frame.sourceURL());
stackTrace.append(':');
- stackTrace.append(String16::fromInteger(frame.lineNumber()));
+ stackTrace.append(String16::fromInteger(frame.lineNumber() + 1));
stackTrace.append(':');
- stackTrace.append(String16::fromInteger(frame.columnNumber()));
+ stackTrace.append(String16::fromInteger(frame.columnNumber() + 1));
stackTrace.append(')');
}
String16 string = stackTrace.toString();
return StringBufferImpl::adopt(string);
}
+bool V8StackTraceImpl::isEqualIgnoringTopFrame(
+ V8StackTraceImpl* stackTrace) const {
+ StackFrameIterator current(this);
+ StackFrameIterator target(stackTrace);
+
+ current.next();
+ target.next();
+ while (!current.done() && !target.done()) {
+ if (!current.frame()->isEqual(target.frame())) {
+ return false;
+ }
+ current.next();
+ target.next();
+ }
+ return current.done() == target.done();
+}
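
The comparison walks both traces through their full async chains (via the StackFrameIterator defined next) but skips the top frame of each, so two pauses at different statements of the same function still compare equal. The skip-then-compare shape as a self-contained sketch:

    #include <vector>

    bool equalIgnoringFirst(const std::vector<int>& a,
                            const std::vector<int>& b) {
      size_t i = 1, j = 1;  // skip the top element of each sequence
      while (i < a.size() && j < b.size()) {
        if (a[i] != b[j]) return false;
        ++i;
        ++j;
      }
      return (i >= a.size()) == (j >= b.size());  // equal only if both ended
    }
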
+
+V8StackTraceImpl::StackFrameIterator::StackFrameIterator(
+ const V8StackTraceImpl* stackTrace)
+ : m_currentIt(stackTrace->m_frames.begin()),
+ m_currentEnd(stackTrace->m_frames.end()),
+ m_parent(stackTrace->m_asyncParent.lock().get()) {}
+
+void V8StackTraceImpl::StackFrameIterator::next() {
+ if (m_currentIt == m_currentEnd) return;
+ ++m_currentIt;
+ while (m_currentIt == m_currentEnd && m_parent) {
+ const std::vector<std::shared_ptr<StackFrame>>& frames = m_parent->frames();
+ m_currentIt = frames.begin();
+ if (m_parent->description() == "async function") ++m_currentIt;
+ m_currentEnd = frames.end();
+ m_parent = m_parent->parent().lock().get();
+ }
+}
+
+bool V8StackTraceImpl::StackFrameIterator::done() {
+ return m_currentIt == m_currentEnd;
+}
+
+StackFrame* V8StackTraceImpl::StackFrameIterator::frame() {
+ return m_currentIt->get();
+}
+
+// static
+std::shared_ptr<AsyncStackTrace> AsyncStackTrace::capture(
+ V8Debugger* debugger, int contextGroupId, const String16& description,
+ int maxStackSize) {
+ DCHECK(debugger);
+
+ v8::Isolate* isolate = debugger->isolate();
+ v8::HandleScope handleScope(isolate);
+
+ std::vector<std::shared_ptr<StackFrame>> frames;
+ if (isolate->InContext()) {
+ v8::Local<v8::StackTrace> v8StackTrace = v8::StackTrace::CurrentStackTrace(
+ isolate, maxStackSize, stackTraceOptions);
+ frames = toFramesVector(debugger, v8StackTrace, maxStackSize);
+ }
+
+ std::shared_ptr<AsyncStackTrace> asyncParent;
+ std::shared_ptr<AsyncStackTrace> asyncCreation;
+ calculateAsyncChain(debugger, contextGroupId, &asyncParent, &asyncCreation,
+ nullptr);
+
+ if (frames.empty() && !asyncCreation && !asyncParent) return nullptr;
+
+ // When the async call chain is empty (it carries no useful schedule stack)
+ // and the parent async call chain has a creation stack but no synchronous
+ // frames, we can merge the two, e.g. for a Promise ThenableJob.
+ if (asyncParent && frames.empty() &&
+ asyncParent->m_description == description && !asyncCreation) {
+ return asyncParent;
+ }
+
+ DCHECK(contextGroupId || asyncParent);
+ if (!contextGroupId && asyncParent) {
+ contextGroupId = asyncParent->m_contextGroupId;
+ }
+ return std::shared_ptr<AsyncStackTrace>(
+ new AsyncStackTrace(contextGroupId, description, std::move(frames),
+ asyncParent, asyncCreation));
+}
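
Note the reuse path above: when a fresh capture has no synchronous frames, no creation stack, and the same description as the current parent, capture() returns the parent shared_ptr itself rather than allocating a new chain link, so repeated empty schedules (the thenable-job case the comment mentions) cannot grow the chain. Hypothetical call shape:

    auto first = AsyncStackTrace::capture(debugger, groupId, "Promise.then", 200);
    // ... later, with no JS frames on the stack and the same description:
    auto second = AsyncStackTrace::capture(debugger, groupId, "Promise.then", 200);
    // `second` may be the very same object as `first`, not a child of it.
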
+
+AsyncStackTrace::AsyncStackTrace(
+ int contextGroupId, const String16& description,
+ std::vector<std::shared_ptr<StackFrame>> frames,
+ std::shared_ptr<AsyncStackTrace> asyncParent,
+ std::shared_ptr<AsyncStackTrace> asyncCreation)
+ : m_contextGroupId(contextGroupId),
+ m_description(description),
+ m_frames(std::move(frames)),
+ m_asyncParent(asyncParent),
+ m_asyncCreation(asyncCreation) {
+ DCHECK(m_contextGroupId);
+}
+
+std::unique_ptr<protocol::Runtime::StackTrace>
+AsyncStackTrace::buildInspectorObject(AsyncStackTrace* asyncCreation,
+ int maxAsyncDepth) const {
+ return buildInspectorObjectCommon(m_frames, m_description,
+ m_asyncParent.lock(),
+ m_asyncCreation.lock(), maxAsyncDepth);
+}
+
+int AsyncStackTrace::contextGroupId() const { return m_contextGroupId; }
+
+const String16& AsyncStackTrace::description() const { return m_description; }
+
+std::weak_ptr<AsyncStackTrace> AsyncStackTrace::parent() const {
+ return m_asyncParent;
+}
+
+std::weak_ptr<AsyncStackTrace> AsyncStackTrace::creation() const {
+ return m_asyncCreation;
+}
+
+bool AsyncStackTrace::isEmpty() const { return m_frames.empty(); }
+
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index f8b53d0a65..5ce051bd5c 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -5,96 +5,142 @@
#ifndef V8_INSPECTOR_V8STACKTRACEIMPL_H_
#define V8_INSPECTOR_V8STACKTRACEIMPL_H_
+#include <memory>
#include <vector>
+#include "include/v8-inspector.h"
+#include "include/v8.h"
#include "src/base/macros.h"
-#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
-
-#include "include/v8-inspector.h"
+#include "src/inspector/string-16.h"
namespace v8_inspector {
-class TracedValue;
+class AsyncStackTrace;
class V8Debugger;
+class WasmTranslation;
-// Note: async stack trace may have empty top stack with non-empty tail to
-// indicate
-// that current native-only state had some async story.
-// On the other hand, any non-top async stack is guaranteed to be non-empty.
-class V8StackTraceImpl final : public V8StackTrace {
+class StackFrame {
public:
- static const size_t maxCallStackSizeToCapture = 200;
+ explicit StackFrame(v8::Local<v8::StackFrame> frame);
+ ~StackFrame() = default;
- class Frame {
- public:
- Frame();
- Frame(const String16& functionName, const String16& scriptId,
- const String16& scriptName, int lineNumber, int column = 0);
- ~Frame();
-
- const String16& functionName() const { return m_functionName; }
- const String16& scriptId() const { return m_scriptId; }
- const String16& sourceURL() const { return m_scriptName; }
- int lineNumber() const { return m_lineNumber; }
- int columnNumber() const { return m_columnNumber; }
- Frame clone() const;
+ void translate(WasmTranslation* wasmTranslation);
- private:
- friend class V8StackTraceImpl;
- std::unique_ptr<protocol::Runtime::CallFrame> buildInspectorObject() const;
- void toTracedValue(TracedValue*) const;
-
- String16 m_functionName;
- String16 m_scriptId;
- String16 m_scriptName;
- int m_lineNumber;
- int m_columnNumber;
- };
+ const String16& functionName() const;
+ const String16& scriptId() const;
+ const String16& sourceURL() const;
+ int lineNumber() const; // 0-based.
+ int columnNumber() const; // 0-based.
+ std::unique_ptr<protocol::Runtime::CallFrame> buildInspectorObject() const;
+ bool isEqual(StackFrame* frame) const;
+
+ private:
+ String16 m_functionName;
+ String16 m_scriptId;
+ String16 m_sourceURL;
+ int m_lineNumber; // 0-based.
+ int m_columnNumber; // 0-based.
+};
+class V8StackTraceImpl : public V8StackTrace {
+ public:
static void setCaptureStackTraceForUncaughtExceptions(v8::Isolate*,
bool capture);
- static std::unique_ptr<V8StackTraceImpl> create(
- V8Debugger*, int contextGroupId, v8::Local<v8::StackTrace>,
- size_t maxStackSize, const String16& description = String16());
- static std::unique_ptr<V8StackTraceImpl> capture(
- V8Debugger*, int contextGroupId, size_t maxStackSize,
- const String16& description = String16());
-
- // This method drops the async chain. Use cloneImpl() instead.
- std::unique_ptr<V8StackTrace> clone() override;
- std::unique_ptr<V8StackTraceImpl> cloneImpl();
- std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectForTail(
- V8Debugger*) const;
+ static const int maxCallStackSizeToCapture = 200;
+ static std::unique_ptr<V8StackTraceImpl> create(V8Debugger*,
+ int contextGroupId,
+ v8::Local<v8::StackTrace>,
+ int maxStackSize);
+ static std::unique_ptr<V8StackTraceImpl> capture(V8Debugger*,
+ int contextGroupId,
+ int maxStackSize);
+
+ ~V8StackTraceImpl() override;
std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObjectImpl()
const;
- ~V8StackTraceImpl() override;
// V8StackTrace implementation.
- bool isEmpty() const override { return !m_frames.size(); };
+ // This method drops the async stack trace.
+ std::unique_ptr<V8StackTrace> clone() override;
+ bool isEmpty() const override;
StringView topSourceURL() const override;
- int topLineNumber() const override;
- int topColumnNumber() const override;
+ int topLineNumber() const override; // 1-based.
+ int topColumnNumber() const override; // 1-based.
StringView topScriptId() const override;
StringView topFunctionName() const override;
std::unique_ptr<protocol::Runtime::API::StackTrace> buildInspectorObject()
const override;
std::unique_ptr<StringBuffer> toString() const override;
- void setCreation(std::unique_ptr<V8StackTraceImpl> creation);
+ bool isEqualIgnoringTopFrame(V8StackTraceImpl* stackTrace) const;
private:
- V8StackTraceImpl(int contextGroupId, const String16& description,
- std::vector<Frame>& frames,
- std::unique_ptr<V8StackTraceImpl> parent);
+ V8StackTraceImpl(std::vector<std::shared_ptr<StackFrame>> frames,
+ int maxAsyncDepth,
+ std::shared_ptr<AsyncStackTrace> asyncParent,
+ std::shared_ptr<AsyncStackTrace> asyncCreation);
+
+ class StackFrameIterator {
+ public:
+ explicit StackFrameIterator(const V8StackTraceImpl* stackTrace);
+
+ void next();
+ StackFrame* frame();
+ bool done();
+
+ private:
+ std::vector<std::shared_ptr<StackFrame>>::const_iterator m_currentIt;
+ std::vector<std::shared_ptr<StackFrame>>::const_iterator m_currentEnd;
+ AsyncStackTrace* m_parent;
+ };
+
+ std::vector<std::shared_ptr<StackFrame>> m_frames;
+ int m_maxAsyncDepth;
+ std::weak_ptr<AsyncStackTrace> m_asyncParent;
+ std::weak_ptr<AsyncStackTrace> m_asyncCreation;
+
+ DISALLOW_COPY_AND_ASSIGN(V8StackTraceImpl);
+};
+
+class AsyncStackTrace {
+ public:
+ static std::shared_ptr<AsyncStackTrace> capture(V8Debugger*,
+ int contextGroupId,
+ const String16& description,
+ int maxStackSize);
+
+ std::unique_ptr<protocol::Runtime::StackTrace> buildInspectorObject(
+ AsyncStackTrace* asyncCreation, int maxAsyncDepth) const;
+
+ int contextGroupId() const;
+ const String16& description() const;
+ std::weak_ptr<AsyncStackTrace> parent() const;
+ std::weak_ptr<AsyncStackTrace> creation() const;
+ bool isEmpty() const;
+
+ void setDescription(const String16& description) {
+ // TODO(kozyatinskiy): implement it without hack.
+ m_description = description;
+ }
+ const std::vector<std::shared_ptr<StackFrame>>& frames() const {
+ return m_frames;
+ }
+
+ private:
+ AsyncStackTrace(int contextGroupId, const String16& description,
+ std::vector<std::shared_ptr<StackFrame>> frames,
+ std::shared_ptr<AsyncStackTrace> asyncParent,
+ std::shared_ptr<AsyncStackTrace> asyncCreation);
int m_contextGroupId;
String16 m_description;
- std::vector<Frame> m_frames;
- std::unique_ptr<V8StackTraceImpl> m_parent;
- std::unique_ptr<V8StackTraceImpl> m_creation;
- DISALLOW_COPY_AND_ASSIGN(V8StackTraceImpl);
+ std::vector<std::shared_ptr<StackFrame>> m_frames;
+ std::weak_ptr<AsyncStackTrace> m_asyncParent;
+ std::weak_ptr<AsyncStackTrace> m_asyncCreation;
+
+ DISALLOW_COPY_AND_ASSIGN(AsyncStackTrace);
};
} // namespace v8_inspector
diff --git a/deps/v8/src/interface-descriptors.cc b/deps/v8/src/interface-descriptors.cc
index a5dccc3583..f44ee619d9 100644
--- a/deps/v8/src/interface-descriptors.cc
+++ b/deps/v8/src/interface-descriptors.cc
@@ -447,8 +447,18 @@ void CallTrampolineDescriptor::InitializePlatformIndependent(
void CallForwardVarargsDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
- // kTarget, kStartIndex
+ // kTarget, kActualArgumentsCount, kStartIndex
+ MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
+ MachineType::Int32()};
+ data->InitializePlatformIndependent(arraysize(machine_types), 0,
+ machine_types);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformIndependent(
+ CallInterfaceDescriptorData* data) {
+ // kTarget, kNewTarget, kActualArgumentsCount, kStartIndex
MachineType machine_types[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::Int32(),
MachineType::Int32()};
data->InitializePlatformIndependent(arraysize(machine_types), 0,
machine_types);
@@ -492,24 +502,6 @@ void CallICTrampolineDescriptor::InitializePlatformIndependent(
machine_types);
}
-void RegExpExecDescriptor::InitializePlatformIndependent(
- CallInterfaceDescriptorData* data) {
- // kString, kLastIndex, kStringStart, kStringEnd, kEntryPoint
- MachineType machine_types[] = {MachineType::AnyTagged(), MachineType::Int32(),
- MachineType::Pointer(), MachineType::Pointer(),
- MachineType::AnyTagged()};
- data->InitializePlatformIndependent(arraysize(machine_types), 0,
- machine_types);
-}
-
-void RegExpExecDescriptor::InitializePlatformSpecific(
- CallInterfaceDescriptorData* data) {
- Register registers[] = {StringRegister(), LastIndexRegister(),
- StringStartRegister(), StringEndRegister(),
- CodeRegister()};
- data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
void BuiltinDescriptor::InitializePlatformIndependent(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kArgumentsCount
diff --git a/deps/v8/src/interface-descriptors.h b/deps/v8/src/interface-descriptors.h
index b97c45cd0e..127e156a82 100644
--- a/deps/v8/src/interface-descriptors.h
+++ b/deps/v8/src/interface-descriptors.h
@@ -48,14 +48,13 @@ class PlatformInterfaceDescriptor;
V(CallConstruct) \
V(CallTrampoline) \
V(ConstructStub) \
+ V(ConstructForwardVarargs) \
V(ConstructTrampoline) \
- V(RegExpExec) \
V(TransitionElementsKind) \
V(AllocateHeapNumber) \
V(Builtin) \
V(ArrayConstructor) \
V(IteratingArrayBuiltin) \
- V(IteratingArrayBuiltinLoopContinuation) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
V(ArrayNArgumentsConstructor) \
@@ -227,15 +226,25 @@ class V8_EXPORT_PRIVATE CallInterfaceDescriptor {
} \
static inline CallDescriptors::Key key();
-#define DECLARE_DEFAULT_DESCRIPTOR(name, base, parameter_count) \
- DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
- protected: \
- void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
- override { \
- DefaultInitializePlatformSpecific(data, parameter_count); \
- } \
- name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {} \
- \
+static const int kMaxBuiltinRegisterParams = 5;
+
+#define DECLARE_DEFAULT_DESCRIPTOR(name, base, parameter_count) \
+ DECLARE_DESCRIPTOR_WITH_BASE(name, base) \
+ protected: \
+ static const int kRegisterParams = \
+ parameter_count > kMaxBuiltinRegisterParams ? kMaxBuiltinRegisterParams \
+ : parameter_count; \
+ static const int kStackParams = parameter_count - kRegisterParams; \
+ void InitializePlatformSpecific(CallInterfaceDescriptorData* data) \
+ override { \
+ DefaultInitializePlatformSpecific(data, kRegisterParams); \
+ } \
+ void InitializePlatformIndependent(CallInterfaceDescriptorData* data) \
+ override { \
+ data->InitializePlatformIndependent(kRegisterParams, kStackParams, NULL); \
+ } \
+ name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {} \
+ \
public:
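
The rewritten macro splits a builtin descriptor's parameters at compile time: the first kMaxBuiltinRegisterParams (5) are passed in registers and the remainder spill to the stack. The arithmetic, checked in isolation (a hypothetical illustration, not part of the patch):

    constexpr int kMaxBuiltinRegisterParams = 5;

    constexpr int RegisterParams(int parameter_count) {
      return parameter_count > kMaxBuiltinRegisterParams
                 ? kMaxBuiltinRegisterParams
                 : parameter_count;
    }
    constexpr int StackParams(int parameter_count) {
      return parameter_count - RegisterParams(parameter_count);
    }

    static_assert(RegisterParams(3) == 3 && StackParams(3) == 0,
                  "small descriptors stay in registers");
    static_assert(RegisterParams(7) == 5 && StackParams(7) == 2,
                  "two parameters spill to the stack");
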
#define DECLARE_DESCRIPTOR(name, base) \
@@ -537,7 +546,7 @@ class FastCloneShallowArrayDescriptor : public CallInterfaceDescriptor {
class FastCloneShallowObjectDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kClosure, kLiteralIndex, kConstantProperties, kFlags)
+ DEFINE_PARAMETERS(kClosure, kLiteralIndex, kBoilerplateDescription, kFlags)
DECLARE_DESCRIPTOR(FastCloneShallowObjectDescriptor, CallInterfaceDescriptor)
};
@@ -567,11 +576,18 @@ class CallTrampolineDescriptor : public CallInterfaceDescriptor {
class CallForwardVarargsDescriptor : public CallInterfaceDescriptor {
public:
- DEFINE_PARAMETERS(kTarget, kStartIndex)
+ DEFINE_PARAMETERS(kTarget, kActualArgumentsCount, kStartIndex)
DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(CallForwardVarargsDescriptor,
CallInterfaceDescriptor)
};
+class ConstructForwardVarargsDescriptor : public CallInterfaceDescriptor {
+ public:
+ DEFINE_PARAMETERS(kTarget, kNewTarget, kActualArgumentsCount, kStartIndex)
+ DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(
+ ConstructForwardVarargsDescriptor, CallInterfaceDescriptor)
+};
+
class ConstructStubDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kFunction, kNewTarget, kActualArgumentsCount,
@@ -613,19 +629,6 @@ class CallConstructDescriptor : public CallInterfaceDescriptor {
DECLARE_DESCRIPTOR(CallConstructDescriptor, CallInterfaceDescriptor)
};
-class RegExpExecDescriptor : public CallInterfaceDescriptor {
- public:
- DEFINE_PARAMETERS(kString, kLastIndex, kStringStart, kStringEnd, kCode)
- DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE(RegExpExecDescriptor,
- CallInterfaceDescriptor)
-
- static const Register StringRegister();
- static const Register LastIndexRegister();
- static const Register StringStartRegister();
- static const Register StringEndRegister();
- static const Register CodeRegister();
-};
-
class TransitionElementsKindDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kObject, kMap)
@@ -656,14 +659,6 @@ class IteratingArrayBuiltinDescriptor : public BuiltinDescriptor {
DECLARE_BUILTIN_DESCRIPTOR(IteratingArrayBuiltinDescriptor)
};
-class IteratingArrayBuiltinLoopContinuationDescriptor
- : public BuiltinDescriptor {
- public:
- DEFINE_BUILTIN_PARAMETERS(kCallback, kThisArg, kArray, kObject, kInitialK,
- kLength, kTo)
- DECLARE_BUILTIN_DESCRIPTOR(IteratingArrayBuiltinLoopContinuationDescriptor)
-};
-
class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
public:
DEFINE_PARAMETERS(kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite)
@@ -898,6 +893,7 @@ class WasmRuntimeCallDescriptor final : public CallInterfaceDescriptor {
BUILTIN_LIST_TFS(DEFINE_TFS_BUILTIN_DESCRIPTOR)
#undef DEFINE_TFS_BUILTIN_DESCRIPTOR
+#undef DECLARE_DEFAULT_DESCRIPTOR
#undef DECLARE_DESCRIPTOR_WITH_BASE
#undef DECLARE_DESCRIPTOR
#undef DECLARE_DESCRIPTOR_WITH_CUSTOM_FUNCTION_TYPE
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.cc b/deps/v8/src/interpreter/bytecode-array-accessor.cc
index cc6777588a..c3a0b3cb9e 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.cc
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.cc
@@ -168,11 +168,14 @@ Runtime::FunctionId BytecodeArrayAccessor::GetIntrinsicIdOperand(
static_cast<IntrinsicsHelper::IntrinsicId>(raw_id));
}
+Handle<Object> BytecodeArrayAccessor::GetConstantAtIndex(int index) const {
+ return FixedArray::get(bytecode_array()->constant_pool(), index,
+ bytecode_array()->GetIsolate());
+}
+
Handle<Object> BytecodeArrayAccessor::GetConstantForIndexOperand(
int operand_index) const {
- return FixedArray::get(bytecode_array()->constant_pool(),
- GetIndexOperand(operand_index),
- bytecode_array()->GetIsolate());
+ return GetConstantAtIndex(GetIndexOperand(operand_index));
}
int BytecodeArrayAccessor::GetJumpTargetOffset() const {
@@ -182,16 +185,31 @@ int BytecodeArrayAccessor::GetJumpTargetOffset() const {
if (bytecode == Bytecode::kJumpLoop) {
relative_offset = -relative_offset;
}
- return current_offset() + relative_offset + current_prefix_offset();
+ return GetAbsoluteOffset(relative_offset);
} else if (interpreter::Bytecodes::IsJumpConstant(bytecode)) {
Smi* smi = Smi::cast(*GetConstantForIndexOperand(0));
- return current_offset() + smi->value() + current_prefix_offset();
+ return GetAbsoluteOffset(smi->value());
} else {
UNREACHABLE();
return kMinInt;
}
}
+JumpTableTargetOffsets BytecodeArrayAccessor::GetJumpTableTargetOffsets()
+ const {
+ DCHECK_EQ(current_bytecode(), Bytecode::kSwitchOnSmiNoFeedback);
+
+ uint32_t table_start = GetIndexOperand(0);
+ uint32_t table_size = GetUnsignedImmediateOperand(1);
+ int32_t case_value_base = GetImmediateOperand(2);
+
+ return JumpTableTargetOffsets(this, table_start, table_size, case_value_base);
+}
+
+int BytecodeArrayAccessor::GetAbsoluteOffset(int relative_offset) const {
+ return current_offset() + relative_offset + current_prefix_offset();
+}
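
GetAbsoluteOffset() centralizes the arithmetic both jump paths previously inlined: target = current_offset() + relative_offset + current_prefix_offset(). Worked through with invented numbers:

    constexpr int AbsoluteOffset(int current, int relative, int prefix) {
      return current + relative + prefix;
    }
    // A backward jump at offset 20 with relative offset -6 and one prefix byte:
    static_assert(AbsoluteOffset(20, -6, 1) == 15, "target is offset 15");
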
+
bool BytecodeArrayAccessor::OffsetWithinBytecode(int offset) const {
return current_offset() <= offset &&
offset < current_offset() + current_bytecode_size();
@@ -203,6 +221,77 @@ std::ostream& BytecodeArrayAccessor::PrintTo(std::ostream& os) const {
bytecode_array()->parameter_count());
}
+JumpTableTargetOffsets::JumpTableTargetOffsets(
+ const BytecodeArrayAccessor* accessor, int table_start, int table_size,
+ int case_value_base)
+ : accessor_(accessor),
+ table_start_(table_start),
+ table_size_(table_size),
+ case_value_base_(case_value_base) {}
+
+JumpTableTargetOffsets::iterator JumpTableTargetOffsets::begin() const {
+ return iterator(case_value_base_, table_start_, table_start_ + table_size_,
+ accessor_);
+}
+JumpTableTargetOffsets::iterator JumpTableTargetOffsets::end() const {
+ return iterator(case_value_base_ + table_size_, table_start_ + table_size_,
+ table_start_ + table_size_, accessor_);
+}
+int JumpTableTargetOffsets::size() const {
+ int ret = 0;
+ // TODO(leszeks): Is there a more efficient way of doing this than iterating?
+ for (const auto& entry : *this) {
+ USE(entry);
+ ret++;
+ }
+ return ret;
+}
+
+JumpTableTargetOffsets::iterator::iterator(
+ int case_value, int table_offset, int table_end,
+ const BytecodeArrayAccessor* accessor)
+ : accessor_(accessor),
+ index_(case_value),
+ table_offset_(table_offset),
+ table_end_(table_end) {
+ UpdateAndAdvanceToValid();
+}
+
+JumpTableTargetOffset JumpTableTargetOffsets::iterator::operator*() {
+ DCHECK_LT(table_offset_, table_end_);
+ DCHECK(current_->IsSmi());
+ return {index_, accessor_->GetAbsoluteOffset(Smi::cast(*current_)->value())};
+}
+
+JumpTableTargetOffsets::iterator& JumpTableTargetOffsets::iterator::
+operator++() {
+ DCHECK_LT(table_offset_, table_end_);
+ ++table_offset_;
+ ++index_;
+ UpdateAndAdvanceToValid();
+ return *this;
+}
+
+bool JumpTableTargetOffsets::iterator::operator!=(
+ const JumpTableTargetOffsets::iterator& other) {
+ DCHECK_EQ(accessor_, other.accessor_);
+ DCHECK_EQ(table_end_, other.table_end_);
+ DCHECK_EQ(index_ - other.index_, table_offset_ - other.table_offset_);
+ return index_ != other.index_;
+}
+
+void JumpTableTargetOffsets::iterator::UpdateAndAdvanceToValid() {
+ if (table_offset_ >= table_end_) return;
+
+ current_ = accessor_->GetConstantAtIndex(table_offset_);
+ Isolate* isolate = accessor_->bytecode_array()->GetIsolate();
+ while (current_->IsTheHole(isolate)) {
+ ++table_offset_;
+ ++index_;
+ current_ = accessor_->GetConstantAtIndex(table_offset_);
+ }
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
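
Downstream consumers can now walk a switch's live cases with a ranged-for over GetJumpTableTargetOffsets(); holes in the constant pool are skipped by the iterator. A usage sketch, assuming `accessor` is positioned on a SwitchOnSmiNoFeedback bytecode (VisitCase is a hypothetical callback, not an existing API):

    for (const auto& entry : accessor.GetJumpTableTargetOffsets()) {
      // entry.case_value: the Smi value this case matches.
      // entry.target_offset: absolute offset of the case's handler.
      VisitCase(entry.case_value, entry.target_offset);
    }
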
diff --git a/deps/v8/src/interpreter/bytecode-array-accessor.h b/deps/v8/src/interpreter/bytecode-array-accessor.h
index e5a24f3e7f..e465a5c881 100644
--- a/deps/v8/src/interpreter/bytecode-array-accessor.h
+++ b/deps/v8/src/interpreter/bytecode-array-accessor.h
@@ -16,6 +16,50 @@ namespace v8 {
namespace internal {
namespace interpreter {
+class BytecodeArrayAccessor;
+
+struct V8_EXPORT_PRIVATE JumpTableTargetOffset {
+ int case_value;
+ int target_offset;
+};
+
+class V8_EXPORT_PRIVATE JumpTableTargetOffsets final {
+ public:
+ // Minimal iterator implementation for use in ranged-for.
+ class V8_EXPORT_PRIVATE iterator final {
+ public:
+ iterator(int case_value, int table_offset, int table_end,
+ const BytecodeArrayAccessor* accessor);
+
+ JumpTableTargetOffset operator*();
+ iterator& operator++();
+ bool operator!=(const iterator& other);
+
+ private:
+ void UpdateAndAdvanceToValid();
+
+ const BytecodeArrayAccessor* accessor_;
+ Handle<Object> current_;
+ int index_;
+ int table_offset_;
+ int table_end_;
+ };
+
+ JumpTableTargetOffsets(const BytecodeArrayAccessor* accessor, int table_start,
+ int table_size, int case_value_base);
+
+ iterator begin() const;
+ iterator end() const;
+
+ int size() const;
+
+ private:
+ const BytecodeArrayAccessor* accessor_;
+ int table_start_;
+ int table_size_;
+ int case_value_base_;
+};
+
class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
public:
BytecodeArrayAccessor(Handle<BytecodeArray> bytecode_array,
@@ -41,12 +85,21 @@ class V8_EXPORT_PRIVATE BytecodeArrayAccessor {
int GetRegisterOperandRange(int operand_index) const;
Runtime::FunctionId GetRuntimeIdOperand(int operand_index) const;
Runtime::FunctionId GetIntrinsicIdOperand(int operand_index) const;
+ Handle<Object> GetConstantAtIndex(int offset) const;
Handle<Object> GetConstantForIndexOperand(int operand_index) const;
- // Returns the absolute offset of the branch target at the current
- // bytecode. It is an error to call this method if the bytecode is
- // not for a jump or conditional jump.
+ // Returns the absolute offset of the branch target at the current bytecode.
+ // It is an error to call this method if the bytecode is not for a jump or
+ // conditional jump.
int GetJumpTargetOffset() const;
+ // Returns an iterator over the absolute offsets of the targets of the current
+ // switch bytecode's jump table. It is an error to call this method if the
+ // bytecode is not a switch.
+ JumpTableTargetOffsets GetJumpTableTargetOffsets() const;
+
+ // Returns the absolute offset of the bytecode at the given relative offset
+ // from the current bytecode.
+ int GetAbsoluteOffset(int relative_offset) const;
bool OffsetWithinBytecode(int offset) const;
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.cc b/deps/v8/src/interpreter/bytecode-array-builder.cc
index 5634e1a6fd..80c59e4c47 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.cc
+++ b/deps/v8/src/interpreter/bytecode-array-builder.cc
@@ -6,8 +6,11 @@
#include "src/globals.h"
#include "src/interpreter/bytecode-array-writer.h"
+#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-node.h"
#include "src/interpreter/bytecode-register-optimizer.h"
+#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/interpreter-intrinsics.h"
#include "src/objects-inl.h"
@@ -35,8 +38,8 @@ class RegisterTransferWriter final
};
BytecodeArrayBuilder::BytecodeArrayBuilder(
- Isolate* isolate, Zone* zone, int parameter_count, int context_count,
- int locals_count, FunctionLiteral* literal,
+ Isolate* isolate, Zone* zone, int parameter_count, int locals_count,
+ FunctionLiteral* literal,
SourcePositionTableBuilder::RecordingMode source_position_mode)
: zone_(zone),
literal_(literal),
@@ -46,14 +49,11 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
return_seen_in_block_(false),
parameter_count_(parameter_count),
local_register_count_(locals_count),
- context_register_count_(context_count),
register_allocator_(fixed_register_count()),
bytecode_array_writer_(zone, &constant_array_builder_,
source_position_mode),
- pipeline_(&bytecode_array_writer_),
register_optimizer_(nullptr) {
DCHECK_GE(parameter_count_, 0);
- DCHECK_GE(context_register_count_, 0);
DCHECK_GE(local_register_count_, 0);
if (FLAG_ignition_reo) {
@@ -65,16 +65,6 @@ BytecodeArrayBuilder::BytecodeArrayBuilder(
return_position_ = literal ? literal->return_position() : kNoSourcePosition;
}
-Register BytecodeArrayBuilder::first_context_register() const {
- DCHECK_GT(context_register_count_, 0);
- return Register(local_register_count_);
-}
-
-Register BytecodeArrayBuilder::last_context_register() const {
- DCHECK_GT(context_register_count_, 0);
- return Register(local_register_count_ + context_register_count_ - 1);
-}
-
Register BytecodeArrayBuilder::Parameter(int parameter_index) const {
DCHECK_GE(parameter_index, 0);
// The parameter indices are shifted by 1 (receiver is the
@@ -106,8 +96,8 @@ Handle<BytecodeArray> BytecodeArrayBuilder::ToBytecodeArray(Isolate* isolate) {
Handle<FixedArray> handler_table =
handler_table_builder()->ToHandlerTable(isolate);
- return pipeline_->ToBytecodeArray(isolate, register_count, parameter_count(),
- handler_table);
+ return bytecode_array_writer_.ToBytecodeArray(
+ isolate, register_count, parameter_count(), handler_table);
}
BytecodeSourceInfo BytecodeArrayBuilder::CurrentSourcePosition(
@@ -134,7 +124,7 @@ void BytecodeArrayBuilder::SetDeferredSourceInfo(
if (deferred_source_info_.is_valid()) {
// Emit any previous deferred source info now as a nop.
BytecodeNode node = BytecodeNode::Nop(deferred_source_info_);
- pipeline()->Write(&node);
+ bytecode_array_writer_.Write(&node);
}
deferred_source_info_ = source_info;
}
@@ -146,19 +136,25 @@ void BytecodeArrayBuilder::AttachOrEmitDeferredSourceInfo(BytecodeNode* node) {
node->set_source_info(deferred_source_info_);
} else {
BytecodeNode node = BytecodeNode::Nop(deferred_source_info_);
- pipeline()->Write(&node);
+ bytecode_array_writer_.Write(&node);
}
deferred_source_info_.set_invalid();
}
void BytecodeArrayBuilder::Write(BytecodeNode* node) {
AttachOrEmitDeferredSourceInfo(node);
- pipeline()->Write(node);
+ bytecode_array_writer_.Write(node);
}
void BytecodeArrayBuilder::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
AttachOrEmitDeferredSourceInfo(node);
- pipeline()->WriteJump(node, label);
+ bytecode_array_writer_.WriteJump(node, label);
+}
+
+void BytecodeArrayBuilder::WriteSwitch(BytecodeNode* node,
+ BytecodeJumpTable* jump_table) {
+ AttachOrEmitDeferredSourceInfo(node);
+ bytecode_array_writer_.WriteSwitch(node, jump_table);
}
void BytecodeArrayBuilder::OutputLdarRaw(Register reg) {
@@ -294,8 +290,9 @@ class BytecodeNodeBuilder {
public:
template <typename... Operands>
INLINE(static BytecodeNode Make(BytecodeArrayBuilder* builder,
- BytecodeSourceInfo source_info,
Operands... operands)) {
+ static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands,
+ "too many operands for bytecode");
builder->PrepareToOutputBytecode<bytecode, accumulator_use>();
// The "OperandHelper<operand_types>::Convert(builder, operands)..." will
// expand both the OperandType... and Operands... parameter packs e.g. for:
@@ -305,37 +302,45 @@ class BytecodeNodeBuilder {
// OperandHelper<OperandType::kReg>::Convert(builder, reg),
// OperandHelper<OperandType::kImm>::Convert(builder, immediate),
return BytecodeNode::Create<bytecode, accumulator_use, operand_types...>(
- source_info,
+ builder->CurrentSourcePosition(bytecode),
OperandHelper<operand_types>::Convert(builder, operands)...);
}
};
-#define DEFINE_BYTECODE_OUTPUT(name, ...) \
- template <typename... Operands> \
- void BytecodeArrayBuilder::Output##name(Operands... operands) { \
- static_assert(sizeof...(Operands) <= Bytecodes::kMaxOperands, \
- "too many operands for bytecode"); \
- BytecodeNode node( \
- BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make< \
- Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
- operands...)); \
- Write(&node); \
- } \
- \
- template <typename... Operands> \
- void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
- Operands... operands) { \
- DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
- BytecodeNode node( \
- BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make< \
- Operands...>(this, CurrentSourcePosition(Bytecode::k##name), \
- operands...)); \
- WriteJump(&node, label); \
- LeaveBasicBlock(); \
+#define DEFINE_BYTECODE_OUTPUT(name, ...) \
+ template <typename... Operands> \
+ BytecodeNode BytecodeArrayBuilder::Create##name##Node( \
+ Operands... operands) { \
+ return BytecodeNodeBuilder<Bytecode::k##name, __VA_ARGS__>::Make( \
+ this, operands...); \
+ } \
+ \
+ template <typename... Operands> \
+ void BytecodeArrayBuilder::Output##name(Operands... operands) { \
+ BytecodeNode node(Create##name##Node(operands...)); \
+ Write(&node); \
+ } \
+ \
+ template <typename... Operands> \
+ void BytecodeArrayBuilder::Output##name(BytecodeLabel* label, \
+ Operands... operands) { \
+ DCHECK(Bytecodes::IsJump(Bytecode::k##name)); \
+ BytecodeNode node(Create##name##Node(operands...)); \
+ WriteJump(&node, label); \
+ LeaveBasicBlock(); \
}
BYTECODE_LIST(DEFINE_BYTECODE_OUTPUT)
#undef DEFINE_BYTECODE_OUTPUT
+void BytecodeArrayBuilder::OutputSwitchOnSmiNoFeedback(
+ BytecodeJumpTable* jump_table) {
+ BytecodeNode node(CreateSwitchOnSmiNoFeedbackNode(
+ jump_table->constant_pool_index(), jump_table->size(),
+ jump_table->case_value_base()));
+ WriteSwitch(&node, jump_table);
+ LeaveBasicBlock();
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::BinaryOperation(Token::Value op,
Register reg,
int feedback_slot) {
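
Note: the rewritten DEFINE_BYTECODE_OUTPUT macro above splits node creation (Create##name##Node) out of emission, which is what lets OutputSwitchOnSmiNoFeedback reuse the node factory while routing through WriteSwitch. Expanded by hand for a jump bytecode it yields roughly the following sketch; the elided template arguments stand in for the accumulator-use and operand-type list supplied by BYTECODE_LIST:

    template <typename... Operands>
    BytecodeNode BytecodeArrayBuilder::CreateJumpNode(Operands... operands) {
      return BytecodeNodeBuilder<Bytecode::kJump, /*...*/>::Make(this,
                                                                 operands...);
    }

    template <typename... Operands>
    void BytecodeArrayBuilder::OutputJump(BytecodeLabel* label,
                                          Operands... operands) {
      DCHECK(Bytecodes::IsJump(Bytecode::kJump));
      BytecodeNode node(CreateJumpNode(operands...));
      WriteJump(&node, label);
      LeaveBasicBlock();
    }
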
@@ -995,14 +1000,24 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeLabel* label) {
// Flush the register optimizer when binding a label to ensure all
// expected registers are valid when jumping to this label.
if (register_optimizer_) register_optimizer_->Flush();
- pipeline_->BindLabel(label);
+ bytecode_array_writer_.BindLabel(label);
LeaveBasicBlock();
return *this;
}
BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(const BytecodeLabel& target,
BytecodeLabel* label) {
- pipeline_->BindLabel(target, label);
+ bytecode_array_writer_.BindLabel(target, label);
+ LeaveBasicBlock();
+ return *this;
+}
+
+BytecodeArrayBuilder& BytecodeArrayBuilder::Bind(BytecodeJumpTable* jump_table,
+ int case_value) {
+ // Flush the register optimizer when binding a jump table entry to ensure
+ // all expected registers are valid when jumping to this location.
+ if (register_optimizer_) register_optimizer_->Flush();
+ bytecode_array_writer_.BindJumpTableEntry(jump_table, case_value);
LeaveBasicBlock();
return *this;
}
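
Note: the Flush() before binding mirrors what Bind(BytecodeLabel*) already does. A bound jump-table entry starts a new basic block, so every predecessor must observe the same register state; a sketch of the hazard the flush prevents, in illustrative bytecode:

    // Without the flush, the register optimizer could elide the store on the
    // fall-through path while the path arriving via the jump table still
    // expects r0 to hold the value:
    //   Star r0            ; may be elided by the optimizer
    //   [jump-table entry bound here]
    //   Ldar r0            ; must see the same r0 on every incoming path
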
@@ -1121,6 +1136,12 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::JumpLoop(BytecodeLabel* label,
return *this;
}
+BytecodeArrayBuilder& BytecodeArrayBuilder::SwitchOnSmiNoFeedback(
+ BytecodeJumpTable* jump_table) {
+ OutputSwitchOnSmiNoFeedback(jump_table);
+ return *this;
+}
+
BytecodeArrayBuilder& BytecodeArrayBuilder::StackCheck(int position) {
if (position != kNoSourcePosition) {
// We need to attach a non-breakable source position to a stack
@@ -1386,6 +1407,16 @@ size_t BytecodeArrayBuilder::GetConstantPoolEntry(const Scope* scope) {
SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_GETTER)
#undef ENTRY_GETTER
+BytecodeJumpTable* BytecodeArrayBuilder::AllocateJumpTable(
+ int size, int case_value_base) {
+ DCHECK_GT(size, 0);
+
+ size_t constant_pool_index = constant_array_builder()->InsertJumpTable(size);
+
+ return new (zone())
+ BytecodeJumpTable(constant_pool_index, size, case_value_base, zone());
+}
+
size_t BytecodeArrayBuilder::AllocateDeferredConstantPoolEntry() {
return constant_array_builder()->InsertDeferred();
}
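
Note: together with SwitchOnSmiNoFeedback and the new Bind overload, this completes the jump-table workflow on the builder. A minimal sketch of a three-way dispatch on an accumulator Smi, assuming a builder and a fall-through label set up elsewhere:

    // Three constant-pool slots whose case values start at 0.
    BytecodeJumpTable* table =
        builder.AllocateJumpTable(/*size=*/3, /*case_value_base=*/0);
    builder.SwitchOnSmiNoFeedback(table);  // dispatch on the accumulator
    builder.Jump(&fall_through);           // no case matched
    builder.Bind(table, 0);
    // ...emit the body for case 0, ending in a jump or return...
    builder.Bind(table, 1);
    // ...case 1...
    builder.Bind(table, 2);
    // ...case 2...
    builder.Bind(&fall_through);
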
diff --git a/deps/v8/src/interpreter/bytecode-array-builder.h b/deps/v8/src/interpreter/bytecode-array-builder.h
index bc6d5a39d4..fa336cde13 100644
--- a/deps/v8/src/interpreter/bytecode-array-builder.h
+++ b/deps/v8/src/interpreter/bytecode-array-builder.h
@@ -12,6 +12,7 @@
#include "src/interpreter/bytecode-flags.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/interpreter/handler-table-builder.h"
@@ -26,16 +27,16 @@ namespace interpreter {
class BytecodeLabel;
class BytecodeNode;
-class BytecodePipelineStage;
class BytecodeRegisterOptimizer;
+class BytecodeJumpTable;
class Register;
class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
: public NON_EXPORTED_BASE(ZoneObject) {
public:
BytecodeArrayBuilder(
- Isolate* isolate, Zone* zone, int parameter_count, int context_count,
- int locals_count, FunctionLiteral* literal = nullptr,
+ Isolate* isolate, Zone* zone, int parameter_count, int locals_count,
+ FunctionLiteral* literal = nullptr,
SourcePositionTableBuilder::RecordingMode source_position_mode =
SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS);
@@ -53,17 +54,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
return local_register_count_;
}
- // Get number of contexts required for bytecode array.
- int context_count() const {
- DCHECK_GE(context_register_count_, 0);
- return context_register_count_;
- }
-
- Register first_context_register() const;
- Register last_context_register() const;
-
// Returns the number of fixed (non-temporary) registers.
- int fixed_register_count() const { return context_count() + locals_count(); }
+ int fixed_register_count() const { return locals_count(); }
// Returns the number of fixed and temporary registers.
int total_register_count() const {
@@ -359,6 +351,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Flow Control.
BytecodeArrayBuilder& Bind(BytecodeLabel* label);
BytecodeArrayBuilder& Bind(const BytecodeLabel& target, BytecodeLabel* label);
+ BytecodeArrayBuilder& Bind(BytecodeJumpTable* jump_table, int case_value);
BytecodeArrayBuilder& Jump(BytecodeLabel* label);
BytecodeArrayBuilder& JumpLoop(BytecodeLabel* label, int loop_depth);
@@ -376,6 +369,8 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayBuilder& JumpIfNotNil(BytecodeLabel* label, Token::Value op,
NilValue nil);
+ BytecodeArrayBuilder& SwitchOnSmiNoFeedback(BytecodeJumpTable* jump_table);
+
BytecodeArrayBuilder& StackCheck(int position);
// Sets the pending message to the value in the accumulator, and returns the
@@ -413,6 +408,10 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// entry, so that it can be referenced by above exception handling support.
int NewHandlerEntry() { return handler_table_builder()->NewHandlerEntry(); }
+ // Allocates a new jump table of given |size| and |case_value_base| in the
+ // constant pool.
+ BytecodeJumpTable* AllocateJumpTable(int size, int case_value_base);
+
// Gets a constant pool entry.
size_t GetConstantPoolEntry(const AstRawString* raw_string);
size_t GetConstantPoolEntry(const AstValue* heap_number);
@@ -483,14 +482,18 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Returns the current source position for the given |bytecode|.
INLINE(BytecodeSourceInfo CurrentSourcePosition(Bytecode bytecode));
-#define DECLARE_BYTECODE_OUTPUT(Name, ...) \
- template <typename... Operands> \
- INLINE(void Output##Name(Operands... operands)); \
- template <typename... Operands> \
+#define DECLARE_BYTECODE_OUTPUT(Name, ...) \
+ template <typename... Operands> \
+ INLINE(BytecodeNode Create##Name##Node(Operands... operands)); \
+ template <typename... Operands> \
+ INLINE(void Output##Name(Operands... operands)); \
+ template <typename... Operands> \
INLINE(void Output##Name(BytecodeLabel* label, Operands... operands));
BYTECODE_LIST(DECLARE_BYTECODE_OUTPUT)
#undef DECLARE_OPERAND_TYPE_INFO
+ INLINE(void OutputSwitchOnSmiNoFeedback(BytecodeJumpTable* jump_table));
+
bool RegisterIsValid(Register reg) const;
bool RegisterListIsValid(RegisterList reg_list) const;
@@ -507,6 +510,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
// Write bytecode to bytecode array.
void Write(BytecodeNode* node);
void WriteJump(BytecodeNode* node, BytecodeLabel* label);
+ void WriteSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table);
// Not implemented, as the illegal bytecode is only used internally
// to indicate that a bytecode field is not valid or an error has occurred
@@ -521,7 +525,6 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
BytecodeArrayWriter* bytecode_array_writer() {
return &bytecode_array_writer_;
}
- BytecodePipelineStage* pipeline() { return pipeline_; }
ConstantArrayBuilder* constant_array_builder() {
return &constant_array_builder_;
}
@@ -540,11 +543,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final
bool return_seen_in_block_;
int parameter_count_;
int local_register_count_;
- int context_register_count_;
int return_position_;
BytecodeRegisterAllocator register_allocator_;
BytecodeArrayWriter bytecode_array_writer_;
- BytecodePipelineStage* pipeline_;
BytecodeRegisterOptimizer* register_optimizer_;
BytecodeSourceInfo latest_source_info_;
BytecodeSourceInfo deferred_source_info_;
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.cc b/deps/v8/src/interpreter/bytecode-array-writer.cc
index 22eabc6159..d3cc0204d4 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.cc
+++ b/deps/v8/src/interpreter/bytecode-array-writer.cc
@@ -5,8 +5,11 @@
#include "src/interpreter/bytecode-array-writer.h"
#include "src/api.h"
+#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-node.h"
#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/log.h"
#include "src/objects-inl.h"
@@ -33,10 +36,6 @@ BytecodeArrayWriter::BytecodeArrayWriter(
bytecodes_.reserve(512); // Derived via experimentation.
}
-// override
-BytecodeArrayWriter::~BytecodeArrayWriter() {}
-
-// override
Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
Isolate* isolate, int register_count, int parameter_count,
Handle<FixedArray> handler_table) {
@@ -57,7 +56,6 @@ Handle<BytecodeArray> BytecodeArrayWriter::ToBytecodeArray(
return bytecode_array;
}
-// override
void BytecodeArrayWriter::Write(BytecodeNode* node) {
DCHECK(!Bytecodes::IsJump(node->bytecode()));
@@ -69,7 +67,6 @@ void BytecodeArrayWriter::Write(BytecodeNode* node) {
EmitBytecode(node);
}
-// override
void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
DCHECK(Bytecodes::IsJump(node->bytecode()));
@@ -83,7 +80,20 @@ void BytecodeArrayWriter::WriteJump(BytecodeNode* node, BytecodeLabel* label) {
EmitJump(node, label);
}
-// override
+void BytecodeArrayWriter::WriteSwitch(BytecodeNode* node,
+ BytecodeJumpTable* jump_table) {
+ DCHECK(Bytecodes::IsSwitch(node->bytecode()));
+
+ // TODO(rmcilroy): For jump tables we could also mark the table as dead,
+ // thereby avoiding emitting dead code when we bind the entries.
+ if (exit_seen_in_block_) return; // Don't emit dead code.
+ UpdateExitSeenInBlock(node->bytecode());
+ MaybeElideLastBytecode(node->bytecode(), node->source_info().is_valid());
+
+ UpdateSourcePositionTable(node);
+ EmitSwitch(node, jump_table);
+}
+
void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) {
size_t current_offset = bytecodes()->size();
if (label->is_forward_target()) {
@@ -96,7 +106,6 @@ void BytecodeArrayWriter::BindLabel(BytecodeLabel* label) {
exit_seen_in_block_ = false; // Starting a new basic block.
}
-// override
void BytecodeArrayWriter::BindLabel(const BytecodeLabel& target,
BytecodeLabel* label) {
DCHECK(!label->is_bound());
@@ -112,6 +121,22 @@ void BytecodeArrayWriter::BindLabel(const BytecodeLabel& target,
// changed here.
}
+void BytecodeArrayWriter::BindJumpTableEntry(BytecodeJumpTable* jump_table,
+ int case_value) {
+ DCHECK(!jump_table->is_bound(case_value));
+
+ size_t current_offset = bytecodes()->size();
+ size_t relative_jump = current_offset - jump_table->switch_bytecode_offset();
+
+ constant_array_builder()->SetJumpTableSmi(
+ jump_table->ConstantPoolEntryFor(case_value),
+ Smi::FromInt(static_cast<int>(relative_jump)));
+ jump_table->mark_bound(case_value);
+
+ InvalidateLastBytecode();
+ exit_seen_in_block_ = false; // Starting a new basic block.
+}
+
void BytecodeArrayWriter::UpdateSourcePositionTable(
const BytecodeNode* const node) {
int bytecode_offset = static_cast<int>(bytecodes()->size());
@@ -393,6 +418,20 @@ void BytecodeArrayWriter::EmitJump(BytecodeNode* node, BytecodeLabel* label) {
EmitBytecode(node);
}
+void BytecodeArrayWriter::EmitSwitch(BytecodeNode* node,
+ BytecodeJumpTable* jump_table) {
+ DCHECK(Bytecodes::IsSwitch(node->bytecode()));
+
+ size_t current_offset = bytecodes()->size();
+ if (node->operand_scale() > OperandScale::kSingle) {
+ // Adjust for scaling byte prefix.
+ current_offset += 1;
+ }
+ jump_table->set_switch_bytecode_offset(current_offset);
+
+ EmitBytecode(node);
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
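
Note: the jump-table entries written by BindJumpTableEntry are relative offsets, with EmitSwitch recording the base: the switch's own offset, shifted past any wide/extra-wide prefix byte. A worked example under assumed offsets:

    // Suppose the SwitchOnSmiNoFeedback bytecode lands at offset 20 with a
    // single-byte operand scale, and case 1's entry is bound at offset 57:
    //   switch_bytecode_offset = 20
    //   relative_jump          = 57 - 20 = 37
    // Smi::FromInt(37) is stored in that case's constant-pool slot, and the
    // interpreter adds it back to the switch's offset when dispatching.
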
diff --git a/deps/v8/src/interpreter/bytecode-array-writer.h b/deps/v8/src/interpreter/bytecode-array-writer.h
index 6e9fc02ad8..b2dfae1ddd 100644
--- a/deps/v8/src/interpreter/bytecode-array-writer.h
+++ b/deps/v8/src/interpreter/bytecode-array-writer.h
@@ -7,7 +7,7 @@
#include "src/base/compiler-specific.h"
#include "src/globals.h"
-#include "src/interpreter/bytecode-pipeline.h"
+#include "src/interpreter/bytecodes.h"
#include "src/source-position-table.h"
namespace v8 {
@@ -18,26 +18,27 @@ class SourcePositionTableBuilder;
namespace interpreter {
class BytecodeLabel;
+class BytecodeNode;
+class BytecodeJumpTable;
class ConstantArrayBuilder;
// Class for emitting bytecode as the final stage of the bytecode
// generation pipeline.
-class V8_EXPORT_PRIVATE BytecodeArrayWriter final
- : public NON_EXPORTED_BASE(BytecodePipelineStage) {
+class V8_EXPORT_PRIVATE BytecodeArrayWriter final {
public:
BytecodeArrayWriter(
Zone* zone, ConstantArrayBuilder* constant_array_builder,
SourcePositionTableBuilder::RecordingMode source_position_mode);
- virtual ~BytecodeArrayWriter();
- // BytecodePipelineStage interface.
- void Write(BytecodeNode* node) override;
- void WriteJump(BytecodeNode* node, BytecodeLabel* label) override;
- void BindLabel(BytecodeLabel* label) override;
- void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override;
- Handle<BytecodeArray> ToBytecodeArray(
- Isolate* isolate, int register_count, int parameter_count,
- Handle<FixedArray> handler_table) override;
+ void Write(BytecodeNode* node);
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label);
+ void WriteSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table);
+ void BindLabel(BytecodeLabel* label);
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label);
+ void BindJumpTableEntry(BytecodeJumpTable* jump_table, int case_value);
+ Handle<BytecodeArray> ToBytecodeArray(Isolate* isolate, int register_count,
+ int parameter_count,
+ Handle<FixedArray> handler_table);
private:
// A maximum-sized packed bytecode consists of a prefix bytecode,
@@ -63,6 +64,7 @@ class V8_EXPORT_PRIVATE BytecodeArrayWriter final
void EmitBytecode(const BytecodeNode* const node);
void EmitJump(BytecodeNode* node, BytecodeLabel* label);
+ void EmitSwitch(BytecodeNode* node, BytecodeJumpTable* jump_table);
void UpdateSourcePositionTable(const BytecodeNode* const node);
void UpdateExitSeenInBlock(Bytecode bytecode);
diff --git a/deps/v8/src/interpreter/bytecode-flags.cc b/deps/v8/src/interpreter/bytecode-flags.cc
index 25f5260de2..4d50bf69c3 100644
--- a/deps/v8/src/interpreter/bytecode-flags.cc
+++ b/deps/v8/src/interpreter/bytecode-flags.cc
@@ -23,19 +23,10 @@ uint8_t CreateArrayLiteralFlags::Encode(bool use_fast_shallow_clone,
}
// static
-uint8_t CreateObjectLiteralFlags::Encode(bool fast_clone_supported,
- int properties_count,
- int runtime_flags) {
+uint8_t CreateObjectLiteralFlags::Encode(int runtime_flags,
+ bool fast_clone_supported) {
uint8_t result = FlagsBits::encode(runtime_flags);
- if (fast_clone_supported) {
- STATIC_ASSERT(
- ConstructorBuiltins::kMaximumClonedShallowObjectProperties <=
- 1 << CreateObjectLiteralFlags::FastClonePropertiesCountBits::kShift);
- DCHECK_LE(properties_count,
- ConstructorBuiltins::kMaximumClonedShallowObjectProperties);
- result |= CreateObjectLiteralFlags::FastClonePropertiesCountBits::encode(
- properties_count);
- }
+ result |= FastCloneSupportedBit::encode(fast_clone_supported);
return result;
}
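
Note: both literal-flag encodings now reserve four bits for the runtime flags, with a single bool packed above them (the BitField8 declarations follow in bytecode-flags.h below). The resulting CreateObjectLiteralFlags layout and encode arithmetic, as a sketch:

    // bit 7..5: unused
    // bit 4:    FastCloneSupportedBit
    // bit 3..0: FlagsBits (runtime flags)
    //
    // Encode(/*runtime_flags=*/0b0101, /*fast_clone_supported=*/true)
    //     == 0b0101 | (1 << 4) == 0x15
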
diff --git a/deps/v8/src/interpreter/bytecode-flags.h b/deps/v8/src/interpreter/bytecode-flags.h
index 0256bc249b..76e5f868c5 100644
--- a/deps/v8/src/interpreter/bytecode-flags.h
+++ b/deps/v8/src/interpreter/bytecode-flags.h
@@ -18,7 +18,7 @@ namespace interpreter {
class CreateArrayLiteralFlags {
public:
- class FlagsBits : public BitField8<int, 0, 3> {};
+ class FlagsBits : public BitField8<int, 0, 4> {};
class FastShallowCloneBit : public BitField8<bool, FlagsBits::kNext, 1> {};
static uint8_t Encode(bool use_fast_shallow_clone, int runtime_flags);
@@ -29,12 +29,10 @@ class CreateArrayLiteralFlags {
class CreateObjectLiteralFlags {
public:
- class FlagsBits : public BitField8<int, 0, 3> {};
- class FastClonePropertiesCountBits
- : public BitField8<int, FlagsBits::kNext, 3> {};
+ class FlagsBits : public BitField8<int, 0, 4> {};
+ class FastCloneSupportedBit : public BitField8<bool, FlagsBits::kNext, 1> {};
- static uint8_t Encode(bool fast_clone_supported, int properties_count,
- int runtime_flags);
+ static uint8_t Encode(int runtime_flags, bool fast_clone_supported);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CreateObjectLiteralFlags);
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index 87f2e1f0a6..7ca2c37607 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -11,6 +11,7 @@
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/interpreter/bytecode-flags.h"
+#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/interpreter/control-flow-builders.h"
@@ -40,8 +41,8 @@ class BytecodeGenerator::ContextScope BASE_EMBEDDED {
depth_ = outer_->depth_ + 1;
// Push the outer context into a new context register.
- Register outer_context_reg(builder()->first_context_register().index() +
- outer_->depth_);
+ Register outer_context_reg =
+ generator_->register_allocator()->NewRegister();
outer_->set_register(outer_context_reg);
generator_->builder()->PushContext(outer_context_reg);
}
@@ -145,7 +146,10 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
: generator_(generator),
deferred_(generator->zone()),
token_register_(token_register),
- result_register_(result_register) {}
+ result_register_(result_register),
+ return_token_(-1),
+ async_return_token_(-1),
+ rethrow_token_(-1) {}
// One recorded control-flow command.
struct Entry {
@@ -158,8 +162,12 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
// generates a new dispatch token that identifies one particular path. This
// expects the result to be in the accumulator.
void RecordCommand(Command command, Statement* statement) {
- int token = static_cast<int>(deferred_.size());
- deferred_.push_back({command, statement, token});
+ int token = GetTokenForCommand(command, statement);
+
+ DCHECK_LT(token, deferred_.size());
+ DCHECK_EQ(deferred_[token].command, command);
+ DCHECK_EQ(deferred_[token].statement, statement);
+ DCHECK_EQ(deferred_[token].token, token);
builder()->StoreAccumulatorInRegister(result_register_);
builder()->LoadLiteral(Smi::FromInt(token));
@@ -184,32 +192,98 @@ class BytecodeGenerator::ControlScope::DeferredCommands final {
// Applies all recorded control-flow commands after the finally-block again.
// This generates a dynamic dispatch on the token from the entry point.
void ApplyDeferredCommands() {
- // The fall-through path is covered by the default case, hence +1 here.
- SwitchBuilder dispatch(builder(), static_cast<int>(deferred_.size() + 1));
- for (size_t i = 0; i < deferred_.size(); ++i) {
- Entry& entry = deferred_[i];
- builder()->LoadLiteral(Smi::FromInt(entry.token));
- builder()->CompareOperation(Token::EQ_STRICT, token_register_);
- dispatch.Case(ToBooleanMode::kAlreadyBoolean, static_cast<int>(i));
- }
- dispatch.DefaultAt(static_cast<int>(deferred_.size()));
- for (size_t i = 0; i < deferred_.size(); ++i) {
- Entry& entry = deferred_[i];
- dispatch.SetCaseTarget(static_cast<int>(i));
+ if (deferred_.size() == 0) return;
+
+ BytecodeLabel fall_through;
+
+ if (deferred_.size() == 1) {
+ // For a single entry, just jump to the fallthrough if we don't match the
+ // entry token.
+ const Entry& entry = deferred_[0];
+
+ builder()
+ ->LoadLiteral(Smi::FromInt(entry.token))
+ .CompareOperation(Token::EQ_STRICT, token_register_)
+ .JumpIfFalse(ToBooleanMode::kAlreadyBoolean, &fall_through);
+
builder()->LoadAccumulatorWithRegister(result_register_);
execution_control()->PerformCommand(entry.command, entry.statement);
+ } else {
+ // For multiple entries, build a jump table and switch on the token,
+ // jumping to the fallthrough if none of them match.
+
+ BytecodeJumpTable* jump_table =
+ builder()->AllocateJumpTable(static_cast<int>(deferred_.size()), 0);
+ builder()
+ ->LoadAccumulatorWithRegister(token_register_)
+ .SwitchOnSmiNoFeedback(jump_table)
+ .Jump(&fall_through);
+ for (const Entry& entry : deferred_) {
+ builder()
+ ->Bind(jump_table, entry.token)
+ .LoadAccumulatorWithRegister(result_register_);
+ execution_control()->PerformCommand(entry.command, entry.statement);
+ }
}
- dispatch.SetCaseTarget(static_cast<int>(deferred_.size()));
+
+ builder()->Bind(&fall_through);
}
BytecodeArrayBuilder* builder() { return generator_->builder(); }
ControlScope* execution_control() { return generator_->execution_control(); }
private:
+ int GetTokenForCommand(Command command, Statement* statement) {
+ switch (command) {
+ case CMD_RETURN:
+ return GetReturnToken();
+ case CMD_ASYNC_RETURN:
+ return GetAsyncReturnToken();
+ case CMD_RETHROW:
+ return GetRethrowToken();
+ default:
+ // TODO(leszeks): We could also search for entries with the same
+ // command and statement.
+ return GetNewTokenForCommand(command, statement);
+ }
+ }
+
+ int GetReturnToken() {
+ if (return_token_ == -1) {
+ return_token_ = GetNewTokenForCommand(CMD_RETURN, nullptr);
+ }
+ return return_token_;
+ }
+
+ int GetAsyncReturnToken() {
+ if (async_return_token_ == -1) {
+ async_return_token_ = GetNewTokenForCommand(CMD_ASYNC_RETURN, nullptr);
+ }
+ return async_return_token_;
+ }
+
+ int GetRethrowToken() {
+ if (rethrow_token_ == -1) {
+ rethrow_token_ = GetNewTokenForCommand(CMD_RETHROW, nullptr);
+ }
+ return rethrow_token_;
+ }
+
+ int GetNewTokenForCommand(Command command, Statement* statement) {
+ int token = static_cast<int>(deferred_.size());
+ deferred_.push_back({command, statement, token});
+ return token;
+ }
+
BytecodeGenerator* generator_;
ZoneVector<Entry> deferred_;
Register token_register_;
Register result_register_;
+
+ // Tokens for commands that don't need a statement.
+ int return_token_;
+ int async_return_token_;
+ int rethrow_token_;
};
// Scoped class for dealing with control flow reaching the function level.
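
Note: the motivation for GetTokenForCommand is that return, async-return, and rethrow commands carry no per-site statement, so every such command inside one try-finally can share a single token and hence a single jump-table entry. A sketch of the effect, assuming two returns in one try block:

    // function f() { try { if (a) return x; return y; } finally { ... } }
    //
    // Both returns record CMD_RETURN and receive the same token (say 0), so
    // deferred_.size() == 1 and ApplyDeferredCommands() takes the
    // single-entry fast path: one compare against Smi 0 plus a JumpIfFalse,
    // instead of a jump table with one slot per return site.
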
@@ -626,7 +700,6 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
: zone_(info->zone()),
builder_(new (zone()) BytecodeArrayBuilder(
info->isolate(), info->zone(), info->num_parameters_including_this(),
- info->scope()->MaxNestedContextChainLength(),
info->scope()->num_stack_slots(), info->literal(),
info->SourcePositionRecordingMode())),
info_(info),
@@ -642,7 +715,7 @@ BytecodeGenerator::BytecodeGenerator(CompilationInfo* info)
execution_control_(nullptr),
execution_context_(nullptr),
execution_result_(nullptr),
- generator_resume_points_(info->literal()->suspend_count(), info->zone()),
+ generator_jump_table_(nullptr),
generator_state_(),
loop_depth_(0) {
DCHECK_EQ(closure_scope(), closure_scope()->GetClosureScope());
@@ -722,9 +795,8 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
RegisterAllocationScope register_scope(this);
- if (IsResumableFunction(info()->literal()->kind())) {
- generator_state_ = register_allocator()->NewRegister();
- VisitGeneratorPrologue();
+ if (info()->literal()->CanSuspend()) {
+ BuildGeneratorPrologue();
}
if (closure_scope()->NeedsContext()) {
@@ -737,14 +809,6 @@ void BytecodeGenerator::GenerateBytecode(uintptr_t stack_limit) {
GenerateBytecodeBody();
}
- // In generator functions, we may not have visited every yield in the AST
- // since we skip some obviously dead code. Hence the generated bytecode may
- // contain jumps to unbound labels (resume points that will never be used).
- // We bind these now.
- for (auto& label : generator_resume_points_) {
- if (!label.is_bound()) builder()->Bind(&label);
- }
-
// Emit an implicit return instruction in case control flow can fall off the
// end of the function without an explicit return being present on all paths.
if (builder()->RequiresImplicitReturn()) {
@@ -768,6 +832,12 @@ void BytecodeGenerator::GenerateBytecodeBody() {
// Build assignment to {new.target} variable if it is used.
VisitNewTargetVariable(closure_scope()->new_target_var());
+ // Create a generator object if necessary and initialize the
+ // {.generator_object} variable.
+ if (info()->literal()->CanSuspend()) {
+ BuildGeneratorObjectVariableInitialization();
+ }
+
// Emit tracing call if requested to do so.
if (FLAG_trace) builder()->CallRuntime(Runtime::kTraceEnter);
@@ -794,20 +864,6 @@ void BytecodeGenerator::GenerateBytecodeBody() {
VisitStatements(info()->literal()->body());
}
-void BytecodeGenerator::BuildIndexedJump(Register index, size_t start_index,
- size_t size,
- ZoneVector<BytecodeLabel>& targets) {
- // TODO(neis): Optimize this by using a proper jump table.
- DCHECK_LE(start_index + size, targets.size());
- for (size_t i = start_index; i < start_index + size; i++) {
- builder()
- ->LoadLiteral(Smi::FromInt(static_cast<int>(i)))
- .CompareOperation(Token::Value::EQ_STRICT, index)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &(targets[i]));
- }
- BuildAbort(BailoutReason::kInvalidJumpTableIndex);
-}
-
void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
LoopBuilder* loop_builder) {
// Recall that stmt->suspend_count() is always zero inside ordinary
@@ -815,36 +871,39 @@ void BytecodeGenerator::VisitIterationHeader(IterationStatement* stmt,
if (stmt->suspend_count() == 0) {
loop_builder->LoopHeader();
} else {
- // Collect all labels for generator resume points within the loop (if any)
- // so that they can be bound to the loop header below. Also create fresh
- // labels for these resume points, to be used inside the loop.
- ZoneVector<BytecodeLabel> resume_points_in_loop(zone());
- size_t first_yield = stmt->first_suspend_id();
- DCHECK_LE(first_yield + stmt->suspend_count(),
- generator_resume_points_.size());
- for (size_t id = first_yield; id < first_yield + stmt->suspend_count();
- id++) {
- auto& label = generator_resume_points_[id];
- resume_points_in_loop.push_back(label);
- generator_resume_points_[id] = BytecodeLabel();
- }
-
- loop_builder->LoopHeader(&resume_points_in_loop);
-
- // If we are not resuming, fall through to loop body.
- // If we are resuming, perform state dispatch.
+ loop_builder->LoopHeaderInGenerator(
+ &generator_jump_table_, static_cast<int>(stmt->first_suspend_id()),
+ static_cast<int>(stmt->suspend_count()));
+
+ // Perform state dispatch on the generator state, assuming this is a resume.
+ builder()
+ ->LoadAccumulatorWithRegister(generator_state_)
+ .SwitchOnSmiNoFeedback(generator_jump_table_);
+
+ // We fall through when the generator state is not in the jump table. If we
+ // are not resuming, we want to fall through to the loop body.
+ // TODO(leszeks): Only generate this test for debug builds; we can skip it
+ // entirely in release, assuming that the generator state is always valid.
BytecodeLabel not_resuming;
builder()
->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
.CompareOperation(Token::Value::EQ_STRICT, generator_state_)
.JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &not_resuming);
- BuildIndexedJump(generator_state_, first_yield, stmt->suspend_count(),
- generator_resume_points_);
+
+ // Otherwise this is an error.
+ BuildAbort(BailoutReason::kInvalidJumpTableIndex);
+
builder()->Bind(&not_resuming);
}
}
-void BytecodeGenerator::VisitGeneratorPrologue() {
+void BytecodeGenerator::BuildGeneratorPrologue() {
+ DCHECK_GT(info()->literal()->suspend_count(), 0);
+
+ generator_state_ = register_allocator()->NewRegister();
+ generator_jump_table_ =
+ builder()->AllocateJumpTable(info()->literal()->suspend_count(), 0);
+
// The generator resume trampoline abuses the new.target register both to
// indicate that this is a resume call and to pass in the generator object.
// In ordinary calls, new.target is always undefined because generator
@@ -855,24 +914,27 @@ void BytecodeGenerator::VisitGeneratorPrologue() {
->LoadAccumulatorWithRegister(generator_object)
.JumpIfUndefined(&regular_call);
- // This is a resume call. Restore the current context and the registers, then
- // perform state dispatch.
- Register dummy = register_allocator()->NewRegister();
+ // This is a resume call. Restore the current context and the registers,
+ // then perform state dispatch.
+ Register generator_context = register_allocator()->NewRegister();
builder()
->CallRuntime(Runtime::kInlineGeneratorGetContext, generator_object)
- .PushContext(dummy)
+ .PushContext(generator_context)
.ResumeGenerator(generator_object)
- .StoreAccumulatorInRegister(generator_state_);
- BuildIndexedJump(generator_state_, 0, generator_resume_points_.size(),
- generator_resume_points_);
+ .StoreAccumulatorInRegister(generator_state_)
+ .SwitchOnSmiNoFeedback(generator_jump_table_);
+ // We fall through when the generator state is not in the jump table.
+ // TODO(leszeks): Only generate this for debug builds.
+ BuildAbort(BailoutReason::kInvalidJumpTableIndex);
+ // This is a regular call.
builder()
->Bind(&regular_call)
.LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
.StoreAccumulatorInRegister(generator_state_);
- // This is a regular call. Fall through to the ordinary function prologue,
- // after which we will run into the generator object creation and other extra
- // code inserted by the parser.
+ // Now fall through to the ordinary function prologue, after which we will run
+ // into the generator object creation and other extra code inserted by the
+ // parser.
}
void BytecodeGenerator::VisitBlock(Block* stmt) {
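
Note: the prologue built above replaces the old linear compare-and-jump chain with one switch. In pseudo-bytecode, the entry of a generator with two suspend points now looks roughly like this (register names and labels are illustrative):

    //   Ldar <new.target>
    //   JumpIfUndefined @regular_call
    //   CallRuntime [InlineGeneratorGetContext], <generator>
    //   PushContext r_ctx
    //   ResumeGenerator <generator>
    //   Star r_state
    //   SwitchOnSmiNoFeedback [0 -> @resume_0, 1 -> @resume_1]
    //   Abort kInvalidJumpTableIndex   ; state was not in the table
    // regular_call:
    //   LdaSmi [kGeneratorExecuting]
    //   Star r_state
    //   ...ordinary function prologue continues...
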
@@ -1203,7 +1265,6 @@ void BytecodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
loop_backbranch.Bind(builder());
loop_builder.JumpToHeader(loop_depth_);
}
- loop_builder.EndLoop();
}
void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
@@ -1223,7 +1284,6 @@ void BytecodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
}
VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader(loop_depth_);
- loop_builder.EndLoop();
}
void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
@@ -1251,7 +1311,6 @@ void BytecodeGenerator::VisitForStatement(ForStatement* stmt) {
Visit(stmt->next());
}
loop_builder.JumpToHeader(loop_depth_);
- loop_builder.EndLoop();
}
void BytecodeGenerator::VisitForInAssignment(Expression* expr,
@@ -1328,7 +1387,6 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
return;
}
- LoopBuilder loop_builder(builder());
BytecodeLabel subject_null_label, subject_undefined_label;
// Prepare the state for executing ForIn.
@@ -1350,20 +1408,22 @@ void BytecodeGenerator::VisitForInStatement(ForInStatement* stmt) {
builder()->StoreAccumulatorInRegister(index);
// The loop
- VisitIterationHeader(stmt, &loop_builder);
- builder()->SetExpressionAsStatementPosition(stmt->each());
- builder()->ForInContinue(index, cache_length);
- loop_builder.BreakIfFalse(ToBooleanMode::kAlreadyBoolean);
- FeedbackSlot slot = stmt->ForInFeedbackSlot();
- builder()->ForInNext(receiver, index, triple.Truncate(2),
- feedback_index(slot));
- loop_builder.ContinueIfUndefined();
- VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
- VisitIterationBody(stmt, &loop_builder);
- builder()->ForInStep(index);
- builder()->StoreAccumulatorInRegister(index);
- loop_builder.JumpToHeader(loop_depth_);
- loop_builder.EndLoop();
+ {
+ LoopBuilder loop_builder(builder());
+ VisitIterationHeader(stmt, &loop_builder);
+ builder()->SetExpressionAsStatementPosition(stmt->each());
+ builder()->ForInContinue(index, cache_length);
+ loop_builder.BreakIfFalse(ToBooleanMode::kAlreadyBoolean);
+ FeedbackSlot slot = stmt->ForInFeedbackSlot();
+ builder()->ForInNext(receiver, index, triple.Truncate(2),
+ feedback_index(slot));
+ loop_builder.ContinueIfUndefined();
+ VisitForInAssignment(stmt->each(), stmt->EachFeedbackSlot());
+ VisitIterationBody(stmt, &loop_builder);
+ builder()->ForInStep(index);
+ builder()->StoreAccumulatorInRegister(index);
+ loop_builder.JumpToHeader(loop_depth_);
+ }
builder()->Bind(&subject_null_label);
builder()->Bind(&subject_undefined_label);
}
@@ -1383,7 +1443,6 @@ void BytecodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
VisitForEffect(stmt->assign_each());
VisitIterationBody(stmt, &loop_builder);
loop_builder.JumpToHeader(loop_depth_);
- loop_builder.EndLoop();
}
void BytecodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
@@ -1497,7 +1556,8 @@ void BytecodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
function_literals_.push_back(std::make_pair(expr, entry));
}
-void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
+void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr) {
+ VisitDeclarations(expr->scope()->declarations());
Register constructor = VisitForRegisterValue(expr->constructor());
{
RegisterAllocationScope register_scope(this);
@@ -1534,6 +1594,18 @@ void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
}
}
+void BytecodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
+ CurrentScope current_scope(this, expr->scope());
+ DCHECK_NOT_NULL(expr->scope());
+ if (expr->scope()->NeedsContext()) {
+ BuildNewLocalBlockContext(expr->scope());
+ ContextScope scope(this, expr->scope());
+ BuildClassLiteral(expr);
+ } else {
+ BuildClassLiteral(expr);
+ }
+}
+
void BytecodeGenerator::VisitClassLiteralProperties(ClassLiteral* expr,
Register constructor,
Register prototype) {
@@ -1680,10 +1752,7 @@ void BytecodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Deep-copy the literal boilerplate.
uint8_t flags = CreateObjectLiteralFlags::Encode(
- expr->IsFastCloningSupported(),
- ConstructorBuiltins::FastCloneShallowObjectPropertiesCount(
- expr->properties_count()),
- expr->ComputeFlags());
+ expr->ComputeFlags(), expr->IsFastCloningSupported());
Register literal = register_allocator()->NewRegister();
size_t entry;
@@ -1695,6 +1764,9 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
entry = builder()->AllocateDeferredConstantPoolEntry();
object_literals_.push_back(std::make_pair(expr, entry));
}
+ // TODO(cbruni): Directly generate a runtime call for literals we cannot
+ // optimize, once the FastCloneShallowObject stub is in sync with the TF
+ // optimizations.
builder()->CreateObjectLiteral(entry, feedback_index(expr->literal_slot()),
flags, literal);
@@ -1756,6 +1828,8 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
}
case ObjectLiteral::Property::PROTOTYPE: {
+ // __proto__:null is handled by CreateObjectLiteral.
+ if (property->IsNullPrototype()) break;
DCHECK(property->emit_store());
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
@@ -1805,7 +1879,9 @@ void BytecodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
ObjectLiteral::Property* property = expr->properties()->at(property_index);
RegisterAllocationScope inner_register_scope(this);
- if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
+ if (property->IsPrototype()) {
+ // __proto__:null is handled by CreateObjectLiteral.
+ if (property->IsNullPrototype()) continue;
DCHECK(property->emit_store());
RegisterList args = register_allocator()->NewRegisterList(2);
builder()->MoveRegister(literal, args[0]);
@@ -2104,8 +2180,6 @@ void BytecodeGenerator::BuildThrowReferenceError(const AstRawString* name) {
}
void BytecodeGenerator::BuildThrowIfHole(Variable* variable) {
- // TODO(interpreter): Can the parser reduce the number of checks
- // performed? Or should there be a ThrowIfHole bytecode.
BytecodeLabel no_reference_error;
builder()->JumpIfNotHole(&no_reference_error);
@@ -2380,12 +2454,13 @@ void BytecodeGenerator::VisitAssignment(Assignment* expr) {
}
}
-void BytecodeGenerator::VisitSuspend(Suspend* expr) {
+void BytecodeGenerator::BuildGeneratorSuspend(Suspend* expr,
+ Register generator) {
+ RegisterAllocationScope register_scope(this);
+
builder()->SetExpressionPosition(expr);
Register value = VisitForRegisterValue(expr->expression());
- Register generator = VisitForRegisterValue(expr->generator_object());
-
// Save context, registers, and state. Then return.
builder()
->LoadLiteral(Smi::FromInt(expr->suspend_id()))
@@ -2394,98 +2469,99 @@ void BytecodeGenerator::VisitSuspend(Suspend* expr) {
if (expr->IsNonInitialAsyncGeneratorYield()) {
// AsyncGenerator yields (with the exception of the initial yield) delegate
// to AsyncGeneratorResolve(), implemented via the runtime call below.
- RegisterList args = register_allocator()->NewRegisterList(2);
-
- int context_index = expr->is_yield_star()
- ? Context::ASYNC_GENERATOR_RAW_YIELD
- : Context::ASYNC_GENERATOR_YIELD;
+ RegisterList args = register_allocator()->NewRegisterList(3);
- // Async GeneratorYield:
+ // AsyncGeneratorYield:
// perform AsyncGeneratorResolve(<generator>, <value>, false).
builder()
->MoveRegister(generator, args[0])
.MoveRegister(value, args[1])
- .CallJSRuntime(context_index, args);
+ .LoadFalse()
+ .StoreAccumulatorInRegister(args[2])
+ .CallRuntime(Runtime::kInlineAsyncGeneratorResolve, args);
} else {
builder()->LoadAccumulatorWithRegister(value);
}
builder()->Return(); // Hard return (ignore any finally blocks).
+}
- builder()->Bind(&(generator_resume_points_[expr->suspend_id()]));
- // Upon resume, we continue here.
-
- {
- RegisterAllocationScope register_scope(this);
-
- // Update state to indicate that we have finished resuming. Loop headers
- // rely on this.
- builder()
- ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
- .StoreAccumulatorInRegister(generator_state_);
+void BytecodeGenerator::BuildGeneratorResume(Suspend* expr,
+ Register generator) {
+ RegisterAllocationScope register_scope(this);
- Register input = register_allocator()->NewRegister();
+ // Update state to indicate that we have finished resuming. Loop headers
+ // rely on this.
+ builder()
+ ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))
+ .StoreAccumulatorInRegister(generator_state_);
- // When resuming an Async Generator from an Await expression, the sent
- // value is in the [[await_input_or_debug_pos]] slot. Otherwise, the sent
- // value is in the [[input_or_debug_pos]] slot.
- Runtime::FunctionId get_generator_input =
- expr->is_async_generator() && expr->is_await()
- ? Runtime::kInlineAsyncGeneratorGetAwaitInputOrDebugPos
- : Runtime::kInlineGeneratorGetInputOrDebugPos;
+ Register input = register_allocator()->NewRegister();
- builder()
- ->CallRuntime(get_generator_input, generator)
- .StoreAccumulatorInRegister(input);
+ // When resuming an Async Generator from an Await expression, the sent
+ // value is in the [[await_input_or_debug_pos]] slot. Otherwise, the sent
+ // value is in the [[input_or_debug_pos]] slot.
+ Runtime::FunctionId get_generator_input =
+ expr->is_async_generator() && expr->is_await()
+ ? Runtime::kInlineAsyncGeneratorGetAwaitInputOrDebugPos
+ : Runtime::kInlineGeneratorGetInputOrDebugPos;
- Register resume_mode = register_allocator()->NewRegister();
- builder()
- ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator)
- .StoreAccumulatorInRegister(resume_mode);
+ DCHECK(generator.is_valid());
+ builder()
+ ->CallRuntime(get_generator_input, generator)
+ .StoreAccumulatorInRegister(input);
- // Now dispatch on resume mode.
+ Register resume_mode = register_allocator()->NewRegister();
+ builder()
+ ->CallRuntime(Runtime::kInlineGeneratorGetResumeMode, generator)
+ .StoreAccumulatorInRegister(resume_mode);
- BytecodeLabel resume_with_next;
- BytecodeLabel resume_with_return;
- BytecodeLabel resume_with_throw;
+ // Now dispatch on resume mode.
- builder()
- ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kNext))
- .CompareOperation(Token::EQ_STRICT, resume_mode)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &resume_with_next)
- .LoadLiteral(Smi::FromInt(JSGeneratorObject::kThrow))
- .CompareOperation(Token::EQ_STRICT, resume_mode)
- .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &resume_with_throw)
- .Jump(&resume_with_return);
-
- builder()->Bind(&resume_with_return);
- {
- if (expr->is_async_generator()) {
- // Async generator methods will produce the iter result object.
- builder()->LoadAccumulatorWithRegister(input);
- execution_control()->AsyncReturnAccumulator();
- } else {
- RegisterList args = register_allocator()->NewRegisterList(2);
- builder()
- ->MoveRegister(input, args[0])
- .LoadTrue()
- .StoreAccumulatorInRegister(args[1])
- .CallRuntime(Runtime::kInlineCreateIterResultObject, args);
- execution_control()->ReturnAccumulator();
- }
- }
+ BytecodeLabel resume_with_next;
+ BytecodeLabel resume_with_throw;
- builder()->Bind(&resume_with_throw);
- builder()->SetExpressionPosition(expr);
+ builder()
+ ->LoadLiteral(Smi::FromInt(JSGeneratorObject::kNext))
+ .CompareOperation(Token::EQ_STRICT, resume_mode)
+ .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &resume_with_next)
+ .LoadLiteral(Smi::FromInt(JSGeneratorObject::kThrow))
+ .CompareOperation(Token::EQ_STRICT, resume_mode)
+ .JumpIfTrue(ToBooleanMode::kAlreadyBoolean, &resume_with_throw);
+ // Fall through for resuming with return.
+
+ if (expr->is_async_generator()) {
+ // Async generator methods will produce the iter result object.
builder()->LoadAccumulatorWithRegister(input);
- if (expr->rethrow_on_exception()) {
- builder()->ReThrow();
- } else {
- builder()->Throw();
- }
+ execution_control()->AsyncReturnAccumulator();
+ } else {
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->MoveRegister(input, args[0])
+ .LoadTrue()
+ .StoreAccumulatorInRegister(args[1])
+ .CallRuntime(Runtime::kInlineCreateIterResultObject, args);
+ execution_control()->ReturnAccumulator();
+ }
- builder()->Bind(&resume_with_next);
- builder()->LoadAccumulatorWithRegister(input);
+ builder()->Bind(&resume_with_throw);
+ builder()->SetExpressionPosition(expr);
+ builder()->LoadAccumulatorWithRegister(input);
+ if (expr->rethrow_on_exception()) {
+ builder()->ReThrow();
+ } else {
+ builder()->Throw();
}
+
+ builder()->Bind(&resume_with_next);
+ builder()->LoadAccumulatorWithRegister(input);
+}
+
+void BytecodeGenerator::VisitSuspend(Suspend* expr) {
+ Register generator = VisitForRegisterValue(expr->generator_object());
+ BuildGeneratorSuspend(expr, generator);
+ builder()->Bind(generator_jump_table_, static_cast<int>(expr->suspend_id()));
+ // Upon resume, we continue here.
+ BuildGeneratorResume(expr, generator);
}
void BytecodeGenerator::VisitThrow(Throw* expr) {
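
Note: splitting VisitSuspend into suspend and resume halves makes the pairing with the prologue's jump table explicit: the suspend path ends in a hard Return, and the very next bytecode is the resume point that entry suspend_id of generator_jump_table_ dispatches to. A condensed sketch of the emitted order for one yield (illustrative, not literal output):

    //   LdaSmi [suspend_id]
    //   SuspendGenerator <generator>   ; save context, registers, state
    //   Ldar r_value
    //   Return                         ; hard return, skips finally blocks
    // resume_<suspend_id>:             ; bound via Bind(jump_table, id)
    //   LdaSmi [kGeneratorExecuting]
    //   Star r_state                   ; loop headers rely on this
    //   ...load the sent value and dispatch on the resume mode...
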
@@ -3511,6 +3587,20 @@ void BytecodeGenerator::VisitNewTargetVariable(Variable* variable) {
builder()->Bind(&flush_state_label);
}
+void BytecodeGenerator::BuildGeneratorObjectVariableInitialization() {
+ DCHECK(IsResumableFunction(info()->literal()->kind()));
+ DCHECK_NOT_NULL(closure_scope()->generator_object_var());
+
+ RegisterAllocationScope register_scope(this);
+ RegisterList args = register_allocator()->NewRegisterList(2);
+ builder()
+ ->MoveRegister(Register::function_closure(), args[0])
+ .MoveRegister(builder()->Receiver(), args[1])
+ .CallRuntime(Runtime::kInlineCreateJSGeneratorObject, args);
+ BuildVariableAssignment(closure_scope()->generator_object_var(), Token::INIT,
+ FeedbackSlot::Invalid(), HoleCheckMode::kElided);
+}
+
void BytecodeGenerator::VisitFunctionClosureForContext() {
ValueResultScope value_execution_result(this);
if (closure_scope()->is_script_scope()) {
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 8ad09f686a..6e277e3799 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -21,6 +21,7 @@ namespace interpreter {
class GlobalDeclarationsBuilder;
class LoopBuilder;
+class BytecodeJumpTable;
class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
public:
@@ -133,7 +134,9 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void BuildNewLocalCatchContext(Scope* scope);
void BuildNewLocalWithContext(Scope* scope);
- void VisitGeneratorPrologue();
+ void BuildGeneratorPrologue();
+ void BuildGeneratorSuspend(Suspend* expr, Register generator);
+ void BuildGeneratorResume(Suspend* expr, Register generator);
void VisitArgumentsObject(Variable* variable);
void VisitRestArgumentsArray(Variable* rest);
@@ -141,8 +144,10 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
void VisitClassLiteralProperties(ClassLiteral* expr, Register constructor,
Register prototype);
void BuildClassLiteralNameProperty(ClassLiteral* expr, Register constructor);
+ void BuildClassLiteral(ClassLiteral* expr);
void VisitThisFunctionVariable(Variable* variable);
void VisitNewTargetVariable(Variable* variable);
+ void BuildGeneratorObjectVariableInitialization();
void VisitBlockDeclarationsAndStatements(Block* stmt);
void VisitFunctionClosureForContext();
void VisitSetHomeObject(Register value, Register home_object,
@@ -237,7 +242,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
ContextScope* execution_context_;
ExpressionResultScope* execution_result_;
- ZoneVector<BytecodeLabel> generator_resume_points_;
+ BytecodeJumpTable* generator_jump_table_;
Register generator_state_;
int loop_depth_;
};
diff --git a/deps/v8/src/interpreter/bytecode-jump-table.h b/deps/v8/src/interpreter/bytecode-jump-table.h
new file mode 100644
index 0000000000..b0a36cadbb
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-jump-table.h
@@ -0,0 +1,88 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_JUMP_TABLE_H_
+#define V8_INTERPRETER_BYTECODE_JUMP_TABLE_H_
+
+#include "src/bit-vector.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class ConstantArrayBuilder;
+
+// A jump table for a set of targets in a bytecode array. When an entry in the
+// table is bound, it represents a known position in the bytecode array. If no
+// entries match, the switch falls through.
+class V8_EXPORT_PRIVATE BytecodeJumpTable final : public ZoneObject {
+ public:
+ // Constructs a new BytecodeJumpTable starting at |constant_pool_index|, with
+ // the given |size|, where the case values of the table start at
+ // |case_value_base|.
+ BytecodeJumpTable(size_t constant_pool_index, int size, int case_value_base,
+ Zone* zone)
+ :
+#ifdef DEBUG
+ bound_(size, zone),
+#endif
+ constant_pool_index_(constant_pool_index),
+ switch_bytecode_offset_(kInvalidOffset),
+ size_(size),
+ case_value_base_(case_value_base) {
+ }
+
+ size_t constant_pool_index() const { return constant_pool_index_; }
+ size_t switch_bytecode_offset() const { return switch_bytecode_offset_; }
+ int case_value_base() const { return case_value_base_; }
+ int size() const { return size_; }
+#ifdef DEBUG
+ bool is_bound(int case_value) const {
+ DCHECK_GE(case_value, case_value_base_);
+ DCHECK_LT(case_value, case_value_base_ + size());
+ return bound_.Contains(case_value - case_value_base_);
+ }
+#endif
+
+ size_t ConstantPoolEntryFor(int case_value) {
+ DCHECK_GE(case_value, case_value_base_);
+ return constant_pool_index_ + case_value - case_value_base_;
+ }
+
+ private:
+ static const size_t kInvalidIndex = static_cast<size_t>(-1);
+ static const size_t kInvalidOffset = static_cast<size_t>(-1);
+
+ void mark_bound(int case_value) {
+#ifdef DEBUG
+ DCHECK_GE(case_value, case_value_base_);
+ DCHECK_LT(case_value, case_value_base_ + size());
+ bound_.Add(case_value - case_value_base_);
+#endif
+ }
+
+ void set_switch_bytecode_offset(size_t offset) {
+ DCHECK_EQ(switch_bytecode_offset_, kInvalidOffset);
+ switch_bytecode_offset_ = offset;
+ }
+
+#ifdef DEBUG
+ // This bit vector is only used for DCHECKs, so only store the field in debug
+ // builds.
+ BitVector bound_;
+#endif
+ size_t constant_pool_index_;
+ size_t switch_bytecode_offset_;
+ int size_;
+ int case_value_base_;
+
+ friend class BytecodeArrayWriter;
+};
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_JUMP_TABLE_H_
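
Note: the class stores only the first constant-pool index plus a base case value, so entry lookup is pure arithmetic. A worked example, assuming a table allocated at constant-pool index 5 with size 4 and case_value_base -1:

    // ConstantPoolEntryFor(case_value) == constant_pool_index_
    //                                     + case_value - case_value_base_
    //
    //   case -1 -> 5 + (-1) - (-1) = 5
    //   case  0 -> 5 +   0  - (-1) = 6
    //   case  2 -> 5 +   2  - (-1) = 8   (last valid case: base + size - 1)
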
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.cc b/deps/v8/src/interpreter/bytecode-node.cc
index 06accd75dc..2bcea0a16a 100644
--- a/deps/v8/src/interpreter/bytecode-pipeline.cc
+++ b/deps/v8/src/interpreter/bytecode-node.cc
@@ -1,8 +1,8 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/interpreter/bytecode-pipeline.h"
+#include "src/interpreter/bytecode-node.h"
#include <iomanip>
#include "src/source-position-table.h"
@@ -47,14 +47,6 @@ bool BytecodeNode::operator==(const BytecodeNode& other) const {
return true;
}
-std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info) {
- if (info.is_valid()) {
- char description = info.is_statement() ? 'S' : 'E';
- os << info.source_position() << ' ' << description << '>';
- }
- return os;
-}
-
std::ostream& operator<<(std::ostream& os, const BytecodeNode& node) {
node.Print(os);
return os;
diff --git a/deps/v8/src/interpreter/bytecode-pipeline.h b/deps/v8/src/interpreter/bytecode-node.h
index 23cad237dd..98e1577f45 100644
--- a/deps/v8/src/interpreter/bytecode-pipeline.h
+++ b/deps/v8/src/interpreter/bytecode-node.h
@@ -1,138 +1,22 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_INTERPRETER_BYTECODE_PIPELINE_H_
-#define V8_INTERPRETER_BYTECODE_PIPELINE_H_
+#ifndef V8_INTERPRETER_BYTECODE_NODE_H_
+#define V8_INTERPRETER_BYTECODE_NODE_H_
+
+#include <algorithm>
-#include "src/base/compiler-specific.h"
#include "src/globals.h"
-#include "src/interpreter/bytecode-register-allocator.h"
-#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/bytecodes.h"
-#include "src/objects.h"
-#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
namespace interpreter {
-class BytecodeLabel;
-class BytecodeNode;
-class BytecodeSourceInfo;
-
-// Interface for bytecode pipeline stages.
-class BytecodePipelineStage {
- public:
- virtual ~BytecodePipelineStage() {}
-
- // Write bytecode node |node| into pipeline. The node is only valid
- // for the duration of the call. Callee's should clone it if
- // deferring Write() to the next stage.
- virtual void Write(BytecodeNode* node) = 0;
-
- // Write jump bytecode node |node| which jumps to |label| into pipeline.
- // The node and label are only valid for the duration of the call. This call
- // implicitly ends the current basic block so should always write to the next
- // stage.
- virtual void WriteJump(BytecodeNode* node, BytecodeLabel* label) = 0;
-
- // Binds |label| to the current bytecode location. This call implicitly
- // ends the current basic block and so any deferred bytecodes should be
- // written to the next stage.
- virtual void BindLabel(BytecodeLabel* label) = 0;
-
- // Binds |label| to the location of |target|. This call implicitly
- // ends the current basic block and so any deferred bytecodes should be
- // written to the next stage.
- virtual void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) = 0;
-
- // Flush the pipeline and generate a bytecode array.
- virtual Handle<BytecodeArray> ToBytecodeArray(
- Isolate* isolate, int register_count, int parameter_count,
- Handle<FixedArray> handler_table) = 0;
-};
-
-// Source code position information.
-class BytecodeSourceInfo final {
- public:
- static const int kUninitializedPosition = -1;
-
- BytecodeSourceInfo()
- : position_type_(PositionType::kNone),
- source_position_(kUninitializedPosition) {}
-
- BytecodeSourceInfo(int source_position, bool is_statement)
- : position_type_(is_statement ? PositionType::kStatement
- : PositionType::kExpression),
- source_position_(source_position) {
- DCHECK_GE(source_position, 0);
- }
-
- // Makes instance into a statement position.
- void MakeStatementPosition(int source_position) {
- // Statement positions can be replaced by other statement
- // positions. For example , "for (x = 0; x < 3; ++x) 7;" has a
- // statement position associated with 7 but no bytecode associated
- // with it. Then Next is emitted after the body and has
- // statement position and overrides the existing one.
- position_type_ = PositionType::kStatement;
- source_position_ = source_position;
- }
-
- // Makes instance into an expression position. Instance should not
- // be a statement position otherwise it could be lost and impair the
- // debugging experience.
- void MakeExpressionPosition(int source_position) {
- DCHECK(!is_statement());
- position_type_ = PositionType::kExpression;
- source_position_ = source_position;
- }
-
- // Forces an instance into an expression position.
- void ForceExpressionPosition(int source_position) {
- position_type_ = PositionType::kExpression;
- source_position_ = source_position;
- }
-
- int source_position() const {
- DCHECK(is_valid());
- return source_position_;
- }
-
- bool is_statement() const {
- return position_type_ == PositionType::kStatement;
- }
- bool is_expression() const {
- return position_type_ == PositionType::kExpression;
- }
-
- bool is_valid() const { return position_type_ != PositionType::kNone; }
- void set_invalid() {
- position_type_ = PositionType::kNone;
- source_position_ = kUninitializedPosition;
- }
-
- bool operator==(const BytecodeSourceInfo& other) const {
- return position_type_ == other.position_type_ &&
- source_position_ == other.source_position_;
- }
-
- bool operator!=(const BytecodeSourceInfo& other) const {
- return position_type_ != other.position_type_ ||
- source_position_ != other.source_position_;
- }
-
- private:
- enum class PositionType : uint8_t { kNone, kExpression, kStatement };
-
- PositionType position_type_;
- int source_position_;
-};
-
// A container for a generated bytecode, its operands, and source information.
-// These must be allocated by a BytecodeNodeAllocator instance.
-class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
+class V8_EXPORT_PRIVATE BytecodeNode final {
public:
INLINE(BytecodeNode(Bytecode bytecode,
BytecodeSourceInfo source_info = BytecodeSourceInfo()))
@@ -215,39 +99,9 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
BYTECODE_LIST(DEFINE_BYTECODE_NODE_CREATOR)
#undef DEFINE_BYTECODE_NODE_CREATOR
- // Replace the bytecode of this node with |bytecode| and keep the operands.
- void replace_bytecode(Bytecode bytecode) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(bytecode_),
- Bytecodes::NumberOfOperands(bytecode));
- bytecode_ = bytecode;
- }
-
- void update_operand0(uint32_t operand0) { SetOperand(0, operand0); }
-
// Print to stream |os|.
void Print(std::ostream& os) const;
- // Transform to a node representing |new_bytecode| which has one
- // operand more than the current bytecode.
- void Transform(Bytecode new_bytecode, uint32_t extra_operand) {
- DCHECK_EQ(Bytecodes::NumberOfOperands(new_bytecode),
- Bytecodes::NumberOfOperands(bytecode()) + 1);
- DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 1 ||
- Bytecodes::GetOperandType(new_bytecode, 0) ==
- Bytecodes::GetOperandType(bytecode(), 0));
- DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 2 ||
- Bytecodes::GetOperandType(new_bytecode, 1) ==
- Bytecodes::GetOperandType(bytecode(), 1));
- DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 3 ||
- Bytecodes::GetOperandType(new_bytecode, 2) ==
- Bytecodes::GetOperandType(bytecode(), 2));
- DCHECK(Bytecodes::NumberOfOperands(bytecode()) < 4);
-
- bytecode_ = new_bytecode;
- operand_count_++;
- SetOperand(operand_count() - 1, extra_operand);
- }
-
Bytecode bytecode() const { return bytecode_; }
uint32_t operand(int i) const {
@@ -256,6 +110,8 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
}
const uint32_t* operands() const { return operands_; }
+ void update_operand0(uint32_t operand0) { SetOperand(0, operand0); }
+
int operand_count() const { return operand_count_; }
OperandScale operand_scale() const { return operand_scale_; }
@@ -410,12 +266,10 @@ class V8_EXPORT_PRIVATE BytecodeNode final : NON_EXPORTED_BASE(ZoneObject) {
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
- const BytecodeSourceInfo& info);
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
const BytecodeNode& node);
} // namespace interpreter
} // namespace internal
} // namespace v8
-#endif // V8_INTERPRETER_BYTECODE_PIPELINE_H_
+#endif // V8_INTERPRETER_BYTECODE_NODE_H_
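
With the pipeline interface deleted, BytecodeNode is left as a plain value type that the bytecode array writer consumes directly, no longer zone-allocated. A minimal standalone model of that shape, assuming an illustrative Bytecode enum and a fixed four-operand buffer (the real class also tracks an operand scale and source info):

#include <cassert>
#include <cstdint>
#include <iostream>

// Illustrative stand-in for V8's Bytecode enum.
enum class Bytecode : std::uint8_t { kLdaZero, kStar, kJump };

// Plain value type: an opcode plus up to four operands.
class BytecodeNode {
 public:
  explicit BytecodeNode(Bytecode bytecode)
      : bytecode_(bytecode), operand_count_(0) {}

  void AddOperand(std::uint32_t operand) {
    assert(operand_count_ < 4);
    operands_[operand_count_++] = operand;
  }

  Bytecode bytecode() const { return bytecode_; }
  int operand_count() const { return operand_count_; }
  std::uint32_t operand(int i) const { return operands_[i]; }

 private:
  Bytecode bytecode_;
  int operand_count_;
  std::uint32_t operands_[4];
};

int main() {
  BytecodeNode node(Bytecode::kStar);
  node.AddOperand(3);  // e.g. a destination register index
  std::cout << "operand count: " << node.operand_count() << "\n";
}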
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.cc b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
index 583d99c227..859f0e1828 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.cc
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.cc
@@ -207,14 +207,10 @@ BytecodeRegisterOptimizer::BytecodeRegisterOptimizer(
// Calculate offset so register index values can be mapped into
// a vector of register metadata.
- if (parameter_count != 0) {
- register_info_table_offset_ =
- -Register::FromParameterIndex(0, parameter_count).index();
- } else {
- // TODO(oth): This path shouldn't be necessary in bytecode generated
- // from Javascript, but a set of tests do not include the JS receiver.
- register_info_table_offset_ = -accumulator_.index();
- }
+ // There is at least one parameter, which is the JS receiver.
+ DCHECK(parameter_count != 0);
+ register_info_table_offset_ =
+ -Register::FromParameterIndex(0, parameter_count).index();
// Initialize register map for parameters, locals, and the
// accumulator.
@@ -322,6 +318,15 @@ void BytecodeRegisterOptimizer::AddToEquivalenceSet(
void BytecodeRegisterOptimizer::RegisterTransfer(RegisterInfo* input_info,
RegisterInfo* output_info) {
+ bool output_is_observable =
+ RegisterIsObservable(output_info->register_value());
+ bool in_same_equivalence_set =
+ output_info->IsInSameEquivalenceSet(input_info);
+ if (in_same_equivalence_set &&
+ (!output_is_observable || output_info->materialized())) {
+ return; // Nothing more to do.
+ }
+
// Materialize an alternate in the equivalence set that
// |output_info| is leaving.
if (output_info->materialized()) {
@@ -329,12 +334,10 @@ void BytecodeRegisterOptimizer::RegisterTransfer(RegisterInfo* input_info,
}
// Add |output_info| to new equivalence set.
- if (!output_info->IsInSameEquivalenceSet(input_info)) {
+ if (!in_same_equivalence_set) {
AddToEquivalenceSet(input_info, output_info);
}
- bool output_is_observable =
- RegisterIsObservable(output_info->register_value());
if (output_is_observable) {
// Force store to be emitted when register is observable.
output_info->set_materialized(false);
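
The reordering above makes the transfer's no-op case explicit: when source and destination registers are already in the same equivalence set, a destination that is unobservable or already materialized needs no bytecode at all. A self-contained model of just that predicate, with the three flags named after the locals in the diff:

#include <cassert>

// Flags computed at the top of RegisterTransfer(); here they are plain
// inputs rather than queries on RegisterInfo.
struct TransferCase {
  bool in_same_equivalence_set;
  bool output_is_observable;
  bool output_materialized;
};

// Mirrors the early return added to RegisterTransfer().
bool TransferIsNoOp(const TransferCase& c) {
  return c.in_same_equivalence_set &&
         (!c.output_is_observable || c.output_materialized);
}

int main() {
  // Already equivalent and invisible to the debugger: emit nothing.
  assert(TransferIsNoOp({true, false, false}));
  // Observable but already materialized: still nothing to emit.
  assert(TransferIsNoOp({true, true, true}));
  // Observable and not yet materialized: a store must be forced.
  assert(!TransferIsNoOp({true, true, false}));
}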
diff --git a/deps/v8/src/interpreter/bytecode-register-optimizer.h b/deps/v8/src/interpreter/bytecode-register-optimizer.h
index 0e379a2599..494abb6c96 100644
--- a/deps/v8/src/interpreter/bytecode-register-optimizer.h
+++ b/deps/v8/src/interpreter/bytecode-register-optimizer.h
@@ -64,14 +64,18 @@ class V8_EXPORT_PRIVATE BytecodeRegisterOptimizer final
// Prepares for |bytecode|.
template <Bytecode bytecode, AccumulatorUse accumulator_use>
INLINE(void PrepareForBytecode()) {
- if (Bytecodes::IsJump(bytecode) || bytecode == Bytecode::kDebugger ||
- bytecode == Bytecode::kSuspendGenerator) {
+ if (Bytecodes::IsJump(bytecode) || Bytecodes::IsSwitch(bytecode) ||
+ bytecode == Bytecode::kDebugger ||
+ bytecode == Bytecode::kSuspendGenerator ||
+ bytecode == Bytecode::kResumeGenerator) {
// All state must be flushed before emitting
// - a jump bytecode (as the register equivalents at the jump target
- // aren't
- // known.
+ // aren't known)
+ // - a switch bytecode (as the register equivalents at the switch targets
+ // aren't known)
// - a call to the debugger (as it can manipulate locals and parameters),
// - a generator suspend (as this involves saving all registers).
+ // - a generator resume (as this involves restoring all registers).
Flush();
}
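
The flush set thus grows from jumps, kDebugger, and kSuspendGenerator to also include switches and kResumeGenerator. A compile-time sketch of the widened predicate, using a stand-in enum rather than V8's Bytecode:

// Stand-in enum; only the members needed for the predicate.
enum class Bc {
  kJump,
  kSwitchOnSmiNoFeedback,
  kDebugger,
  kSuspendGenerator,
  kResumeGenerator,
  kAdd
};

constexpr bool IsJump(Bc b) { return b == Bc::kJump; }
constexpr bool IsSwitch(Bc b) { return b == Bc::kSwitchOnSmiNoFeedback; }

// Mirrors the condition in PrepareForBytecode() after this change.
constexpr bool RequiresFlush(Bc b) {
  return IsJump(b) || IsSwitch(b) || b == Bc::kDebugger ||
         b == Bc::kSuspendGenerator || b == Bc::kResumeGenerator;
}

static_assert(RequiresFlush(Bc::kResumeGenerator),
              "resuming restores all registers");
static_assert(!RequiresFlush(Bc::kAdd),
              "ordinary bytecodes keep register equivalences");

int main() {}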
diff --git a/deps/v8/src/interpreter/bytecode-source-info.cc b/deps/v8/src/interpreter/bytecode-source-info.cc
new file mode 100644
index 0000000000..ed05b3e2e7
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-source-info.cc
@@ -0,0 +1,24 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/interpreter/bytecode-source-info.h"
+
+#include <iomanip>
+#include "src/source-position-table.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+std::ostream& operator<<(std::ostream& os, const BytecodeSourceInfo& info) {
+ if (info.is_valid()) {
+ char description = info.is_statement() ? 'S' : 'E';
+ os << info.source_position() << ' ' << description << '>';
+ }
+ return os;
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
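
The operator prints a valid position as its numeric offset followed by 'S' (statement) or 'E' (expression) and a closing '>', and prints nothing for an invalid one. A standalone re-creation of that formatting, with the class replaced by plain parameters:

#include <iostream>
#include <sstream>
#include <string>

std::string Format(int position, bool is_statement, bool is_valid) {
  std::ostringstream os;
  if (is_valid) {
    char description = is_statement ? 'S' : 'E';
    os << position << ' ' << description << '>';
  }
  return os.str();
}

int main() {
  std::cout << Format(42, true, true) << "\n";   // "42 S>"
  std::cout << Format(7, false, true) << "\n";   // "7 E>"
  std::cout << Format(0, false, false) << "\n";  // empty line
}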
diff --git a/deps/v8/src/interpreter/bytecode-source-info.h b/deps/v8/src/interpreter/bytecode-source-info.h
new file mode 100644
index 0000000000..790a6b2aa2
--- /dev/null
+++ b/deps/v8/src/interpreter/bytecode-source-info.h
@@ -0,0 +1,98 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTERPRETER_BYTECODE_SOURCE_INFO_H_
+#define V8_INTERPRETER_BYTECODE_SOURCE_INFO_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+// Source code position information.
+class BytecodeSourceInfo final {
+ public:
+ static const int kUninitializedPosition = -1;
+
+ BytecodeSourceInfo()
+ : position_type_(PositionType::kNone),
+ source_position_(kUninitializedPosition) {}
+
+ BytecodeSourceInfo(int source_position, bool is_statement)
+ : position_type_(is_statement ? PositionType::kStatement
+ : PositionType::kExpression),
+ source_position_(source_position) {
+ DCHECK_GE(source_position, 0);
+ }
+
+ // Makes instance into a statement position.
+ void MakeStatementPosition(int source_position) {
+ // Statement positions can be replaced by other statement
+ // positions. For example, "for (x = 0; x < 3; ++x) 7;" has a
+ // statement position associated with 7 but no bytecode associated
+ // with it. The statement position for the loop's "next" expression
+ // is then emitted after the body and overrides the existing one.
+ position_type_ = PositionType::kStatement;
+ source_position_ = source_position;
+ }
+
+ // Makes instance into an expression position. Instance should not
+ // be a statement position otherwise it could be lost and impair the
+ // debugging experience.
+ void MakeExpressionPosition(int source_position) {
+ DCHECK(!is_statement());
+ position_type_ = PositionType::kExpression;
+ source_position_ = source_position;
+ }
+
+ // Forces an instance into an expression position.
+ void ForceExpressionPosition(int source_position) {
+ position_type_ = PositionType::kExpression;
+ source_position_ = source_position;
+ }
+
+ int source_position() const {
+ DCHECK(is_valid());
+ return source_position_;
+ }
+
+ bool is_statement() const {
+ return position_type_ == PositionType::kStatement;
+ }
+ bool is_expression() const {
+ return position_type_ == PositionType::kExpression;
+ }
+
+ bool is_valid() const { return position_type_ != PositionType::kNone; }
+ void set_invalid() {
+ position_type_ = PositionType::kNone;
+ source_position_ = kUninitializedPosition;
+ }
+
+ bool operator==(const BytecodeSourceInfo& other) const {
+ return position_type_ == other.position_type_ &&
+ source_position_ == other.source_position_;
+ }
+
+ bool operator!=(const BytecodeSourceInfo& other) const {
+ return position_type_ != other.position_type_ ||
+ source_position_ != other.source_position_;
+ }
+
+ private:
+ enum class PositionType : uint8_t { kNone, kExpression, kStatement };
+
+ PositionType position_type_;
+ int source_position_;
+};
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const BytecodeSourceInfo& info);
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTERPRETER_BYTECODE_SOURCE_INFO_H_
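
The moved class encodes an asymmetric override rule: a statement position may replace anything, an expression position must not silently clobber a statement position, and only the Force variant may downgrade one. A minimal model of those transitions, with assert standing in for DCHECK:

#include <cassert>

enum class PositionType { kNone, kExpression, kStatement };

struct SourceInfo {
  PositionType type = PositionType::kNone;
  int position = -1;

  void MakeStatementPosition(int p) {  // may replace any earlier position
    type = PositionType::kStatement;
    position = p;
  }
  void MakeExpressionPosition(int p) {  // must not clobber a statement
    assert(type != PositionType::kStatement);
    type = PositionType::kExpression;
    position = p;
  }
  void ForceExpressionPosition(int p) {  // deliberate downgrade
    type = PositionType::kExpression;
    position = p;
  }
};

int main() {
  SourceInfo info;
  info.MakeExpressionPosition(10);
  info.MakeStatementPosition(10);    // statement overrides expression
  info.ForceExpressionPosition(12);  // only Force* may downgrade it
}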
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index baf9e88963..83417fe879 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -284,6 +284,10 @@ namespace interpreter {
V(JumpIfJSReceiver, AccumulatorUse::kRead, OperandType::kUImm) \
V(JumpIfNotHole, AccumulatorUse::kRead, OperandType::kUImm) \
\
+ /* Smi-table lookup for switch statements */ \
+ V(SwitchOnSmiNoFeedback, AccumulatorUse::kRead, OperandType::kIdx, \
+ OperandType::kUImm, OperandType::kImm) \
+ \
/* Complex flow control For..in */ \
V(ForInPrepare, AccumulatorUse::kNone, OperandType::kReg, \
OperandType::kRegOutTriple) \
@@ -611,13 +615,18 @@ class V8_EXPORT_PRIVATE Bytecodes final {
return IsJump(bytecode) && !IsJumpIfToBoolean(bytecode);
}
+ // Returns true if the bytecode is a switch.
+ static constexpr bool IsSwitch(Bytecode bytecode) {
+ return bytecode == Bytecode::kSwitchOnSmiNoFeedback;
+ }
+
// Returns true if |bytecode| has no effects. These bytecodes only manipulate
// interpreter frame state and will never throw.
static constexpr bool IsWithoutExternalSideEffects(Bytecode bytecode) {
return (IsAccumulatorLoadWithoutEffects(bytecode) ||
IsRegisterLoadWithoutEffects(bytecode) ||
IsCompareWithoutEffects(bytecode) || bytecode == Bytecode::kNop ||
- IsJumpWithoutEffects(bytecode));
+ IsJumpWithoutEffects(bytecode) || IsSwitch(bytecode));
}
// Returns true if the bytecode is Ldar or Star.
@@ -640,7 +649,6 @@ class V8_EXPORT_PRIVATE Bytecodes final {
bytecode == Bytecode::kConstruct ||
bytecode == Bytecode::kCallWithSpread ||
bytecode == Bytecode::kConstructWithSpread ||
- bytecode == Bytecode::kInvokeIntrinsic ||
bytecode == Bytecode::kCallJSRuntime;
}
@@ -752,7 +760,8 @@ class V8_EXPORT_PRIVATE Bytecodes final {
// Returns the receiver mode of the given call bytecode.
static ConvertReceiverMode GetReceiverMode(Bytecode bytecode) {
- DCHECK(IsCallOrConstruct(bytecode));
+ DCHECK(IsCallOrConstruct(bytecode) ||
+ bytecode == Bytecode::kInvokeIntrinsic);
switch (bytecode) {
case Bytecode::kCallProperty:
case Bytecode::kCallProperty0:
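
Judging from the new entry's operand types (kIdx, kUImm, kImm), the switch plausibly carries a constant-pool index for the jump table, the table size, and the lowest case value; those operand meanings are an inference from the diff, not stated by it. A standalone model of the dispatch such a table implies:

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// Returns the jump target for |value|, or nothing if the value falls
// outside the table and control should fall through past the switch.
std::optional<std::size_t> SwitchTarget(
    const std::vector<std::size_t>& jump_table, std::int32_t case_value_base,
    std::int32_t value) {
  std::int64_t slot = static_cast<std::int64_t>(value) - case_value_base;
  if (slot < 0 || slot >= static_cast<std::int64_t>(jump_table.size()))
    return std::nullopt;
  return jump_table[static_cast<std::size_t>(slot)];
}

int main() {
  std::vector<std::size_t> table = {100, 120, 140};  // bytecode offsets
  auto target = SwitchTarget(table, /*case_value_base=*/5, /*value=*/6);
  return (target && *target == 120) ? 0 : 1;  // value 6 hits slot 1
}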
diff --git a/deps/v8/src/interpreter/constant-array-builder.cc b/deps/v8/src/interpreter/constant-array-builder.cc
index 74d887a61a..f7e68f876e 100644
--- a/deps/v8/src/interpreter/constant-array-builder.cc
+++ b/deps/v8/src/interpreter/constant-array-builder.cc
@@ -38,11 +38,13 @@ void ConstantArrayBuilder::ConstantArraySlice::Unreserve() {
}
size_t ConstantArrayBuilder::ConstantArraySlice::Allocate(
- ConstantArrayBuilder::Entry entry) {
- DCHECK_GT(available(), 0u);
+ ConstantArrayBuilder::Entry entry, size_t count) {
+ DCHECK_GE(available(), count);
size_t index = constants_.size();
DCHECK_LT(index, capacity());
- constants_.push_back(entry);
+ for (size_t i = 0; i < count; ++i) {
+ constants_.push_back(entry);
+ }
return index + start_index();
}
@@ -65,7 +67,12 @@ void ConstantArrayBuilder::ConstantArraySlice::CheckAllElementsAreUnique(
Isolate* isolate) const {
std::set<Object*> elements;
for (const Entry& entry : constants_) {
+ // TODO(leszeks): Ignore jump tables because they have to be contiguous,
+ // so they can contain duplicates.
+ if (entry.IsJumpTableEntry()) continue;
+
Handle<Object> handle = entry.ToHandle(isolate);
+
if (elements.find(*handle) != elements.end()) {
std::ostringstream os;
os << "Duplicate constant found: " << Brief(*handle) << std::endl;
@@ -220,9 +227,14 @@ SINGLETON_CONSTANT_ENTRY_TYPES(INSERT_ENTRY)
ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateIndex(
ConstantArrayBuilder::Entry entry) {
+ return AllocateIndexArray(entry, 1);
+}
+
+ConstantArrayBuilder::index_t ConstantArrayBuilder::AllocateIndexArray(
+ ConstantArrayBuilder::Entry entry, size_t count) {
for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
- if (idx_slice_[i]->available() > 0) {
- return static_cast<index_t>(idx_slice_[i]->Allocate(entry));
+ if (idx_slice_[i]->available() >= count) {
+ return static_cast<index_t>(idx_slice_[i]->Allocate(entry, count));
}
}
UNREACHABLE();
@@ -254,11 +266,24 @@ size_t ConstantArrayBuilder::InsertDeferred() {
return AllocateIndex(Entry::Deferred());
}
+size_t ConstantArrayBuilder::InsertJumpTable(size_t size) {
+ return AllocateIndexArray(Entry::UninitializedJumpTableSmi(), size);
+}
+
void ConstantArrayBuilder::SetDeferredAt(size_t index, Handle<Object> object) {
ConstantArraySlice* slice = IndexToSlice(index);
return slice->At(index).SetDeferred(object);
}
+void ConstantArrayBuilder::SetJumpTableSmi(size_t index, Smi* smi) {
+ ConstantArraySlice* slice = IndexToSlice(index);
+ // Allow others to reuse these Smis, but insert using emplace to avoid
+ // overwriting existing values in the Smi map (which may have a smaller
+ // operand size).
+ smi_map_.emplace(smi, static_cast<index_t>(index));
+ return slice->At(index).SetJumpTableSmi(smi);
+}
+
OperandSize ConstantArrayBuilder::CreateReservedEntry() {
for (size_t i = 0; i < arraysize(idx_slice_); ++i) {
if (idx_slice_[i]->available() > 0) {
@@ -311,7 +336,11 @@ Handle<Object> ConstantArrayBuilder::Entry::ToHandle(Isolate* isolate) const {
case Tag::kHandle:
return handle_;
case Tag::kSmi:
+ case Tag::kJumpTableSmi:
return handle(smi_, isolate);
+ case Tag::kUninitializedJumpTableSmi:
+ // TODO(leszeks): There's probably a better value we could use here.
+ return isolate->factory()->the_hole_value();
case Tag::kRawString:
return raw_string_->string();
case Tag::kHeapNumber:
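
Allocate() now reserves |count| contiguous slots by pushing the same placeholder entry repeatedly and returning the index of the first slot, which is what lets a jump table stay contiguous. A self-contained model of the slice behaviour:

#include <cassert>
#include <cstddef>
#include <vector>

struct Slice {
  std::size_t start_index;
  std::size_t capacity;
  std::vector<int> constants;  // int as a stand-in for Entry

  std::size_t available() const { return capacity - constants.size(); }

  std::size_t Allocate(int entry, std::size_t count) {
    assert(available() >= count);
    std::size_t index = constants.size();
    for (std::size_t i = 0; i < count; ++i) constants.push_back(entry);
    return index + start_index;
  }
};

int main() {
  Slice slice{/*start_index=*/0, /*capacity=*/8, {}};
  std::size_t first = slice.Allocate(/*entry=*/-1, /*count=*/3);
  assert(first == 0 && slice.constants.size() == 3);
}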
diff --git a/deps/v8/src/interpreter/constant-array-builder.h b/deps/v8/src/interpreter/constant-array-builder.h
index 86e7c0818b..a50aa3519c 100644
--- a/deps/v8/src/interpreter/constant-array-builder.h
+++ b/deps/v8/src/interpreter/constant-array-builder.h
@@ -70,9 +70,18 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
// SetDeferredAt().
size_t InsertDeferred();
+ // Inserts |size| consecutive empty entries and returns the array index
+ // associated with the first reservation. Each entry's Smi value can be
+ // inserted by calling SetJumpTableSmi().
+ size_t InsertJumpTable(size_t size);
+
// Sets the deferred value at |index| to |object|.
void SetDeferredAt(size_t index, Handle<Object> object);
+ // Sets the jump table entry at |index| to |smi|. Note that |index| is the
+ // constant pool index, not the switch case value.
+ void SetJumpTableSmi(size_t index, Smi* smi);
+
// Creates a reserved entry in the constant pool and returns
// the size of the operand that'll be required to hold the entry
// when committed.
@@ -107,14 +116,29 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
static Entry Deferred() { return Entry(Tag::kDeferred); }
+ static Entry UninitializedJumpTableSmi() {
+ return Entry(Tag::kUninitializedJumpTableSmi);
+ }
+
bool IsDeferred() const { return tag_ == Tag::kDeferred; }
+ bool IsJumpTableEntry() const {
+ return tag_ == Tag::kUninitializedJumpTableSmi ||
+ tag_ == Tag::kJumpTableSmi;
+ }
+
void SetDeferred(Handle<Object> handle) {
DCHECK(tag_ == Tag::kDeferred);
tag_ = Tag::kHandle;
handle_ = handle;
}
+ void SetJumpTableSmi(Smi* smi) {
+ DCHECK(tag_ == Tag::kUninitializedJumpTableSmi);
+ tag_ = Tag::kJumpTableSmi;
+ smi_ = smi;
+ }
+
Handle<Object> ToHandle(Isolate* isolate) const;
private:
@@ -135,6 +159,8 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
kRawString,
kHeapNumber,
kScope,
+ kUninitializedJumpTableSmi,
+ kJumpTableSmi,
#define ENTRY_TAG(NAME, ...) k##NAME,
SINGLETON_CONSTANT_ENTRY_TYPES(ENTRY_TAG)
#undef ENTRY_TAG
@@ -142,6 +168,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
};
index_t AllocateIndex(Entry constant_entry);
+ index_t AllocateIndexArray(Entry constant_entry, size_t size);
index_t AllocateReservedEntry(Smi* value);
struct ConstantArraySlice final : public ZoneObject {
@@ -149,7 +176,7 @@ class V8_EXPORT_PRIVATE ConstantArrayBuilder final BASE_EMBEDDED {
OperandSize operand_size);
void Reserve();
void Unreserve();
- size_t Allocate(Entry entry);
+ size_t Allocate(Entry entry, size_t count = 1);
Entry& At(size_t index);
const Entry& At(size_t index) const;
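
Together the two new methods form a reserve-then-fill protocol: InsertJumpTable() claims contiguous slots before any case target is known, and SetJumpTableSmi() patches each slot once labels are bound. A sketch with stand-in types (the real API takes Smi* values and routes through the slices above):

#include <cstddef>
#include <vector>

class ConstantPoolModel {
 public:
  // Phase 1: reserve |size| contiguous uninitialized slots.
  std::size_t InsertJumpTable(std::size_t size) {
    std::size_t first = entries_.size();
    entries_.resize(entries_.size() + size, kUninitialized);
    return first;
  }
  // Phase 2: fill one slot once the jump offset is known.
  void SetJumpTableSmi(std::size_t index, int smi) { entries_[index] = smi; }

 private:
  static constexpr int kUninitialized = -1;
  std::vector<int> entries_;
};

int main() {
  ConstantPoolModel pool;
  std::size_t table = pool.InsertJumpTable(3);
  for (std::size_t i = 0; i < 3; ++i)
    pool.SetJumpTableSmi(table + i, 100 + 20 * static_cast<int>(i));
}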
diff --git a/deps/v8/src/interpreter/control-flow-builders.cc b/deps/v8/src/interpreter/control-flow-builders.cc
index 81041e6a3d..e4281667c2 100644
--- a/deps/v8/src/interpreter/control-flow-builders.cc
+++ b/deps/v8/src/interpreter/control-flow-builders.cc
@@ -47,21 +47,40 @@ void BlockBuilder::EndBlock() {
LoopBuilder::~LoopBuilder() {
DCHECK(continue_labels_.empty() || continue_labels_.is_bound());
- DCHECK(header_labels_.empty() || header_labels_.is_bound());
+ BindBreakTarget();
+ // Restore the parent jump table.
+ if (generator_jump_table_location_ != nullptr) {
+ *generator_jump_table_location_ = parent_generator_jump_table_;
+ }
}
-void LoopBuilder::LoopHeader(ZoneVector<BytecodeLabel>* additional_labels) {
+void LoopBuilder::LoopHeader() {
// Jumps from before the loop header into the loop violate ordering
// requirements of bytecode basic blocks. The only entry into a loop
// must be the loop header. Breaks might seem to be acceptable entries,
// but they are not when nested and misplaced between the headers.
DCHECK(break_labels_.empty() && continue_labels_.empty());
builder()->Bind(&loop_header_);
- if (additional_labels != nullptr) {
- for (auto& label : *additional_labels) {
- builder()->Bind(&label);
- }
+}
+
+void LoopBuilder::LoopHeaderInGenerator(
+ BytecodeJumpTable** generator_jump_table, int first_resume_id,
+ int resume_count) {
+ // Bind all the resume points that are inside the loop to be at the loop
+ // header.
+ for (int id = first_resume_id; id < first_resume_id + resume_count; ++id) {
+ builder()->Bind(*generator_jump_table, id);
}
+
+ // Create the loop header.
+ LoopHeader();
+
+ // Create a new jump table, used after the loop header, that covers
+ // only these resume points.
+ generator_jump_table_location_ = generator_jump_table;
+ parent_generator_jump_table_ = *generator_jump_table;
+ *generator_jump_table =
+ builder()->AllocateJumpTable(resume_count, first_resume_id);
}
void LoopBuilder::JumpToHeader(int loop_depth) {
@@ -74,11 +93,6 @@ void LoopBuilder::JumpToHeader(int loop_depth) {
builder()->JumpLoop(&loop_header_, level);
}
-void LoopBuilder::EndLoop() {
- BindBreakTarget();
- header_labels_.BindToLabel(builder(), loop_header_);
-}
-
void LoopBuilder::BindContinueTarget() { continue_labels_.Bind(builder()); }
SwitchBuilder::~SwitchBuilder() {
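
LoopBuilder now saves the generator's current jump-table pointer, installs a narrower per-loop table, and restores the parent table in its destructor. A standalone model of that save/swap/restore discipline:

#include <cassert>

struct JumpTable { int id; };  // stand-in for BytecodeJumpTable

class LoopScopeModel {
 public:
  LoopScopeModel(JumpTable** location, JumpTable* inner_table)
      : location_(location), parent_(*location) {
    *location_ = inner_table;  // in-loop resumes route via the inner table
  }
  ~LoopScopeModel() { *location_ = parent_; }  // restore on loop exit

 private:
  JumpTable** location_;
  JumpTable* parent_;
};

int main() {
  JumpTable outer{0}, inner{1};
  JumpTable* current = &outer;
  {
    LoopScopeModel scope(&current, &inner);
    assert(current == &inner);
  }
  assert(current == &outer);
}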
diff --git a/deps/v8/src/interpreter/control-flow-builders.h b/deps/v8/src/interpreter/control-flow-builders.h
index 313c9aa536..8cff017e78 100644
--- a/deps/v8/src/interpreter/control-flow-builders.h
+++ b/deps/v8/src/interpreter/control-flow-builders.h
@@ -90,13 +90,15 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
explicit LoopBuilder(BytecodeArrayBuilder* builder)
: BreakableControlFlowBuilder(builder),
continue_labels_(builder->zone()),
- header_labels_(builder->zone()) {}
+ generator_jump_table_location_(nullptr),
+ parent_generator_jump_table_(nullptr) {}
~LoopBuilder();
- void LoopHeader(ZoneVector<BytecodeLabel>* additional_labels = nullptr);
+ void LoopHeader();
+ void LoopHeaderInGenerator(BytecodeJumpTable** parent_generator_jump_table,
+ int first_resume_id, int resume_count);
void JumpToHeader(int loop_depth);
void BindContinueTarget();
- void EndLoop();
// This method is called when visiting continue statements in the AST.
// Inserts a jump to an unbound label that is patched when BindContinueTarget
@@ -111,7 +113,13 @@ class V8_EXPORT_PRIVATE LoopBuilder final : public BreakableControlFlowBuilder {
// Unbound labels that identify jumps for continue statements in the code and
// jumps from checking the loop condition to the header for do-while loops.
BytecodeLabels continue_labels_;
- BytecodeLabels header_labels_;
+
+ // While we're in the loop, we want to have a different jump table for
+ // generator switch statements. We restore it at the end of the loop.
+ // TODO(leszeks): Storing a pointer to the BytecodeGenerator's jump table
+ // field is ugly, figure out a better way to do this.
+ BytecodeJumpTable** generator_jump_table_location_;
+ BytecodeJumpTable* parent_generator_jump_table_;
};
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index b65c7c7501..070c89549b 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -49,9 +49,9 @@ InterpreterAssembler::InterpreterAssembler(CodeAssemblerState* state,
dispatch_table_.Bind(
Parameter(InterpreterDispatchDescriptor::kDispatchTable));
- if (FLAG_trace_ignition) {
- TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
- }
+#ifdef V8_TRACE_IGNITION
+ TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
+#endif
RegisterCallGenerationCallbacks([this] { CallPrologue(); },
[this] { CallEpilogue(); });
@@ -119,7 +119,7 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
Branch(Word32Equal(depth, Int32Constant(0)), &context_found, &context_search);
// Loop until the depth is 0.
- Bind(&context_search);
+ BIND(&context_search);
{
cur_depth.Bind(Int32Sub(cur_depth.value(), Int32Constant(1)));
cur_context.Bind(
@@ -129,7 +129,7 @@ Node* InterpreterAssembler::GetContextAtDepth(Node* context, Node* depth) {
&context_search);
}
- Bind(&context_found);
+ BIND(&context_found);
return cur_context.value();
}
@@ -147,7 +147,7 @@ void InterpreterAssembler::GotoIfHasContextExtensionUpToDepth(Node* context,
// Loop until the depth is 0.
Goto(&context_search);
- Bind(&context_search);
+ BIND(&context_search);
{
// TODO(leszeks): We only need to do this check if the context had a sloppy
// eval, we could pass in a context chain bitmask to figure out which
@@ -204,7 +204,7 @@ Node* InterpreterAssembler::RegisterLocation(Node* reg_index) {
}
Node* InterpreterAssembler::RegisterFrameOffset(Node* index) {
- return WordShl(index, kPointerSizeLog2);
+ return TimesPointerSize(index);
}
Node* InterpreterAssembler::LoadRegister(Register reg) {
@@ -598,7 +598,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Node* is_smi = TaggedIsSmi(function);
Branch(is_smi, &extra_checks, &call_function);
- Bind(&call_function);
+ BIND(&call_function);
{
// Increment the call count.
IncrementCallCount(feedback_vector, slot_id);
@@ -614,7 +614,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Goto(&end);
}
- Bind(&extra_checks);
+ BIND(&extra_checks);
{
Label check_initialized(this), mark_megamorphic(this),
create_allocation_site(this);
@@ -658,7 +658,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Goto(&mark_megamorphic);
}
- Bind(&check_initialized);
+ BIND(&check_initialized);
{
Comment("check if uninitialized");
// Check if it is uninitialized target first.
@@ -698,7 +698,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Goto(&call_function);
}
- Bind(&create_allocation_site);
+ BIND(&create_allocation_site);
{
CreateAllocationSiteInFeedbackVector(feedback_vector, SmiTag(slot_id));
@@ -708,7 +708,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Goto(&call_function);
}
- Bind(&mark_megamorphic);
+ BIND(&mark_megamorphic);
{
// Mark it as megamorphic.
// MegamorphicSentinel is created as a part of Heap::InitialObjects
@@ -722,7 +722,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
}
}
- Bind(&call);
+ BIND(&call);
{
Comment("Increment call count and call using Call builtin");
// Increment the call count.
@@ -739,7 +739,7 @@ Node* InterpreterAssembler::CallJSWithFeedback(
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
return return_value.value();
}
@@ -748,7 +748,8 @@ Node* InterpreterAssembler::CallJS(Node* function, Node* context,
ConvertReceiverMode receiver_mode,
TailCallMode tail_call_mode) {
DCHECK(Bytecodes::MakesCallAlongCriticalPath(bytecode_));
- DCHECK(Bytecodes::IsCallOrConstruct(bytecode_));
+ DCHECK(Bytecodes::IsCallOrConstruct(bytecode_) ||
+ bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
isolate(), receiver_mode, tail_call_mode,
@@ -804,7 +805,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
allocation_feedback.Bind(UndefinedConstant());
Branch(is_monomorphic, &call_construct_function, &extra_checks);
- Bind(&call_construct_function);
+ BIND(&call_construct_function);
{
Comment("call using ConstructFunction");
IncrementCallCount(feedback_vector, slot_id);
@@ -817,7 +818,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Goto(&end);
}
- Bind(&extra_checks);
+ BIND(&extra_checks);
{
Label check_allocation_site(this), check_initialized(this),
initialize(this), mark_megamorphic(this);
@@ -840,7 +841,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Node* is_smi = TaggedIsSmi(feedback_value);
Branch(is_smi, &initialize, &mark_megamorphic);
- Bind(&check_allocation_site);
+ BIND(&check_allocation_site);
{
Comment("check if it is an allocation site");
Node* is_allocation_site =
@@ -858,7 +859,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Goto(&call_construct_function);
}
- Bind(&check_initialized);
+ BIND(&check_initialized);
{
// Check if it is uninitialized.
Comment("check if uninitialized");
@@ -867,7 +868,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Branch(is_uninitialized, &initialize, &mark_megamorphic);
}
- Bind(&initialize);
+ BIND(&initialize);
{
Label create_allocation_site(this), create_weak_cell(this);
Comment("initialize the feedback element");
@@ -878,7 +879,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Node* is_array_function = WordEqual(context_slot, constructor);
Branch(is_array_function, &create_allocation_site, &create_weak_cell);
- Bind(&create_allocation_site);
+ BIND(&create_allocation_site);
{
Node* site = CreateAllocationSiteInFeedbackVector(feedback_vector,
SmiTag(slot_id));
@@ -886,7 +887,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Goto(&call_construct_function);
}
- Bind(&create_weak_cell);
+ BIND(&create_weak_cell);
{
CreateWeakCellInFeedbackVector(feedback_vector, SmiTag(slot_id),
constructor);
@@ -894,7 +895,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
}
}
- Bind(&mark_megamorphic);
+ BIND(&mark_megamorphic);
{
// MegamorphicSentinel is an immortal immovable object so
// write-barrier is not needed.
@@ -908,7 +909,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
}
}
- Bind(&call_construct);
+ BIND(&call_construct);
{
Comment("call using Construct builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
@@ -920,7 +921,7 @@ Node* InterpreterAssembler::Construct(Node* constructor, Node* context,
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
return return_value.value();
}
@@ -990,7 +991,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
Branch(condition, &ok, &interrupt_check);
// Perform interrupt and reset budget.
- Bind(&interrupt_check);
+ BIND(&interrupt_check);
{
CallRuntime(Runtime::kInterrupt, GetContext());
new_budget.Bind(Int32Constant(Interpreter::InterruptBudget()));
@@ -998,7 +999,7 @@ void InterpreterAssembler::UpdateInterruptBudget(Node* weight, bool backward) {
}
// Update budget.
- Bind(&ok);
+ BIND(&ok);
StoreNoWriteBarrier(MachineRepresentation::kWord32,
BytecodeArrayTaggedPointer(), budget_offset,
new_budget.value());
@@ -1011,9 +1012,9 @@ Node* InterpreterAssembler::Advance(int delta) {
}
Node* InterpreterAssembler::Advance(Node* delta, bool backward) {
- if (FLAG_trace_ignition) {
- TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
- }
+#ifdef V8_TRACE_IGNITION
+ TraceBytecode(Runtime::kInterpreterTraceBytecodeExit);
+#endif
Node* next_offset = backward ? IntPtrSub(BytecodeOffset(), delta)
: IntPtrAdd(BytecodeOffset(), delta);
bytecode_offset_.Bind(next_offset);
@@ -1039,9 +1040,9 @@ void InterpreterAssembler::JumpConditional(Node* condition, Node* delta) {
Label match(this), no_match(this);
Branch(condition, &match, &no_match);
- Bind(&match);
+ BIND(&match);
Jump(delta);
- Bind(&no_match);
+ BIND(&no_match);
Dispatch();
}
@@ -1070,13 +1071,13 @@ Node* InterpreterAssembler::StarDispatchLookahead(Node* target_bytecode) {
Node* is_star = WordEqual(target_bytecode, star_bytecode);
Branch(is_star, &do_inline_star, &done);
- Bind(&do_inline_star);
+ BIND(&do_inline_star);
{
InlineStar();
var_bytecode.Bind(LoadBytecode(BytecodeOffset()));
Goto(&done);
}
- Bind(&done);
+ BIND(&done);
return var_bytecode.value();
}
@@ -1087,9 +1088,9 @@ void InterpreterAssembler::InlineStar() {
bytecode_ = Bytecode::kStar;
accumulator_use_ = AccumulatorUse::kNone;
- if (FLAG_trace_ignition) {
- TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
- }
+#ifdef V8_TRACE_IGNITION
+ TraceBytecode(Runtime::kInterpreterTraceBytecodeEntry);
+#endif
StoreRegister(GetAccumulator(), BytecodeOperandReg(0));
DCHECK_EQ(accumulator_use_, Bytecodes::GetAccumulatorUse(bytecode_));
@@ -1119,7 +1120,7 @@ Node* InterpreterAssembler::DispatchToBytecode(Node* target_bytecode,
Node* target_code_entry =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
- WordShl(target_bytecode, IntPtrConstant(kPointerSizeLog2)));
+ TimesPointerSize(target_bytecode));
return DispatchToBytecodeHandlerEntry(target_code_entry, new_bytecode_offset);
}
@@ -1172,7 +1173,7 @@ void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
Node* target_index = IntPtrAdd(base_index, next_bytecode);
Node* target_code_entry =
Load(MachineType::Pointer(), DispatchTableRawPointer(),
- WordShl(target_index, kPointerSizeLog2));
+ TimesPointerSize(target_index));
DispatchToBytecodeHandlerEntry(target_code_entry, next_bytecode_offset);
}
@@ -1187,7 +1188,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
var_value.Bind(value);
var_type_feedback->Bind(SmiConstant(BinaryOperationFeedback::kNone));
Goto(&loop);
- Bind(&loop);
+ BIND(&loop);
{
// Load the current {value}.
value = var_value.value();
@@ -1196,7 +1197,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Label if_valueissmi(this), if_valueisnotsmi(this);
Branch(TaggedIsSmi(value), &if_valueissmi, &if_valueisnotsmi);
- Bind(&if_valueissmi);
+ BIND(&if_valueissmi);
{
// Convert the Smi {value}.
var_result.Bind(SmiToWord32(value));
@@ -1206,7 +1207,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Goto(&done_loop);
}
- Bind(&if_valueisnotsmi);
+ BIND(&if_valueisnotsmi);
{
// Check if {value} is a HeapNumber.
Label if_valueisheapnumber(this),
@@ -1215,7 +1216,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Branch(IsHeapNumberMap(value_map), &if_valueisheapnumber,
&if_valueisnotheapnumber);
- Bind(&if_valueisheapnumber);
+ BIND(&if_valueisheapnumber);
{
// Truncate the floating point value.
var_result.Bind(TruncateHeapNumberValueToWord32(value));
@@ -1225,7 +1226,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Goto(&done_loop);
}
- Bind(&if_valueisnotheapnumber);
+ BIND(&if_valueisnotheapnumber);
{
// We do not require an Or with earlier feedback here because once we
// convert the value to a number, we cannot reach this path. We can
@@ -1239,7 +1240,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Int32Constant(ODDBALL_TYPE));
Branch(is_oddball, &if_valueisoddball, &if_valueisnotoddball);
- Bind(&if_valueisoddball);
+ BIND(&if_valueisoddball);
{
// Convert Oddball to a Number and perform checks again.
var_value.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
@@ -1248,7 +1249,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
Goto(&loop);
}
- Bind(&if_valueisnotoddball);
+ BIND(&if_valueisnotoddball);
{
// Convert the {value} to a Number first.
Callable callable = CodeFactory::NonNumberToNumber(isolate());
@@ -1259,7 +1260,7 @@ Node* InterpreterAssembler::TruncateTaggedToWord32WithFeedback(
}
}
}
- Bind(&done_loop);
+ BIND(&done_loop);
return var_result.value();
}
@@ -1314,11 +1315,11 @@ void InterpreterAssembler::AbortIfWordNotEqual(Node* lhs, Node* rhs,
Label ok(this), abort(this, Label::kDeferred);
Branch(WordEqual(lhs, rhs), &ok, &abort);
- Bind(&abort);
+ BIND(&abort);
Abort(bailout_reason);
Goto(&ok);
- Bind(&ok);
+ BIND(&ok);
}
void InterpreterAssembler::MaybeDropFrames(Node* context) {
@@ -1331,14 +1332,14 @@ void InterpreterAssembler::MaybeDropFrames(Node* context) {
Label ok(this), drop_frames(this);
Branch(IntPtrEqual(restart_fp, null), &ok, &drop_frames);
- Bind(&drop_frames);
+ BIND(&drop_frames);
// We don't expect this call to return since the frame dropper tears down
// the stack and jumps into the function on the target frame to restart it.
CallStub(CodeFactory::FrameDropperTrampoline(isolate()), context, restart_fp);
Abort(kUnexpectedReturnFromFrameDropper);
Goto(&ok);
- Bind(&ok);
+ BIND(&ok);
}
void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
@@ -1353,8 +1354,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
static_cast<int>(bytecode_) * (static_cast<int>(Bytecode::kLast) + 1));
Node* counter_offset =
- WordShl(IntPtrAdd(source_bytecode_table_index, target_bytecode),
- IntPtrConstant(kPointerSizeLog2));
+ TimesPointerSize(IntPtrAdd(source_bytecode_table_index, target_bytecode));
Node* old_counter =
Load(MachineType::IntPtr(), counters_table, counter_offset);
@@ -1364,7 +1364,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
old_counter, IntPtrConstant(std::numeric_limits<uintptr_t>::max()));
Branch(counter_reached_max, &counter_saturated, &counter_ok);
- Bind(&counter_ok);
+ BIND(&counter_ok);
{
Node* new_counter = IntPtrAdd(old_counter, IntPtrConstant(1));
StoreNoWriteBarrier(MachineType::PointerRepresentation(), counters_table,
@@ -1372,7 +1372,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(Node* target_bytecode) {
Goto(&counter_saturated);
}
- Bind(&counter_saturated);
+ BIND(&counter_saturated);
}
// static
@@ -1412,7 +1412,7 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
// BytecodeGraphBuilder::VisitResumeGenerator.
Label loop(this, &var_index), done_loop(this);
Goto(&loop);
- Bind(&loop);
+ BIND(&loop);
{
Node* index = var_index.value();
GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
@@ -1425,7 +1425,7 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
Goto(&loop);
}
- Bind(&done_loop);
+ BIND(&done_loop);
return array;
}
@@ -1445,7 +1445,7 @@ Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
// array contents to not keep them alive artificially.
Label loop(this, &var_index), done_loop(this);
Goto(&loop);
- Bind(&loop);
+ BIND(&loop);
{
Node* index = var_index.value();
GotoIfNot(UintPtrLessThan(index, register_count), &done_loop);
@@ -1460,7 +1460,7 @@ Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
var_index.Bind(IntPtrAdd(index, IntPtrConstant(1)));
Goto(&loop);
}
- Bind(&done_loop);
+ BIND(&done_loop);
return array;
}
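
The many WordShl-to-TimesPointerSize substitutions in this file are behaviour-preserving: shifting an index left by log2 of the pointer size is exactly multiplication by the pointer size. A quick check of that identity:

#include <cassert>
#include <cstdint>

int main() {
  constexpr std::uintptr_t kPointerSize = sizeof(void*);
  constexpr std::uintptr_t kPointerSizeLog2 = (kPointerSize == 8) ? 3 : 2;
  for (std::uintptr_t index = 0; index < 1000; ++index) {
    assert((index << kPointerSizeLog2) == index * kPointerSize);
  }
}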
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index 2a8f3c8810..b02e024d65 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -179,7 +179,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
feedback_vector, feedback_slot, &exit_point, &try_handler, &miss,
CodeStubAssembler::INTPTR_PARAMETERS);
- Bind(&done);
+ BIND(&done);
SetAccumulator(var_result.value());
Dispatch();
}
@@ -190,7 +190,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
Variable var_result(this, MachineRepresentation::kTagged);
ExitPoint exit_point(this, &done, &var_result);
- Bind(&try_handler);
+ BIND(&try_handler);
{
Node* context = GetContext();
Node* smi_slot = SmiTag(feedback_slot);
@@ -203,7 +203,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
&exit_point, &miss);
}
- Bind(&miss);
+ BIND(&miss);
{
Node* context = GetContext();
Node* smi_slot = SmiTag(feedback_slot);
@@ -215,7 +215,7 @@ class InterpreterLoadGlobalAssembler : public InterpreterAssembler {
accessor_asm.LoadGlobalIC_MissCase(&params, &exit_point);
}
- Bind(&done);
+ BIND(&done);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -414,7 +414,7 @@ class InterpreterLookupContextSlotAssembler : public InterpreterAssembler {
}
// Slow path when we have to call out to the runtime.
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* name = LoadConstantPoolEntry(name_index);
Node* result = CallRuntime(function_id, context, name);
@@ -470,7 +470,7 @@ class InterpreterLookupGlobalAssembler : public InterpreterLoadGlobalAssembler {
}
// Slow path when we have to call out to the runtime
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* name_index = BytecodeOperandIdx(0);
Node* name = LoadConstantPoolEntry(name_index);
@@ -557,7 +557,7 @@ IGNITION_HANDLER(LdaNamedProperty, InterpreterAssembler) {
AccessorAssembler accessor_asm(state());
accessor_asm.LoadIC_BytecodeHandler(&params, &exit_point);
- Bind(&done);
+ BIND(&done);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -735,7 +735,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
Branch(IntPtrGreaterThan(cell_index, IntPtrConstant(0)), &if_export,
&if_import);
- Bind(&if_export);
+ BIND(&if_export);
{
Node* regular_exports =
LoadObjectField(module, Module::kRegularExportsOffset);
@@ -746,7 +746,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
Goto(&end);
}
- Bind(&if_import);
+ BIND(&if_import);
{
Node* regular_imports =
LoadObjectField(module, Module::kRegularImportsOffset);
@@ -757,7 +757,7 @@ IGNITION_HANDLER(LdaModuleVariable, InterpreterAssembler) {
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
Dispatch();
}
@@ -777,7 +777,7 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
Branch(IntPtrGreaterThan(cell_index, IntPtrConstant(0)), &if_export,
&if_import);
- Bind(&if_export);
+ BIND(&if_export);
{
Node* regular_exports =
LoadObjectField(module, Module::kRegularExportsOffset);
@@ -788,14 +788,14 @@ IGNITION_HANDLER(StaModuleVariable, InterpreterAssembler) {
Goto(&end);
}
- Bind(&if_import);
+ BIND(&if_import);
{
// Not supported (probably never).
Abort(kUnsupportedModuleOperation);
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
Dispatch();
}
@@ -899,7 +899,7 @@ IGNITION_HANDLER(AddSmi, InterpreterAssembler) {
// {right} is known to be a Smi.
// If the {left} is also a Smi, take the fast path.
Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- Bind(&fastpath);
+ BIND(&fastpath);
{
// Try fast Smi addition first.
Node* pair = IntPtrAddWithOverflow(BitcastTaggedToWord(left),
@@ -909,7 +909,7 @@ IGNITION_HANDLER(AddSmi, InterpreterAssembler) {
// Check if the Smi addition overflowed.
Label if_notoverflow(this);
Branch(overflow, &slowpath, &if_notoverflow);
- Bind(&if_notoverflow);
+ BIND(&if_notoverflow);
{
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kSignedSmall),
feedback_vector, slot_index);
@@ -917,7 +917,7 @@ IGNITION_HANDLER(AddSmi, InterpreterAssembler) {
Goto(&end);
}
}
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* context = GetContext();
// TODO(ishell): pass slot as word-size value.
@@ -926,7 +926,7 @@ IGNITION_HANDLER(AddSmi, InterpreterAssembler) {
feedback_vector));
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -948,7 +948,7 @@ IGNITION_HANDLER(SubSmi, InterpreterAssembler) {
// {right} is known to be a Smi.
// If the {left} is also a Smi, take the fast path.
Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- Bind(&fastpath);
+ BIND(&fastpath);
{
// Try fast Smi subtraction first.
Node* pair = IntPtrSubWithOverflow(BitcastTaggedToWord(left),
@@ -958,7 +958,7 @@ IGNITION_HANDLER(SubSmi, InterpreterAssembler) {
// Check if the Smi subtraction overflowed.
Label if_notoverflow(this);
Branch(overflow, &slowpath, &if_notoverflow);
- Bind(&if_notoverflow);
+ BIND(&if_notoverflow);
{
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kSignedSmall),
feedback_vector, slot_index);
@@ -966,7 +966,7 @@ IGNITION_HANDLER(SubSmi, InterpreterAssembler) {
Goto(&end);
}
}
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* context = GetContext();
// TODO(ishell): pass slot as word-size value.
@@ -975,7 +975,7 @@ IGNITION_HANDLER(SubSmi, InterpreterAssembler) {
feedback_vector));
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -997,7 +997,7 @@ IGNITION_HANDLER(MulSmi, InterpreterAssembler) {
// {right} is known to be a Smi.
// If the {left} is also a Smi, take the fast path.
Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- Bind(&fastpath);
+ BIND(&fastpath);
{
// Both {lhs} and {rhs} are Smis. In case of overflow, the result is
// not necessarily a Smi.
@@ -1008,7 +1008,7 @@ IGNITION_HANDLER(MulSmi, InterpreterAssembler) {
UpdateFeedback(feedback, feedback_vector, slot_index);
Goto(&end);
}
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* context = GetContext();
// TODO(ishell): pass slot as word-size value.
@@ -1018,7 +1018,7 @@ IGNITION_HANDLER(MulSmi, InterpreterAssembler) {
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -1040,14 +1040,14 @@ IGNITION_HANDLER(DivSmi, InterpreterAssembler) {
// {right} is known to be a Smi.
// If the {left} is also a Smi, take the fast path.
Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- Bind(&fastpath);
+ BIND(&fastpath);
{
var_result.Bind(TrySmiDiv(left, right, &slowpath));
UpdateFeedback(SmiConstant(BinaryOperationFeedback::kSignedSmall),
feedback_vector, slot_index);
Goto(&end);
}
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* context = GetContext();
// TODO(ishell): pass slot as word-size value.
@@ -1057,7 +1057,7 @@ IGNITION_HANDLER(DivSmi, InterpreterAssembler) {
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -1079,7 +1079,7 @@ IGNITION_HANDLER(ModSmi, InterpreterAssembler) {
// {right} is known to be a Smi.
// If the {left} is also a Smi, take the fast path.
Branch(TaggedIsSmi(left), &fastpath, &slowpath);
- Bind(&fastpath);
+ BIND(&fastpath);
{
// Both {lhs} and {rhs} are Smis. The result is not necessarily a Smi.
var_result.Bind(SmiMod(left, right));
@@ -1089,7 +1089,7 @@ IGNITION_HANDLER(ModSmi, InterpreterAssembler) {
UpdateFeedback(feedback, feedback_vector, slot_index);
Goto(&end);
}
- Bind(&slowpath);
+ BIND(&slowpath);
{
Node* context = GetContext();
// TODO(ishell): pass slot as word-size value.
@@ -1099,7 +1099,7 @@ IGNITION_HANDLER(ModSmi, InterpreterAssembler) {
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
{
SetAccumulator(var_result.value());
Dispatch();
@@ -1172,7 +1172,7 @@ class InterpreterBitwiseBinaryOpAssembler : public InterpreterAssembler {
AbortIfWordNotEqual(result_map, HeapNumberMapConstant(),
kExpectedHeapNumber);
Goto(&ok);
- Bind(&ok);
+ BIND(&ok);
}
Node* input_feedback =
@@ -1422,21 +1422,21 @@ IGNITION_HANDLER(ToNumber, InterpreterAssembler) {
Node* object_map = LoadMap(object);
Branch(IsHeapNumberMap(object_map), &if_objectisnumber, &if_objectisother);
- Bind(&if_objectissmi);
+ BIND(&if_objectissmi);
{
var_result.Bind(object);
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kSignedSmall));
Goto(&if_done);
}
- Bind(&if_objectisnumber);
+ BIND(&if_objectisnumber);
{
var_result.Bind(object);
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNumber));
Goto(&if_done);
}
- Bind(&if_objectisother);
+ BIND(&if_objectisother);
{
// Convert the {object} to a Number.
Callable callable = CodeFactory::NonNumberToNumber(isolate());
@@ -1445,7 +1445,7 @@ IGNITION_HANDLER(ToNumber, InterpreterAssembler) {
Goto(&if_done);
}
- Bind(&if_done);
+ BIND(&if_done);
StoreRegister(var_result.value(), BytecodeOperandReg(0));
// Record the type feedback collected for {object}.
@@ -1495,14 +1495,14 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
value_var.Bind(value);
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNone));
Goto(&start);
- Bind(&start);
+ BIND(&start);
{
value = value_var.value();
Label if_issmi(this), if_isnotsmi(this);
Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
- Bind(&if_issmi);
+ BIND(&if_issmi);
{
// Try fast Smi addition first.
Node* one = SmiConstant(Smi::FromInt(1));
@@ -1514,35 +1514,35 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
Label if_overflow(this), if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
- Bind(&if_notoverflow);
+ BIND(&if_notoverflow);
var_type_feedback.Bind(
SmiOr(var_type_feedback.value(),
SmiConstant(BinaryOperationFeedback::kSignedSmall)));
result_var.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
Goto(&end);
- Bind(&if_overflow);
+ BIND(&if_overflow);
{
var_finc_value.Bind(SmiToFloat64(value));
Goto(&do_finc);
}
}
- Bind(&if_isnotsmi);
+ BIND(&if_isnotsmi);
{
// Check if the value is a HeapNumber.
Label if_valueisnumber(this), if_valuenotnumber(this, Label::kDeferred);
Node* value_map = LoadMap(value);
Branch(IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber);
- Bind(&if_valueisnumber);
+ BIND(&if_valueisnumber);
{
// Load the HeapNumber value.
var_finc_value.Bind(LoadHeapNumberValue(value));
Goto(&do_finc);
}
- Bind(&if_valuenotnumber);
+ BIND(&if_valuenotnumber);
{
// We do not require an Or with earlier feedback here because once we
// convert the value to a number, we cannot reach this path. We can
@@ -1556,7 +1556,7 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE));
Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
- Bind(&if_valueisoddball);
+ BIND(&if_valueisoddball);
{
// Convert Oddball to Number and check again.
value_var.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
@@ -1565,7 +1565,7 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
Goto(&start);
}
- Bind(&if_valuenotoddball);
+ BIND(&if_valuenotoddball);
{
// Convert to a Number first and try again.
Callable callable = CodeFactory::NonNumberToNumber(isolate());
@@ -1577,7 +1577,7 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
}
}
- Bind(&do_finc);
+ BIND(&do_finc);
{
Node* finc_value = var_finc_value.value();
Node* one = Float64Constant(1.0);
@@ -1589,7 +1589,7 @@ IGNITION_HANDLER(Inc, InterpreterAssembler) {
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
SetAccumulator(result_var.value());
@@ -1622,14 +1622,14 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
var_type_feedback.Bind(SmiConstant(BinaryOperationFeedback::kNone));
value_var.Bind(value);
Goto(&start);
- Bind(&start);
+ BIND(&start);
{
value = value_var.value();
Label if_issmi(this), if_isnotsmi(this);
Branch(TaggedIsSmi(value), &if_issmi, &if_isnotsmi);
- Bind(&if_issmi);
+ BIND(&if_issmi);
{
// Try fast Smi subtraction first.
Node* one = SmiConstant(Smi::FromInt(1));
@@ -1641,35 +1641,35 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
Label if_overflow(this), if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
- Bind(&if_notoverflow);
+ BIND(&if_notoverflow);
var_type_feedback.Bind(
SmiOr(var_type_feedback.value(),
SmiConstant(BinaryOperationFeedback::kSignedSmall)));
result_var.Bind(BitcastWordToTaggedSigned(Projection(0, pair)));
Goto(&end);
- Bind(&if_overflow);
+ BIND(&if_overflow);
{
var_fdec_value.Bind(SmiToFloat64(value));
Goto(&do_fdec);
}
}
- Bind(&if_isnotsmi);
+ BIND(&if_isnotsmi);
{
// Check if the value is a HeapNumber.
Label if_valueisnumber(this), if_valuenotnumber(this, Label::kDeferred);
Node* value_map = LoadMap(value);
Branch(IsHeapNumberMap(value_map), &if_valueisnumber, &if_valuenotnumber);
- Bind(&if_valueisnumber);
+ BIND(&if_valueisnumber);
{
// Load the HeapNumber value.
var_fdec_value.Bind(LoadHeapNumberValue(value));
Goto(&do_fdec);
}
- Bind(&if_valuenotnumber);
+ BIND(&if_valuenotnumber);
{
// We do not require an Or with earlier feedback here because once we
// convert the value to a number, we cannot reach this path. We can
@@ -1683,7 +1683,7 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
Word32Equal(instance_type, Int32Constant(ODDBALL_TYPE));
Branch(is_oddball, &if_valueisoddball, &if_valuenotoddball);
- Bind(&if_valueisoddball);
+ BIND(&if_valueisoddball);
{
// Convert Oddball to Number and check again.
value_var.Bind(LoadObjectField(value, Oddball::kToNumberOffset));
@@ -1692,7 +1692,7 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
Goto(&start);
}
- Bind(&if_valuenotoddball);
+ BIND(&if_valuenotoddball);
{
// Convert to a Number first and try again.
Callable callable = CodeFactory::NonNumberToNumber(isolate());
@@ -1704,7 +1704,7 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
}
}
- Bind(&do_fdec);
+ BIND(&do_fdec);
{
Node* fdec_value = var_fdec_value.value();
Node* one = Float64Constant(1.0);
@@ -1716,7 +1716,7 @@ IGNITION_HANDLER(Dec, InterpreterAssembler) {
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
SetAccumulator(result_var.value());
@@ -1735,17 +1735,17 @@ IGNITION_HANDLER(ToBooleanLogicalNot, InterpreterAssembler) {
Node* true_value = BooleanConstant(true);
Node* false_value = BooleanConstant(false);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
{
result.Bind(false_value);
Goto(&end);
}
- Bind(&if_false);
+ BIND(&if_false);
{
result.Bind(true_value);
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
SetAccumulator(result.value());
Dispatch();
}
@@ -1761,12 +1761,12 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
Node* true_value = BooleanConstant(true);
Node* false_value = BooleanConstant(false);
Branch(WordEqual(value, true_value), &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
{
result.Bind(false_value);
Goto(&end);
}
- Bind(&if_false);
+ BIND(&if_false);
{
if (FLAG_debug_code) {
AbortIfWordNotEqual(value, false_value,
@@ -1775,7 +1775,7 @@ IGNITION_HANDLER(LogicalNot, InterpreterAssembler) {
result.Bind(true_value);
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
SetAccumulator(result.value());
Dispatch();
}
@@ -1800,8 +1800,8 @@ IGNITION_HANDLER(DeletePropertyStrict, InterpreterAssembler) {
Node* object = LoadRegister(reg_index);
Node* key = GetAccumulator();
Node* context = GetContext();
- Node* result =
- CallRuntime(Runtime::kDeleteProperty_Strict, context, object, key);
+ Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key,
+ SmiConstant(STRICT));
SetAccumulator(result);
Dispatch();
}
@@ -1815,8 +1815,8 @@ IGNITION_HANDLER(DeletePropertySloppy, InterpreterAssembler) {
Node* object = LoadRegister(reg_index);
Node* key = GetAccumulator();
Node* context = GetContext();
- Node* result =
- CallRuntime(Runtime::kDeleteProperty_Sloppy, context, object, key);
+ Node* result = CallBuiltin(Builtins::kDeleteProperty, context, object, key,
+ SmiConstant(SLOPPY));
SetAccumulator(result);
Dispatch();
}
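
Both delete handlers now funnel into a single builtin, passing the language mode as an extra Smi operand instead of choosing between two runtime functions. A hypothetical sketch of that folding; the names and the deletion logic here are illustrative only, not V8's:

#include <iostream>
#include <string>

enum class LanguageMode { kSloppy, kStrict };

// One entry point parameterised by mode, as with Builtins::kDeleteProperty.
bool DeleteProperty(const std::string& /*object*/, const std::string& key,
                    LanguageMode mode) {
  bool deleted = !key.empty();  // placeholder for real property deletion
  if (!deleted && mode == LanguageMode::kStrict) {
    std::cerr << "TypeError: cannot delete property\n";  // strict throws
  }
  return deleted;
}

int main() {
  // Two former handlers become two call sites differing only in the mode.
  DeleteProperty("o", "k", LanguageMode::kStrict);
  DeleteProperty("o", "k", LanguageMode::kSloppy);
}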
@@ -2124,313 +2124,41 @@ class InterpreterCompareOpAssembler : public InterpreterAssembler {
Node* lhs = LoadRegister(reg_index);
Node* rhs = GetAccumulator();
Node* context = GetContext();
- Node* slot_index = BytecodeOperandIdx(1);
- Node* feedback_vector = LoadFeedbackVector();
-
- Variable var_result(this, MachineRepresentation::kTagged),
- var_fcmp_lhs(this, MachineRepresentation::kFloat64),
- var_fcmp_rhs(this, MachineRepresentation::kFloat64),
- non_number_value(this, MachineRepresentation::kTagged),
- maybe_smi_value(this, MachineRepresentation::kTagged);
- Label lhs_is_not_smi(this), do_fcmp(this), slow_path(this),
- fast_path_dispatch(this);
-
- GotoIf(TaggedIsNotSmi(lhs), &lhs_is_not_smi);
- {
- Label rhs_is_not_smi(this);
- GotoIf(TaggedIsNotSmi(rhs), &rhs_is_not_smi);
- {
- Comment("Do integer comparison");
- UpdateFeedback(SmiConstant(CompareOperationFeedback::kSignedSmall),
- feedback_vector, slot_index);
- Node* result;
- switch (compare_op) {
- case Token::LT:
- result = SelectBooleanConstant(SmiLessThan(lhs, rhs));
- break;
- case Token::LTE:
- result = SelectBooleanConstant(SmiLessThanOrEqual(lhs, rhs));
- break;
- case Token::GT:
- result = SelectBooleanConstant(SmiLessThan(rhs, lhs));
- break;
- case Token::GTE:
- result = SelectBooleanConstant(SmiLessThanOrEqual(rhs, lhs));
- break;
- case Token::EQ:
- case Token::EQ_STRICT:
- result = SelectBooleanConstant(WordEqual(lhs, rhs));
- break;
- default:
- UNREACHABLE();
- }
- var_result.Bind(result);
- Goto(&fast_path_dispatch);
- }
-
- Bind(&rhs_is_not_smi);
- {
- Node* rhs_map = LoadMap(rhs);
- Label rhs_is_not_number(this);
- GotoIfNot(IsHeapNumberMap(rhs_map), &rhs_is_not_number);
-
- Comment("Convert lhs to float and load HeapNumber value from rhs");
- var_fcmp_lhs.Bind(SmiToFloat64(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
-
- Bind(&rhs_is_not_number);
- {
- non_number_value.Bind(rhs);
- maybe_smi_value.Bind(lhs);
- Goto(&slow_path);
- }
- }
- }
-
- Bind(&lhs_is_not_smi);
- {
- Label rhs_is_not_smi(this), lhs_is_not_number(this),
- rhs_is_not_number(this);
-
- Node* lhs_map = LoadMap(lhs);
- GotoIfNot(IsHeapNumberMap(lhs_map), &lhs_is_not_number);
-
- GotoIfNot(TaggedIsSmi(rhs), &rhs_is_not_smi);
- Comment("Convert rhs to double and load HeapNumber value from lhs");
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(SmiToFloat64(rhs));
- Goto(&do_fcmp);
-
- Bind(&rhs_is_not_smi);
- {
- Node* rhs_map = LoadMap(rhs);
- GotoIfNot(IsHeapNumberMap(rhs_map), &rhs_is_not_number);
-
- Comment("Load HeapNumber values from lhs and rhs");
- var_fcmp_lhs.Bind(LoadHeapNumberValue(lhs));
- var_fcmp_rhs.Bind(LoadHeapNumberValue(rhs));
- Goto(&do_fcmp);
- }
-
- Bind(&lhs_is_not_number);
- {
- non_number_value.Bind(lhs);
- maybe_smi_value.Bind(rhs);
- Goto(&slow_path);
- }
-
- Bind(&rhs_is_not_number);
- {
- non_number_value.Bind(rhs);
- maybe_smi_value.Bind(lhs);
- Goto(&slow_path);
- }
- }
-
- Bind(&do_fcmp);
- {
- Comment("Do floating point comparison");
- Node* lhs_float = var_fcmp_lhs.value();
- Node* rhs_float = var_fcmp_rhs.value();
- UpdateFeedback(SmiConstant(CompareOperationFeedback::kNumber),
- feedback_vector, slot_index);
-
- // Perform a fast floating point comparison.
- Node* result;
- switch (compare_op) {
- case Token::LT:
- result = SelectBooleanConstant(Float64LessThan(lhs_float, rhs_float));
- break;
- case Token::LTE:
- result = SelectBooleanConstant(
- Float64LessThanOrEqual(lhs_float, rhs_float));
- break;
- case Token::GT:
- result =
- SelectBooleanConstant(Float64GreaterThan(lhs_float, rhs_float));
- break;
- case Token::GTE:
- result = SelectBooleanConstant(
- Float64GreaterThanOrEqual(lhs_float, rhs_float));
- break;
- case Token::EQ:
- case Token::EQ_STRICT: {
- Label check_nan(this);
- var_result.Bind(BooleanConstant(false));
- Branch(Float64Equal(lhs_float, rhs_float), &check_nan,
- &fast_path_dispatch);
- Bind(&check_nan);
- result = SelectBooleanConstant(Float64Equal(lhs_float, lhs_float));
- } break;
- default:
- UNREACHABLE();
- }
- var_result.Bind(result);
- Goto(&fast_path_dispatch);
- }
-
- Bind(&fast_path_dispatch);
- {
- SetAccumulator(var_result.value());
- Dispatch();
+ Variable var_type_feedback(this, MachineRepresentation::kTagged);
+ Node* result;
+ switch (compare_op) {
+ case Token::EQ:
+ result = Equal(lhs, rhs, context, &var_type_feedback);
+ break;
+ case Token::EQ_STRICT:
+ result = StrictEqual(lhs, rhs, &var_type_feedback);
+ break;
+ case Token::LT:
+ result = RelationalComparison(CodeStubAssembler::kLessThan, lhs, rhs,
+ context, &var_type_feedback);
+ break;
+ case Token::GT:
+ result = RelationalComparison(CodeStubAssembler::kGreaterThan, lhs, rhs,
+ context, &var_type_feedback);
+ break;
+ case Token::LTE:
+ result = RelationalComparison(CodeStubAssembler::kLessThanOrEqual, lhs,
+ rhs, context, &var_type_feedback);
+ break;
+ case Token::GTE:
+ result = RelationalComparison(CodeStubAssembler::kGreaterThanOrEqual,
+ lhs, rhs, context, &var_type_feedback);
+ break;
+ default:
+ UNREACHABLE();
}
- // Marking a block with more than one predecessor causes register allocator
- // to fail (v8:5998). Add a dummy block as a workaround.
- Label slow_path_deferred(this, Label::kDeferred);
- Bind(&slow_path);
- Goto(&slow_path_deferred);
-
- Bind(&slow_path_deferred);
- {
- // When we reach here, one of the operands is not a Smi / HeapNumber and
- // the other operand could be of any type. The cases where both of them
- // are HeapNumbers / Smis are handled earlier.
- Comment("Collect feedback for non HeapNumber cases.");
- Label update_feedback_and_do_compare(this);
- Variable var_type_feedback(this, MachineRepresentation::kTaggedSigned);
- var_type_feedback.Bind(SmiConstant(CompareOperationFeedback::kAny));
-
- if (Token::IsOrderedRelationalCompareOp(compare_op)) {
- Label check_for_oddball(this);
- // Check for NumberOrOddball feedback.
- Node* non_number_instance_type =
- LoadInstanceType(non_number_value.value());
- GotoIf(
- Word32Equal(non_number_instance_type, Int32Constant(ODDBALL_TYPE)),
- &check_for_oddball);
-
- // Check for string feedback.
- GotoIfNot(IsStringInstanceType(non_number_instance_type),
- &update_feedback_and_do_compare);
-
- GotoIf(TaggedIsSmi(maybe_smi_value.value()),
- &update_feedback_and_do_compare);
-
- Node* maybe_smi_instance_type =
- LoadInstanceType(maybe_smi_value.value());
- GotoIfNot(IsStringInstanceType(maybe_smi_instance_type),
- &update_feedback_and_do_compare);
-
- var_type_feedback.Bind(SmiConstant(CompareOperationFeedback::kString));
- Goto(&update_feedback_and_do_compare);
-
- Bind(&check_for_oddball);
- {
- Label compare_with_oddball_feedback(this);
- GotoIf(TaggedIsSmi(maybe_smi_value.value()),
- &compare_with_oddball_feedback);
-
- Node* maybe_smi_instance_type =
- LoadInstanceType(maybe_smi_value.value());
- GotoIf(Word32Equal(maybe_smi_instance_type,
- Int32Constant(HEAP_NUMBER_TYPE)),
- &compare_with_oddball_feedback);
-
- Branch(
- Word32Equal(maybe_smi_instance_type, Int32Constant(ODDBALL_TYPE)),
- &compare_with_oddball_feedback, &update_feedback_and_do_compare);
-
- Bind(&compare_with_oddball_feedback);
- {
- var_type_feedback.Bind(
- SmiConstant(CompareOperationFeedback::kNumberOrOddball));
- Goto(&update_feedback_and_do_compare);
- }
- }
- } else {
- Label not_string(this), both_are_strings(this);
-
- DCHECK(Token::IsEqualityOp(compare_op));
-
- // If one of them is a Smi and the other is not a number, record "Any"
- // feedback. Equality comparisons do not need feedback about oddballs.
- GotoIf(TaggedIsSmi(maybe_smi_value.value()),
- &update_feedback_and_do_compare);
-
- Node* maybe_smi_instance_type =
- LoadInstanceType(maybe_smi_value.value());
- Node* non_number_instance_type =
- LoadInstanceType(non_number_value.value());
- GotoIfNot(IsStringInstanceType(maybe_smi_instance_type), &not_string);
-
- // If one value is string and other isn't record "Any" feedback.
- Branch(IsStringInstanceType(non_number_instance_type),
- &both_are_strings, &update_feedback_and_do_compare);
-
- Bind(&both_are_strings);
- {
- Node* operand1_feedback = SelectSmiConstant(
- Word32Equal(Word32And(maybe_smi_instance_type,
- Int32Constant(kIsNotInternalizedMask)),
- Int32Constant(kInternalizedTag)),
- CompareOperationFeedback::kInternalizedString,
- CompareOperationFeedback::kString);
-
- Node* operand2_feedback = SelectSmiConstant(
- Word32Equal(Word32And(non_number_instance_type,
- Int32Constant(kIsNotInternalizedMask)),
- Int32Constant(kInternalizedTag)),
- CompareOperationFeedback::kInternalizedString,
- CompareOperationFeedback::kString);
-
- var_type_feedback.Bind(SmiOr(operand1_feedback, operand2_feedback));
- Goto(&update_feedback_and_do_compare);
- }
-
- Bind(&not_string);
- {
- // Check if both operands are of type JSReceiver.
- GotoIfNot(IsJSReceiverInstanceType(maybe_smi_instance_type),
- &update_feedback_and_do_compare);
-
- GotoIfNot(IsJSReceiverInstanceType(non_number_instance_type),
- &update_feedback_and_do_compare);
-
- var_type_feedback.Bind(
- SmiConstant(CompareOperationFeedback::kReceiver));
- Goto(&update_feedback_and_do_compare);
- }
- }
-
- Bind(&update_feedback_and_do_compare);
- {
- Comment("Do the full compare operation");
- UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
- Node* result;
- switch (compare_op) {
- case Token::EQ:
- result = Equal(lhs, rhs, context);
- break;
- case Token::EQ_STRICT:
- result = StrictEqual(lhs, rhs);
- break;
- case Token::LT:
- result = RelationalComparison(CodeStubAssembler::kLessThan, lhs,
- rhs, context);
- break;
- case Token::GT:
- result = RelationalComparison(CodeStubAssembler::kGreaterThan, lhs,
- rhs, context);
- break;
- case Token::LTE:
- result = RelationalComparison(CodeStubAssembler::kLessThanOrEqual,
- lhs, rhs, context);
- break;
- case Token::GTE:
- result = RelationalComparison(
- CodeStubAssembler::kGreaterThanOrEqual, lhs, rhs, context);
- break;
- default:
- UNREACHABLE();
- }
- var_result.Bind(result);
- SetAccumulator(var_result.value());
- Dispatch();
- }
- }
+ Node* slot_index = BytecodeOperandIdx(1);
+ Node* feedback_vector = LoadFeedbackVector();
+ UpdateFeedback(var_type_feedback.value(), feedback_vector, slot_index);
+ SetAccumulator(result);
+ Dispatch();
}
};
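
The rewritten handler above hands all of the Smi, HeapNumber, and string dispatch to shared CodeStubAssembler comparison helpers, which report the observed operand kinds back through a Variable out-parameter so the handler can update the feedback vector once at the end. A rough standalone model of that out-parameter shape; the enum values and helper are illustrative:

#include <cstdio>

enum class CompareFeedback { kNone, kSignedSmall, kNumber, kString, kAny };

// Shared helper: computes the comparison and reports, via |feedback|,
// what kinds of operands it saw, so the caller records feedback once.
bool LessThan(double lhs, double rhs, CompareFeedback* feedback) {
  *feedback = CompareFeedback::kNumber;
  return lhs < rhs;
}

int main() {
  CompareFeedback feedback = CompareFeedback::kNone;
  bool result = LessThan(1.0, 2.0, &feedback);
  std::printf("result=%d feedback=%d\n", result,
              static_cast<int>(feedback));
  return 0;
}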
@@ -2543,7 +2271,7 @@ IGNITION_HANDLER(TestUndetectable, InterpreterAssembler) {
SetAccumulator(result);
Goto(&end);
- Bind(&end);
+ BIND(&end);
Dispatch();
}
@@ -2594,37 +2322,37 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Switch(literal_flag, &abort, cases, labels, arraysize(cases));
- Bind(&abort);
+ BIND(&abort);
{
Comment("Abort");
Abort(BailoutReason::kUnexpectedTestTypeofLiteralFlag);
Goto(&if_false);
}
- Bind(&if_number);
+ BIND(&if_number);
{
Comment("IfNumber");
GotoIfNumber(object, &if_true);
Goto(&if_false);
}
- Bind(&if_string);
+ BIND(&if_string);
{
Comment("IfString");
GotoIf(TaggedIsSmi(object), &if_false);
Branch(IsString(object), &if_true, &if_false);
}
- Bind(&if_symbol);
+ BIND(&if_symbol);
{
Comment("IfSymbol");
GotoIf(TaggedIsSmi(object), &if_false);
Branch(IsSymbol(object), &if_true, &if_false);
}
- Bind(&if_boolean);
+ BIND(&if_boolean);
{
Comment("IfBoolean");
GotoIf(WordEqual(object, BooleanConstant(true)), &if_true);
Branch(WordEqual(object, BooleanConstant(false)), &if_true, &if_false);
}
- Bind(&if_undefined);
+ BIND(&if_undefined);
{
Comment("IfUndefined");
GotoIf(TaggedIsSmi(object), &if_false);
@@ -2636,7 +2364,7 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Branch(Word32Equal(undetectable_bit, Int32Constant(0)), &if_false,
&if_true);
}
- Bind(&if_function);
+ BIND(&if_function);
{
Comment("IfFunction");
GotoIf(TaggedIsSmi(object), &if_false);
@@ -2649,7 +2377,7 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Int32Constant(1 << Map::kIsCallable)),
&if_true, &if_false);
}
- Bind(&if_object);
+ BIND(&if_object);
{
Comment("IfObject");
GotoIf(TaggedIsSmi(object), &if_false);
@@ -2667,29 +2395,29 @@ IGNITION_HANDLER(TestTypeOf, InterpreterAssembler) {
Branch(Word32Equal(callable_undetectable, Int32Constant(0)), &if_true,
&if_false);
}
- Bind(&if_other);
+ BIND(&if_other);
{
// Typeof doesn't return any other string value.
Goto(&if_false);
}
- Bind(&if_false);
+ BIND(&if_false);
{
SetAccumulator(BooleanConstant(false));
Goto(&end);
}
- Bind(&if_true);
+ BIND(&if_true);
{
SetAccumulator(BooleanConstant(true));
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
Dispatch();
}
// Jump <imm>
//
-// Jump by number of bytes represented by the immediate operand |imm|.
+// Jump by the number of bytes represented by the immediate operand |imm|.
IGNITION_HANDLER(Jump, InterpreterAssembler) {
Node* relative_jump = BytecodeOperandUImmWord(0);
Jump(relative_jump);
@@ -2697,7 +2425,8 @@ IGNITION_HANDLER(Jump, InterpreterAssembler) {
// JumpConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool.
IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
Node* index = BytecodeOperandIdx(0);
Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
@@ -2706,7 +2435,7 @@ IGNITION_HANDLER(JumpConstant, InterpreterAssembler) {
// JumpIfTrue <imm>
//
-// Jump by number of bytes represented by an immediate operand if the
+// Jump by the number of bytes represented by an immediate operand if the
// accumulator contains true. This only works for boolean inputs, and
// will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
@@ -2720,9 +2449,9 @@ IGNITION_HANDLER(JumpIfTrue, InterpreterAssembler) {
// JumpIfTrueConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the accumulator contains true. This only works for boolean inputs, and
-// will misbehave if passed arbitrary input values.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the accumulator contains true. This only works for boolean inputs,
+// and will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
@@ -2735,7 +2464,7 @@ IGNITION_HANDLER(JumpIfTrueConstant, InterpreterAssembler) {
// JumpIfFalse <imm>
//
-// Jump by number of bytes represented by an immediate operand if the
+// Jump by the number of bytes represented by an immediate operand if the
// accumulator contains false. This only works for boolean inputs, and
// will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
@@ -2749,9 +2478,9 @@ IGNITION_HANDLER(JumpIfFalse, InterpreterAssembler) {
// JumpIfFalseConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the accumulator contains false. This only works for boolean inputs, and
-// will misbehave if passed arbitrary input values.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the accumulator contains false. This only works for boolean inputs,
+// and will misbehave if passed arbitrary input values.
IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
@@ -2764,71 +2493,71 @@ IGNITION_HANDLER(JumpIfFalseConstant, InterpreterAssembler) {
// JumpIfToBooleanTrue <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is true when the object is cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanTrue, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* relative_jump = BytecodeOperandUImmWord(0);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
Jump(relative_jump);
- Bind(&if_false);
+ BIND(&if_false);
Dispatch();
}
// JumpIfToBooleanTrueConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is true when the object is cast
-// to boolean.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is true when the object is
+// cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanTrueConstant, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
Jump(relative_jump);
- Bind(&if_false);
+ BIND(&if_false);
Dispatch();
}
// JumpIfToBooleanFalse <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is false when the object is cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanFalse, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* relative_jump = BytecodeOperandUImmWord(0);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
Dispatch();
- Bind(&if_false);
+ BIND(&if_false);
Jump(relative_jump);
}
// JumpIfToBooleanFalseConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is false when the object is cast
-// to boolean.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is false when the object is
+// cast to boolean.
IGNITION_HANDLER(JumpIfToBooleanFalseConstant, InterpreterAssembler) {
Node* value = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
Node* relative_jump = LoadAndUntagConstantPoolEntry(index);
Label if_true(this), if_false(this);
BranchIfToBooleanIsTrue(value, &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
Dispatch();
- Bind(&if_false);
+ BIND(&if_false);
Jump(relative_jump);
}
// JumpIfNull <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the null constant.
IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
@@ -2839,8 +2568,8 @@ IGNITION_HANDLER(JumpIfNull, InterpreterAssembler) {
// JumpIfNullConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is the null constant.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is the null constant.
IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* null_value = HeapConstant(isolate()->factory()->null_value());
@@ -2851,7 +2580,7 @@ IGNITION_HANDLER(JumpIfNullConstant, InterpreterAssembler) {
// JumpIfNotNull <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is not the null constant.
IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
@@ -2862,8 +2591,8 @@ IGNITION_HANDLER(JumpIfNotNull, InterpreterAssembler) {
// JumpIfNotNullConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is not the null constant.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is not the null constant.
IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* null_value = HeapConstant(isolate()->factory()->null_value());
@@ -2874,7 +2603,7 @@ IGNITION_HANDLER(JumpIfNotNullConstant, InterpreterAssembler) {
// JumpIfUndefined <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is the undefined constant.
IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
@@ -2885,8 +2614,8 @@ IGNITION_HANDLER(JumpIfUndefined, InterpreterAssembler) {
// JumpIfUndefinedConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is the undefined constant.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is the undefined constant.
IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value());
@@ -2897,7 +2626,7 @@ IGNITION_HANDLER(JumpIfUndefinedConstant, InterpreterAssembler) {
// JumpIfNotUndefined <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is not the undefined constant.
IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
@@ -2908,8 +2637,9 @@ IGNITION_HANDLER(JumpIfNotUndefined, InterpreterAssembler) {
// JumpIfNotUndefinedConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is not the undefined constant.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is not the undefined
+// constant.
IGNITION_HANDLER(JumpIfNotUndefinedConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* undefined_value = HeapConstant(isolate()->factory()->undefined_value());
@@ -2920,7 +2650,7 @@ IGNITION_HANDLER(JumpIfNotUndefinedConstant, InterpreterAssembler) {
// JumpIfJSReceiver <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is a JSReceiver.
IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
@@ -2929,19 +2659,19 @@ IGNITION_HANDLER(JumpIfJSReceiver, InterpreterAssembler) {
Label if_object(this), if_notobject(this, Label::kDeferred), if_notsmi(this);
Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
- Bind(&if_notsmi);
+ BIND(&if_notsmi);
Branch(IsJSReceiver(accumulator), &if_object, &if_notobject);
- Bind(&if_object);
+ BIND(&if_object);
Jump(relative_jump);
- Bind(&if_notobject);
+ BIND(&if_notobject);
Dispatch();
}
// JumpIfJSReceiverConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool if
-// the object referenced by the accumulator is a JSReceiver.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is a JSReceiver.
IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* index = BytecodeOperandIdx(0);
@@ -2950,19 +2680,19 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
Label if_object(this), if_notobject(this), if_notsmi(this);
Branch(TaggedIsSmi(accumulator), &if_notobject, &if_notsmi);
- Bind(&if_notsmi);
+ BIND(&if_notsmi);
Branch(IsJSReceiver(accumulator), &if_object, &if_notobject);
- Bind(&if_object);
+ BIND(&if_object);
Jump(relative_jump);
- Bind(&if_notobject);
+ BIND(&if_notobject);
Dispatch();
}
// JumpIfNotHole <imm>
//
-// Jump by number of bytes represented by an immediate operand if the object
+// Jump by the number of bytes represented by an immediate operand if the object
// referenced by the accumulator is not the hole.
IGNITION_HANDLER(JumpIfNotHole, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
@@ -2973,8 +2703,8 @@ IGNITION_HANDLER(JumpIfNotHole, InterpreterAssembler) {
// JumpIfNotHoleConstant <idx>
//
-// Jump by number of bytes in the Smi in the |idx| entry in the constant pool
-// if the object referenced by the accumulator is the hole constant.
+// Jump by the number of bytes in the Smi in the |idx| entry in the constant
+// pool if the object referenced by the accumulator is not the hole constant.
IGNITION_HANDLER(JumpIfNotHoleConstant, InterpreterAssembler) {
Node* accumulator = GetAccumulator();
Node* the_hole_value = HeapConstant(isolate()->factory()->the_hole_value());
@@ -2985,7 +2715,7 @@ IGNITION_HANDLER(JumpIfNotHoleConstant, InterpreterAssembler) {
// JumpLoop <imm> <loop_depth>
//
-// Jump by number of bytes represented by the immediate operand |imm|. Also
+// Jump by the number of bytes represented by the immediate operand |imm|. Also
// performs a loop nesting check and potentially triggers OSR in case the
// current OSR level matches (or exceeds) the specified |loop_depth|.
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
@@ -2999,10 +2729,10 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
Node* condition = Int32GreaterThanOrEqual(loop_depth, osr_level);
Branch(condition, &ok, &osr_armed);
- Bind(&ok);
+ BIND(&ok);
JumpBackward(relative_jump);
- Bind(&osr_armed);
+ BIND(&osr_armed);
{
Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
Node* target = HeapConstant(callable.code());
@@ -3012,6 +2742,37 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
}
}
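
JumpLoop's branch above takes the back-edge directly while loop_depth >= osr_level and only calls the on-stack-replacement stub once the function's OSR level has climbed past this loop's depth. A compact standalone model of just that predicate, with names mirroring the handler:

#include <cstdio>

// Mirrors Branch(Int32GreaterThanOrEqual(loop_depth, osr_level), &ok,
// &osr_armed): OSR fires once osr_level exceeds this loop's depth.
bool OsrArmed(int loop_depth, int osr_level) {
  return loop_depth < osr_level;
}

int main() {
  std::printf("%d\n", OsrArmed(/*loop_depth=*/0, /*osr_level=*/1));  // 1
  std::printf("%d\n", OsrArmed(/*loop_depth=*/2, /*osr_level=*/1));  // 0
  return 0;
}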
+// SwitchOnSmiNoFeedback <table_start> <table_length> <case_value_base>
+//
+// Jump by the number of bytes defined by a Smi in a table in the constant pool,
+// where the table starts at |table_start| and has |table_length| entries.
+// The table is indexed by the accumulator, minus |case_value_base|. If the
+// case value falls outside the table's |table_length| entries, fall through
+// to the next bytecode.
+IGNITION_HANDLER(SwitchOnSmiNoFeedback, InterpreterAssembler) {
+ Node* acc = GetAccumulator();
+ Node* table_start = BytecodeOperandIdx(0);
+ Node* table_length = BytecodeOperandUImmWord(1);
+ Node* case_value_base = BytecodeOperandImmIntPtr(2);
+
+ Label fall_through(this);
+
+ // The accumulator must be a Smi.
+ // TODO(leszeks): Add a bytecode with type feedback that allows other
+ // accumulator values.
+ CSA_ASSERT(this, TaggedIsSmi(acc));
+
+ Node* case_value = IntPtrSub(SmiUntag(acc), case_value_base);
+ GotoIf(IntPtrLessThan(case_value, IntPtrConstant(0)), &fall_through);
+ GotoIf(IntPtrGreaterThanOrEqual(case_value, table_length), &fall_through);
+ Node* entry = IntPtrAdd(table_start, case_value);
+ Node* relative_jump = LoadAndUntagConstantPoolEntry(entry);
+ Jump(relative_jump);
+
+ BIND(&fall_through);
+ Dispatch();
+}
+
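
The new handler is the classic jump-table pattern: subtract the case base, bounds-check against zero and the table length, then index the table for a relative jump, falling through otherwise. A standalone C++ sketch of the same control flow, with a plain array standing in for the constant pool:

#include <cstdint>
#include <cstdio>

// Returns the relative jump for |acc|, or 0 to signal fall-through to
// the next bytecode, mirroring SwitchOnSmiNoFeedback's bounds checks.
intptr_t SwitchOnSmi(intptr_t acc, const intptr_t* table,
                     intptr_t table_length, intptr_t case_value_base) {
  intptr_t case_value = acc - case_value_base;
  if (case_value < 0) return 0;              // below the table
  if (case_value >= table_length) return 0;  // past the table
  return table[case_value];                  // jump offset from the pool
}

int main() {
  const intptr_t table[] = {8, 16, 24};  // hypothetical jump offsets
  std::printf("%ld\n", static_cast<long>(SwitchOnSmi(5, table, 3, 4)));  // 16
  std::printf("%ld\n", static_cast<long>(SwitchOnSmi(9, table, 3, 4)));  // 0
  return 0;
}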
// CreateRegExpLiteral <pattern_idx> <literal_idx> <flags>
//
// Creates a regular expression literal for literal index <literal_idx> with
@@ -3045,7 +2806,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
IsSetWord32<CreateArrayLiteralFlags::FastShallowCloneBit>(bytecode_flags),
&fast_shallow_clone, &call_runtime);
- Bind(&fast_shallow_clone);
+ BIND(&fast_shallow_clone);
{
ConstructorBuiltinsAssembler constructor_assembler(state());
Node* result = constructor_assembler.EmitFastCloneShallowArray(
@@ -3054,7 +2815,7 @@ IGNITION_HANDLER(CreateArrayLiteral, InterpreterAssembler) {
Dispatch();
}
- Bind(&call_runtime);
+ BIND(&call_runtime);
{
Node* flags_raw = DecodeWordFromWord32<CreateArrayLiteralFlags::FlagsBits>(
bytecode_flags);
@@ -3079,23 +2840,21 @@ IGNITION_HANDLER(CreateObjectLiteral, InterpreterAssembler) {
// Check if we can do a fast clone or have to call the runtime.
Label if_fast_clone(this), if_not_fast_clone(this, Label::kDeferred);
- Node* fast_clone_properties_count = DecodeWordFromWord32<
- CreateObjectLiteralFlags::FastClonePropertiesCountBits>(bytecode_flags);
- Branch(WordNotEqual(fast_clone_properties_count, IntPtrConstant(0)),
+ Branch(IsSetWord32<CreateObjectLiteralFlags::FastCloneSupportedBit>(
+ bytecode_flags),
&if_fast_clone, &if_not_fast_clone);
- Bind(&if_fast_clone);
+ BIND(&if_fast_clone);
{
// If we can do a fast clone do the fast-path in FastCloneShallowObjectStub.
ConstructorBuiltinsAssembler constructor_assembler(state());
Node* result = constructor_assembler.EmitFastCloneShallowObject(
- &if_not_fast_clone, closure, literal_index,
- fast_clone_properties_count);
+ &if_not_fast_clone, closure, literal_index);
StoreRegister(result, BytecodeOperandReg(3));
Dispatch();
}
- Bind(&if_not_fast_clone);
+ BIND(&if_not_fast_clone);
{
// If we can't do a fast clone, call into the runtime.
Node* index = BytecodeOperandIdx(0);
@@ -3135,7 +2894,7 @@ IGNITION_HANDLER(CreateClosure, InterpreterAssembler) {
shared, feedback_vector, vector_index, context));
Dispatch();
- Bind(&call_runtime);
+ BIND(&call_runtime);
{
Node* tenured_raw =
DecodeWordFromWord32<CreateClosureFlags::PretenuredBit>(flags);
@@ -3249,7 +3008,7 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
Node* compare = Word32And(compiler_hints, duplicate_parameters_bit);
Branch(compare, &if_duplicate_parameters, &if_not_duplicate_parameters);
- Bind(&if_not_duplicate_parameters);
+ BIND(&if_not_duplicate_parameters);
{
ArgumentsBuiltinsAssembler constructor_assembler(state());
Node* result =
@@ -3258,7 +3017,7 @@ IGNITION_HANDLER(CreateMappedArguments, InterpreterAssembler) {
Dispatch();
}
- Bind(&if_duplicate_parameters);
+ BIND(&if_duplicate_parameters);
{
Node* result =
CallRuntime(Runtime::kNewSloppyArguments_Generic, context, closure);
@@ -3301,10 +3060,10 @@ IGNITION_HANDLER(StackCheck, InterpreterAssembler) {
Node* interrupt = StackCheckTriggeredInterrupt();
Branch(interrupt, &stack_check_interrupt, &ok);
- Bind(&ok);
+ BIND(&ok);
Dispatch();
- Bind(&stack_check_interrupt);
+ BIND(&stack_check_interrupt);
{
Node* context = GetContext();
CallRuntime(Runtime::kStackGuard, context);
@@ -3427,7 +3186,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
cache_length);
Dispatch();
- Bind(&call_runtime);
+ BIND(&call_runtime);
{
Node* result_triple =
CallRuntime(Runtime::kForInPrepare, context, receiver);
@@ -3438,7 +3197,7 @@ IGNITION_HANDLER(ForInPrepare, InterpreterForInPrepareAssembler) {
cache_length);
Dispatch();
}
- Bind(&nothing_to_iterate);
+ BIND(&nothing_to_iterate);
{
// Receiver is null or undefined or descriptors are zero length.
Node* zero = SmiConstant(0);
@@ -3468,13 +3227,13 @@ IGNITION_HANDLER(ForInNext, InterpreterAssembler) {
Label if_fast(this), if_slow(this, Label::kDeferred);
Node* receiver_map = LoadMap(receiver);
Branch(WordEqual(receiver_map, cache_type), &if_fast, &if_slow);
- Bind(&if_fast);
+ BIND(&if_fast);
{
// Enum cache in use for {receiver}, the {key} is definitely valid.
SetAccumulator(key);
Dispatch();
}
- Bind(&if_slow);
+ BIND(&if_slow);
{
// Record the fact that we hit the for-in slow path.
Node* vector_index = BytecodeOperandIdx(3);
@@ -3505,17 +3264,17 @@ IGNITION_HANDLER(ForInContinue, InterpreterAssembler) {
// Check if {index} is at {cache_length} already.
Label if_true(this), if_false(this), end(this);
Branch(WordEqual(index, cache_length), &if_true, &if_false);
- Bind(&if_true);
+ BIND(&if_true);
{
SetAccumulator(BooleanConstant(false));
Goto(&end);
}
- Bind(&if_false);
+ BIND(&if_false);
{
SetAccumulator(BooleanConstant(true));
Goto(&end);
}
- Bind(&end);
+ BIND(&end);
Dispatch();
}
@@ -3574,7 +3333,7 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
STATIC_ASSERT(LastStepAction == StepIn);
Node* step_next = Int32Constant(StepNext);
Branch(Int32LessThanOrEqual(step_next, step_action), &if_stepping, &ok);
- Bind(&ok);
+ BIND(&ok);
Node* array =
LoadObjectField(generator, JSGeneratorObject::kRegisterFileOffset);
@@ -3598,7 +3357,7 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
static_cast<int>(SuspendFlags::kAsyncGeneratorAwait))),
&if_asyncgeneratorawait, &if_notasyncgeneratorawait);
- Bind(&if_notasyncgeneratorawait);
+ BIND(&if_notasyncgeneratorawait);
{
// For ordinary yields (and for AwaitExpressions in Async Functions, which
// are implemented as ordinary yields), it is safe to write over the
@@ -3608,7 +3367,7 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
Goto(&merge);
}
- Bind(&if_asyncgeneratorawait);
+ BIND(&if_asyncgeneratorawait);
{
// An AwaitExpression in an Async Generator requires writing to the
// [await_input_or_debug_pos] field.
@@ -3619,10 +3378,10 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
Goto(&merge);
}
- Bind(&merge);
+ BIND(&merge);
Dispatch();
- Bind(&if_stepping);
+ BIND(&if_stepping);
{
Node* context = GetContext();
CallRuntime(Runtime::kDebugRecordGenerator, context, generator);
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
index bdd079ab84..e8572ba1d4 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-intrinsics-generator.cc
@@ -89,7 +89,7 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context,
__ Switch(function_id, &abort, cases, labels, arraysize(cases));
#define HANDLE_CASE(name, lower_case, expected_arg_count) \
- __ Bind(&lower_case); \
+ __ BIND(&lower_case); \
if (FLAG_debug_code && expected_arg_count >= 0) { \
AbortIfArgCountMismatch(expected_arg_count, arg_count); \
} \
@@ -98,14 +98,14 @@ Node* IntrinsicsGenerator::InvokeIntrinsic(Node* function_id, Node* context,
INTRINSICS_LIST(HANDLE_CASE)
#undef HANDLE_CASE
- __ Bind(&abort);
+ __ BIND(&abort);
{
__ Abort(BailoutReason::kUnexpectedFunctionIDForInvokeIntrinsic);
result.Bind(__ UndefinedConstant());
__ Goto(&end);
}
- __ Bind(&end);
+ __ BIND(&end);
return result.value();
}
@@ -133,19 +133,19 @@ Node* IntrinsicsGenerator::IsInstanceType(Node* input, int type) {
Node* condition = CompareInstanceType(arg, type, kInstanceTypeEqual);
__ Branch(condition, &return_true, &return_false);
- __ Bind(&return_true);
+ __ BIND(&return_true);
{
return_value.Bind(__ BooleanConstant(true));
__ Goto(&end);
}
- __ Bind(&return_false);
+ __ BIND(&return_false);
{
return_value.Bind(__ BooleanConstant(false));
__ Goto(&end);
}
- __ Bind(&end);
+ __ BIND(&end);
return return_value.value();
}
@@ -166,19 +166,19 @@ Node* IntrinsicsGenerator::IsJSReceiver(Node* input, Node* arg_count,
kInstanceTypeGreaterThanOrEqual);
__ Branch(condition, &return_true, &return_false);
- __ Bind(&return_true);
+ __ BIND(&return_true);
{
return_value.Bind(__ BooleanConstant(true));
__ Goto(&end);
}
- __ Bind(&return_false);
+ __ BIND(&return_false);
{
return_value.Bind(__ BooleanConstant(false));
__ Goto(&end);
}
- __ Bind(&end);
+ __ BIND(&end);
return return_value.value();
}
@@ -197,6 +197,36 @@ Node* IntrinsicsGenerator::IsTypedArray(Node* input, Node* arg_count,
return IsInstanceType(input, JS_TYPED_ARRAY_TYPE);
}
+Node* IntrinsicsGenerator::IsJSMap(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_MAP_TYPE);
+}
+
+Node* IntrinsicsGenerator::IsJSMapIterator(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_MAP_ITERATOR_TYPE);
+}
+
+Node* IntrinsicsGenerator::IsJSSet(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_SET_TYPE);
+}
+
+Node* IntrinsicsGenerator::IsJSSetIterator(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_SET_ITERATOR_TYPE);
+}
+
+Node* IntrinsicsGenerator::IsJSWeakMap(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_WEAK_MAP_TYPE);
+}
+
+Node* IntrinsicsGenerator::IsJSWeakSet(Node* input, Node* arg_count,
+ Node* context) {
+ return IsInstanceType(input, JS_WEAK_SET_TYPE);
+}
+
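
Each of the six intrinsics added above is a one-line wrapper over the shared IsInstanceType helper, which compares an object's instance type against one fixed constant. A plain-C++ model of that shape; the enum and struct are illustrative stand-ins for V8's instance-type machinery:

#include <cstdio>

enum InstanceType { JS_MAP_TYPE, JS_SET_TYPE, JS_OBJECT_TYPE };

struct HeapObject {
  InstanceType instance_type;
};

// Single instance-type comparison shared by every predicate.
bool IsInstanceType(const HeapObject& input, InstanceType type) {
  return input.instance_type == type;
}

// Each intrinsic pins the helper to one constant, like IsJSMap et al.
bool IsJSMap(const HeapObject& input) {
  return IsInstanceType(input, JS_MAP_TYPE);
}
bool IsJSSet(const HeapObject& input) {
  return IsInstanceType(input, JS_SET_TYPE);
}

int main() {
  HeapObject map_like{JS_MAP_TYPE};
  std::printf("%d %d\n", IsJSMap(map_like), IsJSSet(map_like));  // 1 0
  return 0;
}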
Node* IntrinsicsGenerator::IsSmi(Node* input, Node* arg_count, Node* context) {
// TODO(ishell): Use SelectBooleanConstant here.
InterpreterAssembler::Variable return_value(assembler_,
@@ -207,19 +237,19 @@ Node* IntrinsicsGenerator::IsSmi(Node* input, Node* arg_count, Node* context) {
Node* arg = __ LoadRegister(input);
__ Branch(__ TaggedIsSmi(arg), &if_smi, &if_not_smi);
- __ Bind(&if_smi);
+ __ BIND(&if_smi);
{
return_value.Bind(__ BooleanConstant(true));
__ Goto(&end);
}
- __ Bind(&if_not_smi);
+ __ BIND(&if_not_smi);
{
return_value.Bind(__ BooleanConstant(false));
__ Goto(&end);
}
- __ Bind(&end);
+ __ BIND(&end);
return return_value.value();
}
@@ -305,7 +335,7 @@ Node* IntrinsicsGenerator::Call(Node* args_reg, Node* arg_count,
__ GotoIfNot(comparison, &arg_count_positive);
__ Abort(kWrongArgumentCountForInvokeIntrinsic);
__ Goto(&arg_count_positive);
- __ Bind(&arg_count_positive);
+ __ BIND(&arg_count_positive);
}
Node* result = __ CallJS(function, context, receiver_arg, target_args_count,
@@ -344,7 +374,7 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg,
return_value.Bind(iterator);
__ Goto(&done);
- __ Bind(&not_receiver);
+ __ BIND(&not_receiver);
{
return_value.Bind(
__ CallRuntime(Runtime::kThrowSymbolIteratorInvalid, context));
@@ -353,7 +383,7 @@ Node* IntrinsicsGenerator::CreateAsyncFromSyncIterator(Node* args_reg,
__ Goto(&done);
}
- __ Bind(&done);
+ __ BIND(&done);
return return_value.value();
}
@@ -369,6 +399,53 @@ Node* IntrinsicsGenerator::AsyncGeneratorGetAwaitInputOrDebugPos(
return value;
}
+Node* IntrinsicsGenerator::CreateJSGeneratorObject(Node* input, Node* arg_count,
+ Node* context) {
+ return IntrinsicAsBuiltinCall(input, context,
+ Builtins::kCreateGeneratorObject);
+}
+
+Node* IntrinsicsGenerator::GeneratorGetContext(Node* args_reg, Node* arg_count,
+ Node* context) {
+ Node* generator = __ LoadRegister(args_reg);
+ Node* const value =
+ __ LoadObjectField(generator, JSGeneratorObject::kContextOffset);
+
+ return value;
+}
+
+Node* IntrinsicsGenerator::GeneratorGetInputOrDebugPos(Node* args_reg,
+ Node* arg_count,
+ Node* context) {
+ Node* generator = __ LoadRegister(args_reg);
+ Node* const value =
+ __ LoadObjectField(generator, JSGeneratorObject::kInputOrDebugPosOffset);
+
+ return value;
+}
+
+Node* IntrinsicsGenerator::GeneratorGetResumeMode(Node* args_reg,
+ Node* arg_count,
+ Node* context) {
+ Node* generator = __ LoadRegister(args_reg);
+ Node* const value =
+ __ LoadObjectField(generator, JSGeneratorObject::kResumeModeOffset);
+
+ return value;
+}
+
+Node* IntrinsicsGenerator::GeneratorClose(Node* args_reg, Node* arg_count,
+ Node* context) {
+ Node* generator = __ LoadRegister(args_reg);
+ Node* const value =
+ __ LoadObjectField(generator, JSGeneratorObject::kResumeModeOffset);
+ __ StoreObjectFieldNoWriteBarrier(
+ generator, JSGeneratorObject::kContinuationOffset,
+ __ SmiConstant(JSGeneratorObject::kGeneratorClosed));
+
+ return value;
+}
+
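
GeneratorClose reads the resume mode before overwriting the continuation field with the closed sentinel, so the caller still observes the pre-close mode. A plain-struct model of that read-then-mark sequence; the field layout and sentinel value are illustrative:

#include <cassert>

struct GeneratorModel {
  int resume_mode;
  int continuation;
};

constexpr int kClosedSentinel = -1;  // illustrative value

// Returns the resume mode observed at close time, then marks the
// generator closed, mirroring the intrinsic's two field accesses.
int GeneratorClose(GeneratorModel* generator) {
  int value = generator->resume_mode;
  generator->continuation = kClosedSentinel;
  return value;
}

int main() {
  GeneratorModel g{/*resume_mode=*/1, /*continuation=*/42};
  assert(GeneratorClose(&g) == 1);
  assert(g.continuation == kClosedSentinel);
  return 0;
}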
Node* IntrinsicsGenerator::AsyncGeneratorReject(Node* input, Node* arg_count,
Node* context) {
return IntrinsicAsBuiltinCall(input, context,
@@ -387,7 +464,7 @@ void IntrinsicsGenerator::AbortIfArgCountMismatch(int expected, Node* actual) {
__ GotoIf(comparison, &match);
__ Abort(kWrongArgumentCountForInvokeIntrinsic);
__ Goto(&match);
- __ Bind(&match);
+ __ BIND(&match);
}
} // namespace interpreter
diff --git a/deps/v8/src/interpreter/interpreter-intrinsics.h b/deps/v8/src/interpreter/interpreter-intrinsics.h
index 137bdbf9cb..3a69069532 100644
--- a/deps/v8/src/interpreter/interpreter-intrinsics.h
+++ b/deps/v8/src/interpreter/interpreter-intrinsics.h
@@ -13,26 +13,37 @@ namespace interpreter {
// List of supported intrinsics, with upper case name, lower case name and
// expected number of arguments (-1 denoting argument count is variable).
-#define INTRINSICS_LIST(V) \
- V(AsyncGeneratorGetAwaitInputOrDebugPos, \
- async_generator_get_await_input_or_debug_pos, 1) \
- V(AsyncGeneratorReject, async_generator_reject, 2) \
- V(AsyncGeneratorResolve, async_generator_resolve, 3) \
- V(Call, call, -1) \
- V(ClassOf, class_of, 1) \
- V(CreateIterResultObject, create_iter_result_object, 2) \
- V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
- V(HasProperty, has_property, 2) \
- V(IsArray, is_array, 1) \
- V(IsJSProxy, is_js_proxy, 1) \
- V(IsJSReceiver, is_js_receiver, 1) \
- V(IsSmi, is_smi, 1) \
- V(IsTypedArray, is_typed_array, 1) \
- V(SubString, sub_string, 3) \
- V(ToString, to_string, 1) \
- V(ToLength, to_length, 1) \
- V(ToInteger, to_integer, 1) \
- V(ToNumber, to_number, 1) \
+#define INTRINSICS_LIST(V) \
+ V(AsyncGeneratorGetAwaitInputOrDebugPos, \
+ async_generator_get_await_input_or_debug_pos, 1) \
+ V(AsyncGeneratorReject, async_generator_reject, 2) \
+ V(AsyncGeneratorResolve, async_generator_resolve, 3) \
+ V(CreateJSGeneratorObject, create_js_generator_object, 2) \
+ V(GeneratorGetContext, generator_get_context, 1) \
+ V(GeneratorGetResumeMode, generator_get_resume_mode, 1) \
+ V(GeneratorGetInputOrDebugPos, generator_get_input_or_debug_pos, 1) \
+ V(GeneratorClose, generator_close, 1) \
+ V(Call, call, -1) \
+ V(ClassOf, class_of, 1) \
+ V(CreateIterResultObject, create_iter_result_object, 2) \
+ V(CreateAsyncFromSyncIterator, create_async_from_sync_iterator, 1) \
+ V(HasProperty, has_property, 2) \
+ V(IsArray, is_array, 1) \
+ V(IsJSMap, is_js_map, 1) \
+ V(IsJSMapIterator, is_js_map_iterator, 1) \
+ V(IsJSProxy, is_js_proxy, 1) \
+ V(IsJSReceiver, is_js_receiver, 1) \
+ V(IsJSSet, is_js_set, 1) \
+ V(IsJSSetIterator, is_js_set_iterator, 1) \
+ V(IsJSWeakMap, is_js_weak_map, 1) \
+ V(IsJSWeakSet, is_js_weak_set, 1) \
+ V(IsSmi, is_smi, 1) \
+ V(IsTypedArray, is_typed_array, 1) \
+ V(SubString, sub_string, 3) \
+ V(ToString, to_string, 1) \
+ V(ToLength, to_length, 1) \
+ V(ToInteger, to_integer, 1) \
+ V(ToNumber, to_number, 1) \
V(ToObject, to_object, 1)
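
INTRINSICS_LIST is an X-macro: each consumer supplies its own V to stamp out declarations, switch cases, or lookup tables from this one list, which is why adding an intrinsic only requires a new V(...) line here plus its generator method. A self-contained demonstration of the pattern with a two-entry list:

#include <cstdio>

// A miniature INTRINSICS_LIST(V): name, lower-case name, arg count.
#define DEMO_INTRINSICS_LIST(V) \
  V(IsSmi, is_smi, 1)           \
  V(Call, call, -1)

// One expansion builds a name table...
#define DEMO_NAME(Name, lower_case, arg_count) #Name,
static const char* kDemoNames[] = {DEMO_INTRINSICS_LIST(DEMO_NAME)};
#undef DEMO_NAME

// ...another builds a parallel arg-count table from the same list.
#define DEMO_ARGC(Name, lower_case, arg_count) arg_count,
static const int kDemoArgCounts[] = {DEMO_INTRINSICS_LIST(DEMO_ARGC)};
#undef DEMO_ARGC

int main() {
  for (unsigned i = 0; i < sizeof(kDemoNames) / sizeof(kDemoNames[0]); ++i) {
    std::printf("%s takes %d args\n", kDemoNames[i], kDemoArgCounts[i]);
  }
  return 0;
}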
class IntrinsicsHelper {
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index 42b2b18ad1..b793ae5310 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -16,6 +16,7 @@
#include "src/log.h"
#include "src/objects.h"
#include "src/setup-isolate.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
@@ -109,14 +110,14 @@ size_t Interpreter::GetDispatchTableIndex(Bytecode bytecode,
return 0;
}
-void Interpreter::IterateDispatchTable(ObjectVisitor* v) {
+void Interpreter::IterateDispatchTable(RootVisitor* v) {
for (int i = 0; i < kDispatchTableSize; i++) {
Address code_entry = dispatch_table_[i];
Object* code = code_entry == nullptr
? nullptr
: Code::GetCodeFromTargetAddress(code_entry);
Object* old_code = code;
- v->VisitPointer(&code);
+ v->VisitRootPointer(Root::kDispatchTable, &code);
if (code != old_code) {
dispatch_table_[i] = reinterpret_cast<Code*>(code)->entry();
}
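
IterateDispatchTable illustrates the visitor write-back contract that the RootVisitor migration preserves: the visitor may rewrite the pointer it is handed when the GC moves the object, and the caller re-derives any cached value (here the code entry address) if the slot changed. A standalone model of that contract, with types simplified:

#include <cstdio>

struct CodeModel {
  int id;
};

// Simplified visitor: "moves" the object by rewriting the slot, the way
// a moving collector would during root iteration.
struct RootVisitorModel {
  CodeModel* relocated;
  void VisitRootPointer(CodeModel** slot) {
    if (*slot != nullptr) *slot = relocated;
  }
};

int main() {
  CodeModel old_code{1}, new_code{2};
  CodeModel* entry = &old_code;

  CodeModel* code = entry;    // load from the dispatch table
  CodeModel* before = code;
  RootVisitorModel v{&new_code};
  v.VisitRootPointer(&code);  // visitor may rewrite the slot
  if (code != before) {
    entry = code;             // re-derive the cached entry on movement
  }
  std::printf("entry id=%d\n", entry->id);  // 2
  return 0;
}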
@@ -158,8 +159,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::PrepareJobImpl() {
OFStream os(stdout);
std::unique_ptr<char[]> name = info()->GetDebugName();
os << "[generating bytecode for function: " << info()->GetDebugName().get()
- << "]" << std::endl
- << std::flush;
+ << "]" << std::endl;
}
return SUCCEEDED;
@@ -198,7 +198,7 @@ InterpreterCompilationJob::Status InterpreterCompilationJob::FinalizeJobImpl() {
if (print_bytecode_) {
OFStream os(stdout);
- bytecodes->Print(os);
+ bytecodes->Disassemble(os);
os << std::flush;
}
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 4dc6241c24..2df29bee41 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -24,6 +24,7 @@ class Callable;
class CompilationInfo;
class CompilationJob;
class SetupIsolateDelegate;
+class RootVisitor;
namespace interpreter {
@@ -44,7 +45,7 @@ class Interpreter {
Code* GetBytecodeHandler(Bytecode bytecode, OperandScale operand_scale);
// GC support.
- void IterateDispatchTable(ObjectVisitor* v);
+ void IterateDispatchTable(RootVisitor* v);
// Disassembler support (only useful with ENABLE_DISASSEMBLER defined).
const char* LookupNameOfBytecodeHandler(Code* code);
diff --git a/deps/v8/src/intl.cc b/deps/v8/src/intl.cc
new file mode 100644
index 0000000000..0d3c507989
--- /dev/null
+++ b/deps/v8/src/intl.cc
@@ -0,0 +1,403 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#include "src/intl.h"
+
+#include <memory>
+
+#include "src/factory.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/string-case.h"
+#include "unicode/calendar.h"
+#include "unicode/gregocal.h"
+#include "unicode/timezone.h"
+#include "unicode/ustring.h"
+#include "unicode/uvernum.h"
+#include "unicode/uversion.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+inline bool IsASCIIUpper(uint16_t ch) { return ch >= 'A' && ch <= 'Z'; }
+
+const uint8_t kToLower[256] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23,
+ 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B,
+ 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73,
+ 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B,
+ 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83,
+ 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B,
+ 0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
+ 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3,
+ 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
+ 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB,
+ 0xEC, 0xED, 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xD7,
+ 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3,
+ 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
+ 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB,
+ 0xFC, 0xFD, 0xFE, 0xFF,
+};
+
+inline uint16_t ToLatin1Lower(uint16_t ch) {
+ return static_cast<uint16_t>(kToLower[ch]);
+}
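
Two entries in kToLower are deliberate fixed points: 0xD7 (the multiplication sign) and 0xDF (sharp-s) map to themselves, while the rest of the Latin-1 uppercase block 0xC0..0xDE shifts down by 0x20 exactly like ASCII. A quick standalone check of those properties against an arithmetic reference:

#include <cassert>
#include <cstdint>

// Arithmetic reference for Latin-1 lowercasing: ASCII A-Z and the
// Latin-1 block 0xC0-0xDE shift down by 0x20, except 0xD7, which has
// no lowercase form; 0xDF is already lowercase.
uint16_t Latin1LowerReference(uint16_t ch) {
  bool is_upper = (ch >= 'A' && ch <= 'Z') ||
                  (ch >= 0xC0 && ch <= 0xDE && ch != 0xD7);
  return is_upper ? ch + 0x20 : ch;
}

int main() {
  assert(Latin1LowerReference('A') == 'a');
  assert(Latin1LowerReference(0xC0) == 0xE0);  // A-grave -> a-grave
  assert(Latin1LowerReference(0xD7) == 0xD7);  // multiplication sign
  assert(Latin1LowerReference(0xDF) == 0xDF);  // sharp-s unchanged
  return 0;
}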
+
+inline uint16_t ToASCIIUpper(uint16_t ch) {
+ return ch & ~((ch >= 'a' && ch <= 'z') << 5);
+}
+
+// Does not work for U+00DF (sharp-s), U+00B5 (micro sign), U+00FF.
+inline uint16_t ToLatin1Upper(uint16_t ch) {
+ DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
+ return ch &
+ ~(((ch >= 'a' && ch <= 'z') || (((ch & 0xE0) == 0xE0) && ch != 0xF7))
+ << 5);
+}
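
ToASCIIUpper and ToLatin1Upper are branchless: the "is lowercase" predicate evaluates to 0 or 1, shifting it left by 5 turns it into the 0x20 case bit, and the AND-NOT clears that bit only when the predicate held. A standalone demonstration of the trick for the ASCII case:

#include <cassert>
#include <cstdint>

// (ch >= 'a' && ch <= 'z') is 0 or 1; shifted left by 5 it becomes the
// 0x20 case bit, so the AND-NOT clears it only for a-z.
uint16_t BranchlessAsciiUpper(uint16_t ch) {
  return ch & ~((ch >= 'a' && ch <= 'z') << 5);
}

int main() {
  assert(BranchlessAsciiUpper('a') == 'A');
  assert(BranchlessAsciiUpper('z') == 'Z');
  assert(BranchlessAsciiUpper('A') == 'A');  // already upper: unchanged
  assert(BranchlessAsciiUpper('[') == '[');  // non-letter: unchanged
  return 0;
}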
+
+template <typename Char>
+bool ToUpperFastASCII(const Vector<const Char>& src,
+ Handle<SeqOneByteString> result) {
+ // Do a faster loop for the case where all the characters are ASCII.
+ uint16_t ored = 0;
+ int32_t index = 0;
+ for (auto it = src.begin(); it != src.end(); ++it) {
+ uint16_t ch = static_cast<uint16_t>(*it);
+ ored |= ch;
+ result->SeqOneByteStringSet(index++, ToASCIIUpper(ch));
+ }
+ return !(ored & ~0x7F);
+}
+
+const uint16_t sharp_s = 0xDF;
+
+template <typename Char>
+bool ToUpperOneByte(const Vector<const Char>& src, uint8_t* dest,
+ int* sharp_s_count) {
+ // Still a pretty fast path for input with non-ASCII Latin-1 characters.
+
+ // There are two special cases.
+ // 1. U+00B5 and U+00FF are mapped to a character beyond U+00FF.
+ // 2. Lower-case sharp-S converts to "SS" (two characters).
+ *sharp_s_count = 0;
+ for (auto it = src.begin(); it != src.end(); ++it) {
+ uint16_t ch = static_cast<uint16_t>(*it);
+ if (V8_UNLIKELY(ch == sharp_s)) {
+ ++(*sharp_s_count);
+ continue;
+ }
+ if (V8_UNLIKELY(ch == 0xB5 || ch == 0xFF)) {
+ // Since this upper-cased character does not fit in an 8-bit string, we
+ // need to take the 16-bit path.
+ return false;
+ }
+ *dest++ = ToLatin1Upper(ch);
+ }
+
+ return true;
+}
+
+template <typename Char>
+void ToUpperWithSharpS(const Vector<const Char>& src,
+ Handle<SeqOneByteString> result) {
+ int32_t dest_index = 0;
+ for (auto it = src.begin(); it != src.end(); ++it) {
+ uint16_t ch = static_cast<uint16_t>(*it);
+ if (ch == sharp_s) {
+ result->SeqOneByteStringSet(dest_index++, 'S');
+ result->SeqOneByteStringSet(dest_index++, 'S');
+ } else {
+ result->SeqOneByteStringSet(dest_index++, ToLatin1Upper(ch));
+ }
+ }
+}
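
ToUpperOneByte and ToUpperWithSharpS form a two-pass scheme: the first pass counts sharp-s occurrences (each one grows the output by a character), the caller then allocates length + sharp_s_count, and the second pass writes the expansion. The same structure in a standalone sketch over std::string holding Latin-1 bytes; only a-z handling is modeled, for brevity:

#include <cassert>
#include <string>

// Two-pass Latin-1 uppercase with sharp-s expansion: count, size the
// output exactly, then expand each sharp-s to "SS".
std::string UpperWithSharpS(const std::string& src) {
  const unsigned char kSharpS = 0xDF;
  size_t sharp_s_count = 0;
  for (unsigned char ch : src) {
    if (ch == kSharpS) ++sharp_s_count;        // pass 1: count only
  }
  std::string result;
  result.reserve(src.size() + sharp_s_count);  // exact final length
  for (unsigned char ch : src) {               // pass 2: expand
    if (ch == kSharpS) {
      result += "SS";
    } else if (ch >= 'a' && ch <= 'z') {
      result += static_cast<char>(ch - 0x20);
    } else {
      result += static_cast<char>(ch);
    }
  }
  return result;
}

int main() {
  assert(UpperWithSharpS("stra\xDF" "e") == "STRASSE");
  return 0;
}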
+
+inline int FindFirstUpperOrNonAscii(String* s, int length) {
+ for (int index = 0; index < length; ++index) {
+ uint16_t ch = s->Get(index);
+ if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
+ return index;
+ }
+ }
+ return length;
+}
+
+} // namespace
+
+const uint8_t* ToLatin1LowerTable() { return &kToLower[0]; }
+
+const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
+ std::unique_ptr<uc16[]>* dest,
+ int32_t length) {
+ DCHECK(flat.IsFlat());
+ if (flat.IsOneByte()) {
+ if (!*dest) {
+ dest->reset(NewArray<uc16>(length));
+ CopyChars(dest->get(), flat.ToOneByteVector().start(), length);
+ }
+ return reinterpret_cast<const UChar*>(dest->get());
+ } else {
+ return reinterpret_cast<const UChar*>(flat.ToUC16Vector().start());
+ }
+}
+
+MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
+ bool is_to_upper, const char* lang) {
+ auto case_converter = is_to_upper ? u_strToUpper : u_strToLower;
+ int32_t src_length = s->length();
+ int32_t dest_length = src_length;
+ UErrorCode status;
+ Handle<SeqTwoByteString> result;
+ std::unique_ptr<uc16[]> sap;
+
+ if (dest_length == 0) return isolate->heap()->empty_string();
+
+ // This is not a real loop. It'll be executed only once (no overflow) or
+ // twice (overflow).
+ for (int i = 0; i < 2; ++i) {
+ // Case conversion can increase the string length (e.g. sharp-S => SS),
+ // so we have to handle RangeError exceptions here.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result, isolate->factory()->NewRawTwoByteString(dest_length));
+ DisallowHeapAllocation no_gc;
+ DCHECK(s->IsFlat());
+ String::FlatContent flat = s->GetFlatContent();
+ const UChar* src = GetUCharBufferFromFlat(flat, &sap, src_length);
+ status = U_ZERO_ERROR;
+ dest_length = case_converter(reinterpret_cast<UChar*>(result->GetChars()),
+ dest_length, src, src_length, lang, &status);
+ if (status != U_BUFFER_OVERFLOW_ERROR) break;
+ }
+
+ // In most cases, the output will fill the destination buffer completely,
+ // leading to an unterminated string (U_STRING_NOT_TERMINATED_WARNING).
+ // Only in rare cases will it be shorter than the destination buffer, in
+ // which case |result| has to be truncated.
+ DCHECK(U_SUCCESS(status));
+ if (V8_LIKELY(status == U_STRING_NOT_TERMINATED_WARNING)) {
+ DCHECK(dest_length == result->length());
+ return *result;
+ }
+ if (U_SUCCESS(status)) {
+ DCHECK(dest_length < result->length());
+ return *Handle<SeqTwoByteString>::cast(
+ SeqString::Truncate(result, dest_length));
+ }
+ return *s;
+}
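
LocaleConvertCase sizes its buffer optimistically at the source length and retries at most once on U_BUFFER_OVERFLOW_ERROR, which avoids a second pass over the string in the common case. The other idiomatic ICU shape is to preflight with a zero-capacity call that reports the required length; a minimal sketch of that alternative against the same u_strToUpper API:

#include <unicode/ustring.h>

#include <vector>

// Preflight-then-fill: ask u_strToUpper for the required length with a
// zero-capacity call, then convert into an exactly sized buffer.
std::vector<UChar> ToUpperPreflight(const UChar* src, int32_t src_length,
                                    const char* locale) {
  UErrorCode status = U_ZERO_ERROR;
  int32_t needed = u_strToUpper(nullptr, 0, src, src_length, locale, &status);
  if (status != U_BUFFER_OVERFLOW_ERROR && U_FAILURE(status)) return {};
  std::vector<UChar> dest(needed);
  status = U_ZERO_ERROR;
  u_strToUpper(dest.data(), needed, src, src_length, locale, &status);
  if (U_FAILURE(status)) return {};
  return dest;
}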
+
+// A stripped-down version of ConvertToLower that can only handle flat one-byte
+// strings and does not allocate. Note that {src} could still be, e.g., a
+// one-byte sliced string with a two-byte parent string.
+// Called from TF builtins.
+MUST_USE_RESULT Object* ConvertOneByteToLower(String* src, String* dst,
+ Isolate* isolate) {
+ DCHECK_EQ(src->length(), dst->length());
+ DCHECK(src->HasOnlyOneByteChars());
+ DCHECK(src->IsFlat());
+ DCHECK(dst->IsSeqOneByteString());
+
+ DisallowHeapAllocation no_gc;
+
+ const int length = src->length();
+ String::FlatContent src_flat = src->GetFlatContent();
+ uint8_t* dst_data = SeqOneByteString::cast(dst)->GetChars();
+
+ if (src_flat.IsOneByte()) {
+ const uint8_t* src_data = src_flat.ToOneByteVector().start();
+
+ bool has_changed_character = false;
+ int index_to_first_unprocessed =
+ FastAsciiConvert<true>(reinterpret_cast<char*>(dst_data),
+ reinterpret_cast<const char*>(src_data), length,
+ &has_changed_character);
+
+ if (index_to_first_unprocessed == length) {
+ return has_changed_character ? dst : src;
+ }
+
+ // If not ASCII, we keep the result up to index_to_first_unprocessed and
+ // process the rest.
+ for (int index = index_to_first_unprocessed; index < length; ++index) {
+ dst_data[index] = ToLatin1Lower(static_cast<uint16_t>(src_data[index]));
+ }
+ } else {
+ DCHECK(src_flat.IsTwoByte());
+ int index_to_first_unprocessed = FindFirstUpperOrNonAscii(src, length);
+ if (index_to_first_unprocessed == length) return src;
+
+ const uint16_t* src_data = src_flat.ToUC16Vector().start();
+ CopyChars(dst_data, src_data, index_to_first_unprocessed);
+ for (int index = index_to_first_unprocessed; index < length; ++index) {
+ dst_data[index] = ToLatin1Lower(static_cast<uint16_t>(src_data[index]));
+ }
+ }
+
+ return dst;
+}
+
+MUST_USE_RESULT Object* ConvertToLower(Handle<String> s, Isolate* isolate) {
+ if (!s->HasOnlyOneByteChars()) {
+ // Use a slower implementation for strings with characters beyond U+00FF.
+ return LocaleConvertCase(s, isolate, false, "");
+ }
+
+ int length = s->length();
+
+ // We rely here on the fact that the length of a Latin1 string is
+ // invariant under ToLowerCase, and that the result always fits in the
+ // Latin1 range in the *root locale*. Neither holds for ToUpperCase,
+ // even in the root locale.
+
+ // For strings shorter than a machine word, scan for uppercase and
+ // non-ASCII characters without any memory-allocation overhead.
+ // TODO(jshin): Apply this to longer inputs by breaking FastAsciiConvert()
+ // into two parts, one for scanning a prefix with no change and the other
+ // for handling ASCII-only characters.
+
+ bool is_short = length < static_cast<int>(sizeof(uintptr_t));
+ if (is_short) {
+ bool is_lower_ascii = FindFirstUpperOrNonAscii(*s, length) == length;
+ if (is_lower_ascii) return *s;
+ }
+
+ Handle<SeqOneByteString> result =
+ isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+
+ return ConvertOneByteToLower(*s, *result, isolate);
+}
+
+MUST_USE_RESULT Object* ConvertToUpper(Handle<String> s, Isolate* isolate) {
+ int32_t length = s->length();
+ if (s->HasOnlyOneByteChars() && length > 0) {
+ Handle<SeqOneByteString> result =
+ isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
+
+ DCHECK(s->IsFlat());
+ int sharp_s_count;
+ bool is_result_single_byte;
+ {
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = s->GetFlatContent();
+ uint8_t* dest = result->GetChars();
+ if (flat.IsOneByte()) {
+ Vector<const uint8_t> src = flat.ToOneByteVector();
+ bool has_changed_character = false;
+ int index_to_first_unprocessed =
+ FastAsciiConvert<false>(reinterpret_cast<char*>(result->GetChars()),
+ reinterpret_cast<const char*>(src.start()),
+ length, &has_changed_character);
+ if (index_to_first_unprocessed == length)
+ return has_changed_character ? *result : *s;
+ // If not ASCII, we keep the result up to index_to_first_unprocessed and
+ // process the rest.
+ is_result_single_byte =
+ ToUpperOneByte(src.SubVector(index_to_first_unprocessed, length),
+ dest + index_to_first_unprocessed, &sharp_s_count);
+ } else {
+ DCHECK(flat.IsTwoByte());
+ Vector<const uint16_t> src = flat.ToUC16Vector();
+ if (ToUpperFastASCII(src, result)) return *result;
+ is_result_single_byte = ToUpperOneByte(src, dest, &sharp_s_count);
+ }
+ }
+
+ // Go to the full Unicode path if there are characters whose uppercase
+ // is beyond the Latin-1 range (cannot be represented in OneByteString).
+ if (V8_UNLIKELY(!is_result_single_byte)) {
+ return LocaleConvertCase(s, isolate, true, "");
+ }
+
+ if (sharp_s_count == 0) return *result;
+
+ // We have sharp_s_count sharp-s characters, but the result is still
+ // in the Latin-1 range.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ isolate->factory()->NewRawOneByteString(length + sharp_s_count));
+ DisallowHeapAllocation no_gc;
+ String::FlatContent flat = s->GetFlatContent();
+ if (flat.IsOneByte()) {
+ ToUpperWithSharpS(flat.ToOneByteVector(), result);
+ } else {
+ ToUpperWithSharpS(flat.ToUC16Vector(), result);
+ }
+
+ return *result;
+ }
+
+ return LocaleConvertCase(s, isolate, true, "");
+}
+
+MUST_USE_RESULT Object* ConvertCase(Handle<String> s, bool is_upper,
+ Isolate* isolate) {
+ return is_upper ? ConvertToUpper(s, isolate) : ConvertToLower(s, isolate);
+}
+
+ICUTimezoneCache::ICUTimezoneCache() : timezone_(nullptr) { Clear(); }
+
+ICUTimezoneCache::~ICUTimezoneCache() { Clear(); }
+
+const char* ICUTimezoneCache::LocalTimezone(double time_ms) {
+ bool is_dst = DaylightSavingsOffset(time_ms) != 0;
+ char* name = is_dst ? dst_timezone_name_ : timezone_name_;
+ if (name[0] == '\0') {
+ icu::UnicodeString result;
+ GetTimeZone()->getDisplayName(is_dst, icu::TimeZone::LONG, result);
+ result += '\0';
+
+ icu::CheckedArrayByteSink byte_sink(name, kMaxTimezoneChars);
+ result.toUTF8(byte_sink);
+ CHECK(!byte_sink.Overflowed());
+ }
+ return const_cast<const char*>(name);
+}
+
+icu::TimeZone* ICUTimezoneCache::GetTimeZone() {
+ if (timezone_ == nullptr) {
+ timezone_ = icu::TimeZone::createDefault();
+ }
+ return timezone_;
+}
+
+bool ICUTimezoneCache::GetOffsets(double time_ms, int32_t* raw_offset,
+ int32_t* dst_offset) {
+ UErrorCode status = U_ZERO_ERROR;
+ GetTimeZone()->getOffset(time_ms, false, *raw_offset, *dst_offset, status);
+ return U_SUCCESS(status);
+}
+
+double ICUTimezoneCache::DaylightSavingsOffset(double time_ms) {
+ int32_t raw_offset, dst_offset;
+ if (!GetOffsets(time_ms, &raw_offset, &dst_offset)) return 0;
+ return dst_offset;
+}
+
+double ICUTimezoneCache::LocalTimeOffset() {
+ int32_t raw_offset, dst_offset;
+ if (!GetOffsets(icu::Calendar::getNow(), &raw_offset, &dst_offset)) return 0;
+ return raw_offset;
+}
+
+void ICUTimezoneCache::Clear() {
+ delete timezone_;
+ timezone_ = nullptr;
+ timezone_name_[0] = '\0';
+ dst_timezone_name_[0] = '\0';
+}
+
+} // namespace internal
+} // namespace v8
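
The timezone cache above reduces to three ICU calls: createDefault() for the zone, getOffset() for the raw and DST offsets, and getDisplayName() for the cached names. A standalone sketch exercising the same getOffset() API directly:

#include <unicode/calendar.h>
#include <unicode/timezone.h>

#include <cstdio>

int main() {
  icu::TimeZone* zone = icu::TimeZone::createDefault();
  int32_t raw_offset_ms = 0;
  int32_t dst_offset_ms = 0;
  UErrorCode status = U_ZERO_ERROR;
  // The same call ICUTimezoneCache::GetOffsets makes: offsets for "now",
  // with the timestamp interpreted as UTC (local = false).
  zone->getOffset(icu::Calendar::getNow(), false, raw_offset_ms,
                  dst_offset_ms, status);
  if (U_SUCCESS(status)) {
    std::printf("raw offset: %d ms, DST offset: %d ms\n", raw_offset_ms,
                dst_offset_ms);
  }
  delete zone;
  return 0;
}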
diff --git a/deps/v8/src/intl.h b/deps/v8/src/intl.h
new file mode 100644
index 0000000000..90683fe7f6
--- /dev/null
+++ b/deps/v8/src/intl.h
@@ -0,0 +1,69 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_INTL_H_
+#define V8_INTL_H_
+
+#include "src/base/timezone-cache.h"
+#include "src/objects.h"
+#include "unicode/uversion.h"
+
+namespace U_ICU_NAMESPACE {
+class TimeZone;
+}
+
+namespace v8 {
+namespace internal {
+
+const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
+ std::unique_ptr<uc16[]>* dest,
+ int32_t length);
+MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
+ bool is_to_upper, const char* lang);
+MUST_USE_RESULT Object* ConvertToLower(Handle<String> s, Isolate* isolate);
+MUST_USE_RESULT Object* ConvertToUpper(Handle<String> s, Isolate* isolate);
+MUST_USE_RESULT Object* ConvertCase(Handle<String> s, bool is_upper,
+ Isolate* isolate);
+
+MUST_USE_RESULT Object* ConvertOneByteToLower(String* src, String* dst,
+ Isolate* isolate);
+
+const uint8_t* ToLatin1LowerTable();
+
+// ICUTimezoneCache calls out to ICU for TimezoneCache
+// functionality in a straightforward way.
+class ICUTimezoneCache : public base::TimezoneCache {
+ public:
+ ICUTimezoneCache();
+
+ ~ICUTimezoneCache() override;
+
+ const char* LocalTimezone(double time_ms) override;
+
+ double DaylightSavingsOffset(double time_ms) override;
+
+ double LocalTimeOffset() override;
+
+ void Clear() override;
+
+ private:
+ icu::TimeZone* GetTimeZone();
+
+ bool GetOffsets(double time_ms, int32_t* raw_offset, int32_t* dst_offset);
+
+ icu::TimeZone* timezone_;
+
+ static const int32_t kMaxTimezoneChars = 100;
+ char timezone_name_[kMaxTimezoneChars];
+ char dst_timezone_name_[kMaxTimezoneChars];
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INTL_H_
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index ffed04d6e0..faa04848cf 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -50,6 +50,7 @@
#include "src/tracing/tracing-category-observer.h"
#include "src/v8.h"
#include "src/version.h"
+#include "src/visitors.h"
#include "src/vm-state-inl.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
@@ -200,8 +201,7 @@ Address Isolate::get_address_from_id(Isolate::AddressId id) {
return isolate_addresses_[id];
}
-
-char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
+char* Isolate::Iterate(RootVisitor* v, char* thread_storage) {
ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(thread_storage);
Iterate(v, thread);
return thread_storage + sizeof(ThreadLocalTop);
@@ -213,19 +213,18 @@ void Isolate::IterateThread(ThreadVisitor* v, char* t) {
v->VisitThread(this, thread);
}
-
-void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
+void Isolate::Iterate(RootVisitor* v, ThreadLocalTop* thread) {
// Visit the roots from the top for a given thread.
- v->VisitPointer(&thread->pending_exception_);
- v->VisitPointer(&(thread->pending_message_obj_));
- v->VisitPointer(bit_cast<Object**>(&(thread->context_)));
- v->VisitPointer(&thread->scheduled_exception_);
+ v->VisitRootPointer(Root::kTop, &thread->pending_exception_);
+ v->VisitRootPointer(Root::kTop, &thread->pending_message_obj_);
+ v->VisitRootPointer(Root::kTop, bit_cast<Object**>(&(thread->context_)));
+ v->VisitRootPointer(Root::kTop, &thread->scheduled_exception_);
for (v8::TryCatch* block = thread->try_catch_handler();
block != NULL;
block = block->next_) {
- v->VisitPointer(bit_cast<Object**>(&(block->exception_)));
- v->VisitPointer(bit_cast<Object**>(&(block->message_obj_)));
+ v->VisitRootPointer(Root::kTop, bit_cast<Object**>(&(block->exception_)));
+ v->VisitRootPointer(Root::kTop, bit_cast<Object**>(&(block->message_obj_)));
}
// Iterate over pointers on native execution stack.
@@ -234,14 +233,12 @@ void Isolate::Iterate(ObjectVisitor* v, ThreadLocalTop* thread) {
}
}
-
-void Isolate::Iterate(ObjectVisitor* v) {
+void Isolate::Iterate(RootVisitor* v) {
ThreadLocalTop* current_t = thread_local_top();
Iterate(v, current_t);
}
-
-void Isolate::IterateDeferredHandles(ObjectVisitor* visitor) {
+void Isolate::IterateDeferredHandles(RootVisitor* visitor) {
for (DeferredHandles* deferred = deferred_handles_head_;
deferred != NULL;
deferred = deferred->next_) {
@@ -620,7 +617,7 @@ MaybeHandle<JSReceiver> Isolate::CaptureAndSetDetailedStackTrace(
if (capture_stack_trace_for_uncaught_exceptions_) {
// Capture stack trace for a detailed exception message.
Handle<Name> key = factory()->detailed_stack_trace_symbol();
- Handle<JSArray> stack_trace = CaptureCurrentStackTrace(
+ Handle<FixedArray> stack_trace = CaptureCurrentStackTrace(
stack_trace_for_uncaught_exceptions_frame_limit_,
stack_trace_for_uncaught_exceptions_options_);
RETURN_ON_EXCEPTION(
@@ -643,13 +640,13 @@ MaybeHandle<JSReceiver> Isolate::CaptureAndSetSimpleStackTrace(
return error_object;
}
-
-Handle<JSArray> Isolate::GetDetailedStackTrace(Handle<JSObject> error_object) {
+Handle<FixedArray> Isolate::GetDetailedStackTrace(
+ Handle<JSObject> error_object) {
Handle<Name> key_detailed = factory()->detailed_stack_trace_symbol();
Handle<Object> stack_trace =
JSReceiver::GetDataProperty(error_object, key_detailed);
- if (stack_trace->IsJSArray()) return Handle<JSArray>::cast(stack_trace);
- return Handle<JSArray>();
+ if (stack_trace->IsFixedArray()) return Handle<FixedArray>::cast(stack_trace);
+ return Handle<FixedArray>();
}
@@ -666,6 +663,32 @@ class CaptureStackTraceHelper {
Handle<StackFrameInfo> NewStackFrameObject(
const FrameSummary::JavaScriptFrameSummary& summ) {
+ int code_offset;
+ Handle<ByteArray> source_position_table;
+ Object* maybe_cache;
+ Handle<UnseededNumberDictionary> cache;
+ if (!FLAG_optimize_for_size) {
+ code_offset = summ.code_offset();
+ source_position_table =
+ handle(summ.abstract_code()->source_position_table(), isolate_);
+ maybe_cache = summ.abstract_code()->stack_frame_cache();
+ if (maybe_cache->IsUnseededNumberDictionary()) {
+ cache = handle(UnseededNumberDictionary::cast(maybe_cache));
+ } else {
+ cache = UnseededNumberDictionary::New(isolate_, 1);
+ }
+ int entry = cache->FindEntry(code_offset);
+ if (entry != UnseededNumberDictionary::kNotFound) {
+ Handle<StackFrameInfo> frame(
+ StackFrameInfo::cast(cache->ValueAt(entry)));
+ DCHECK(frame->function_name()->IsString());
+ Handle<String> function_name = summ.FunctionName();
+ if (function_name->Equals(String::cast(frame->function_name()))) {
+ return frame;
+ }
+ }
+ }
+
Handle<StackFrameInfo> frame = factory()->NewStackFrameInfo();
Handle<Script> script = Handle<Script>::cast(summ.script());
Script::PositionInfo info;
@@ -684,6 +707,14 @@ class CaptureStackTraceHelper {
frame->set_function_name(*function_name);
frame->set_is_constructor(summ.is_constructor());
frame->set_is_wasm(false);
+ if (!FLAG_optimize_for_size) {
+ auto new_cache =
+ UnseededNumberDictionary::AtNumberPut(cache, code_offset, frame);
+ if (*new_cache != *cache || !maybe_cache->IsUnseededNumberDictionary()) {
+ AbstractCode::SetStackFrameCache(summ.abstract_code(), new_cache);
+ }
+ }
+ frame->set_id(next_id());
return frame;
}
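
The dictionary added above memoizes StackFrameInfo objects per code offset (skipped under --optimize-for-size). It serves the detailed-trace path -- uncaught-exception messages, promise-rejection reporting, and the v8::StackTrace API -- so repeated captures through the same code offset can return a cached frame rather than rebuilding it. A hedged sketch of the kind of workload this targets, assuming the embedder has enabled capturing stack traces for uncaught exceptions; the reuse itself is internal and not observable from script:

// Each capture below walks frames at the same code offsets, so the
// second and later captures should hit the new stack-frame cache.
function reject() { Promise.reject(new Error('probe')); }
for (var i = 0; i < 1000; i++) reject();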
@@ -705,25 +736,30 @@ class CaptureStackTraceHelper {
info->set_column_number(position);
info->set_script_id(summ.script()->id());
info->set_is_wasm(true);
+ info->set_id(next_id());
return info;
}
private:
inline Factory* factory() { return isolate_->factory(); }
+ int next_id() const {
+ int id = isolate_->last_stack_frame_info_id() + 1;
+ isolate_->set_last_stack_frame_info_id(id);
+ return id;
+ }
+
Isolate* isolate_;
};
-Handle<JSArray> Isolate::CaptureCurrentStackTrace(
+Handle<FixedArray> Isolate::CaptureCurrentStackTrace(
int frame_limit, StackTrace::StackTraceOptions options) {
DisallowJavascriptExecution no_js(this);
CaptureStackTraceHelper helper(this);
// Ensure no negative values.
int limit = Max(frame_limit, 0);
- Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
- Handle<FixedArray> stack_trace_elems(
- FixedArray::cast(stack_trace->elements()), this);
+ Handle<FixedArray> stack_trace_elems = factory()->NewFixedArray(limit);
int frames_seen = 0;
for (StackTraceFrameIterator it(this); !it.done() && (frames_seen < limit);
@@ -744,9 +780,8 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
frames_seen++;
}
}
-
- stack_trace->set_length(Smi::FromInt(frames_seen));
- return stack_trace;
+ stack_trace_elems->Shrink(frames_seen);
+ return stack_trace_elems;
}
@@ -1637,7 +1672,7 @@ bool Isolate::ComputeLocationFromStackTrace(MessageLocation* target,
Handle<JSMessageObject> Isolate::CreateMessage(Handle<Object> exception,
MessageLocation* location) {
- Handle<JSArray> stack_trace_object;
+ Handle<FixedArray> stack_trace_object;
if (capture_stack_trace_for_uncaught_exceptions_) {
if (exception->IsJSError()) {
// We fetch the stack trace that corresponds to this error object.
@@ -2311,7 +2346,7 @@ Isolate::Isolate(bool enable_serializer)
optimizing_compile_dispatcher_(NULL),
stress_deopt_count_(0),
next_optimization_id_(0),
-#if TRACE_MAPS
+#if V8_SFI_HAS_UNIQUE_ID
next_unique_sfi_id_(0),
#endif
is_running_microtasks_(false),
@@ -2976,8 +3011,8 @@ Map* Isolate::get_initial_js_array_map(ElementsKind kind) {
return nullptr;
}
-bool Isolate::use_crankshaft() {
- return FLAG_opt && FLAG_crankshaft && !serializer_enabled_ &&
+bool Isolate::use_optimizer() {
+ return FLAG_opt && !serializer_enabled_ &&
CpuFeatures::SupportsCrankshaft() && !is_precise_count_code_coverage();
}
@@ -3010,7 +3045,7 @@ void Isolate::ClearOSROptimizedCode() {
Object* context = heap()->native_contexts_list();
while (!context->IsUndefined(this)) {
Context* current_context = Context::cast(context);
- current_context->ClearOptimizedCodeMap();
+ current_context->ClearOSROptimizedCodeCache();
context = current_context->next_context_link();
}
}
@@ -3020,7 +3055,7 @@ void Isolate::EvictOSROptimizedCode(Code* code, const char* reason) {
Object* context = heap()->native_contexts_list();
while (!context->IsUndefined(this)) {
Context* current_context = Context::cast(context);
- current_context->EvictFromOptimizedCodeMap(code, reason);
+ current_context->EvictFromOSROptimizedCodeCache(code, reason);
context = current_context->next_context_link();
}
}
@@ -3364,7 +3399,7 @@ void Isolate::ReportPromiseReject(Handle<JSObject> promise,
Handle<Object> value,
v8::PromiseRejectEvent event) {
if (promise_reject_callback_ == NULL) return;
- Handle<JSArray> stack_trace;
+ Handle<FixedArray> stack_trace;
if (event == v8::kPromiseRejectWithNoHandler && value->IsJSObject()) {
stack_trace = GetDetailedStackTrace(Handle<JSObject>::cast(value));
}
@@ -3474,6 +3509,7 @@ void Isolate::RunMicrotasksInternal() {
while (pending_microtask_count() > 0) {
HandleScope scope(this);
int num_tasks = pending_microtask_count();
+ // Do not use factory()->microtask_queue() here; we need a fresh handle!
Handle<FixedArray> queue(heap()->microtask_queue(), this);
DCHECK(num_tasks <= queue->length());
set_pending_microtask_count(0);
@@ -3617,11 +3653,11 @@ void Isolate::SetTailCallEliminationEnabled(bool enabled) {
void Isolate::AddDetachedContext(Handle<Context> context) {
HandleScope scope(this);
Handle<WeakCell> cell = factory()->NewWeakCell(context);
- Handle<FixedArray> detached_contexts = factory()->detached_contexts();
- int length = detached_contexts->length();
- detached_contexts = factory()->CopyFixedArrayAndGrow(detached_contexts, 2);
- detached_contexts->set(length, Smi::kZero);
- detached_contexts->set(length + 1, *cell);
+ Handle<FixedArray> detached_contexts =
+ factory()->CopyFixedArrayAndGrow(factory()->detached_contexts(), 2);
+ int new_length = detached_contexts->length();
+ detached_contexts->set(new_length - 2, Smi::kZero);
+ detached_contexts->set(new_length - 1, *cell);
heap()->set_detached_contexts(*detached_contexts);
}
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index 3a8d1b3a03..d65a1f373a 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -34,6 +34,10 @@ namespace base {
class RandomNumberGenerator;
}
+namespace debug {
+class ConsoleDelegate;
+}
+
namespace internal {
class AccessCompilerData;
@@ -74,6 +78,7 @@ class Logger;
class MaterializedObjectStore;
class OptimizingCompileDispatcher;
class RegExpStack;
+class RootVisitor;
class RuntimeProfiler;
class SaveContext;
class SetupIsolateDelegate;
@@ -390,7 +395,7 @@ class ThreadLocalTop BASE_EMBEDDED {
V(int, suffix_table, (kBMMaxShift + 1)) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
-typedef List<HeapObject*> DebugObjectCache;
+typedef std::vector<HeapObject*> DebugObjectCache;
#define ISOLATE_INIT_LIST(V) \
/* Assembler state. */ \
@@ -430,6 +435,7 @@ typedef List<HeapObject*> DebugObjectCache;
V(bool, needs_side_effect_check, false) \
/* Current code coverage mode */ \
V(debug::Coverage::Mode, code_coverage_mode, debug::Coverage::kBestEffort) \
+ V(int, last_stack_frame_info_id, 0) \
ISOLATE_INIT_SIMULATOR_LIST(V)
#define THREAD_LOCAL_TOP_ACCESSOR(type, name) \
@@ -666,7 +672,7 @@ class Isolate {
// exceptions. If an exception was thrown and not handled by an external
// handler the exception is scheduled to be rethrown when we return to running
// JavaScript code. If an exception is scheduled true is returned.
- bool OptionalRescheduleException(bool is_bottom_call);
+ V8_EXPORT_PRIVATE bool OptionalRescheduleException(bool is_bottom_call);
// Push and pop a promise and the current try-catch handler.
void PushPromise(Handle<JSObject> promise);
@@ -717,9 +723,8 @@ class Isolate {
void* ptr2, void* ptr3, void* ptr4,
void* ptr5, void* ptr6, void* ptr7,
void* ptr8, unsigned int magic2));
- Handle<JSArray> CaptureCurrentStackTrace(
- int frame_limit,
- StackTrace::StackTraceOptions options);
+ Handle<FixedArray> CaptureCurrentStackTrace(
+ int frame_limit, StackTrace::StackTraceOptions options);
Handle<Object> CaptureSimpleStackTrace(Handle<JSReceiver> error_object,
FrameSkipMode mode,
Handle<Object> caller);
@@ -728,7 +733,7 @@ class Isolate {
MaybeHandle<JSReceiver> CaptureAndSetSimpleStackTrace(
Handle<JSReceiver> error_object, FrameSkipMode mode,
Handle<Object> caller);
- Handle<JSArray> GetDetailedStackTrace(Handle<JSObject> error_object);
+ Handle<FixedArray> GetDetailedStackTrace(Handle<JSObject> error_object);
// Returns if the given context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
@@ -750,6 +755,11 @@ class Isolate {
return MaybeHandle<T>();
}
+ void set_console_delegate(debug::ConsoleDelegate* delegate) {
+ console_delegate_ = delegate;
+ }
+ debug::ConsoleDelegate* console_delegate() { return console_delegate_; }
+
// Re-throw an exception. This involves no error reporting since error
// reporting was handled when the exception was thrown originally.
Object* ReThrow(Object* exception);
@@ -806,9 +816,9 @@ class Isolate {
void InvokeApiInterruptCallbacks();
// Administration
- void Iterate(ObjectVisitor* v);
- void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
- char* Iterate(ObjectVisitor* v, char* t);
+ void Iterate(RootVisitor* v);
+ void Iterate(RootVisitor* v, ThreadLocalTop* t);
+ char* Iterate(RootVisitor* v, char* t);
void IterateThread(ThreadVisitor* v, char* t);
// Returns the current native context.
@@ -987,7 +997,7 @@ class Isolate {
bool IsDead() { return has_fatal_error_; }
void SignalFatalError() { has_fatal_error_ = true; }
- bool use_crankshaft();
+ bool use_optimizer();
bool initialized_from_snapshot() { return initialized_from_snapshot_; }
@@ -1068,7 +1078,7 @@ class Isolate {
AccessCompilerData* access_compiler_data() { return access_compiler_data_; }
- void IterateDeferredHandles(ObjectVisitor* visitor);
+ void IterateDeferredHandles(RootVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1157,7 +1167,7 @@ class Isolate {
std::string GetTurboCfgFileName();
-#if TRACE_MAPS
+#if V8_SFI_HAS_UNIQUE_ID
int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
#endif
@@ -1240,6 +1250,9 @@ class Isolate {
#ifdef USE_SIMULATOR
base::Mutex* simulator_i_cache_mutex() { return &simulator_i_cache_mutex_; }
+ base::Mutex* simulator_redirection_mutex() {
+ return &simulator_redirection_mutex_;
+ }
#endif
void set_allow_atomics_wait(bool set) { allow_atomics_wait_ = set; }
@@ -1278,10 +1291,10 @@ class Isolate {
// reset to nullptr.
void UnregisterFromReleaseAtTeardown(ManagedObjectFinalizer** finalizer_ptr);
- // Used by mjsunit tests to force d8 to wait for certain things to run.
- inline void IncrementWaitCountForTesting() { wait_count_++; }
- inline void DecrementWaitCountForTesting() { wait_count_--; }
- inline int GetWaitCountForTesting() { return wait_count_; }
+ size_t elements_deletion_counter() { return elements_deletion_counter_; }
+ void set_elements_deletion_counter(size_t value) {
+ elements_deletion_counter_ = value;
+ }
protected:
explicit Isolate(bool enable_serializer);
@@ -1528,7 +1541,7 @@ class Isolate {
int next_optimization_id_;
-#if TRACE_MAPS
+#if V8_SFI_HAS_UNIQUE_ID
int next_unique_sfi_id_;
#endif
@@ -1553,11 +1566,14 @@ class Isolate {
CancelableTaskManager* cancelable_task_manager_;
+ debug::ConsoleDelegate* console_delegate_ = nullptr;
+
v8::Isolate::AbortOnUncaughtExceptionCallback
abort_on_uncaught_exception_callback_;
#ifdef USE_SIMULATOR
base::Mutex simulator_i_cache_mutex_;
+ base::Mutex simulator_redirection_mutex_;
#endif
bool allow_atomics_wait_;
@@ -1566,7 +1582,7 @@ class Isolate {
size_t total_regexp_code_generated_;
- int wait_count_ = 0;
+ size_t elements_deletion_counter_ = 0;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
diff --git a/deps/v8/src/js/array.js b/deps/v8/src/js/array.js
index 50780f659c..188f21c41e 100644
--- a/deps/v8/src/js/array.js
+++ b/deps/v8/src/js/array.js
@@ -18,8 +18,8 @@ var InternalArray = utils.InternalArray;
var InternalPackedArray = utils.InternalPackedArray;
var MaxSimple;
var MinSimple;
-var ObjectHasOwnProperty;
-var ObjectToString = utils.ImportNow("object_to_string");
+var ObjectHasOwnProperty = global.Object.prototype.hasOwnProperty;
+var ObjectToString = global.Object.prototype.toString;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
var unscopablesSymbol = utils.ImportNow("unscopables_symbol");
@@ -28,7 +28,6 @@ utils.Import(function(from) {
GetMethod = from.GetMethod;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
- ObjectHasOwnProperty = from.ObjectHasOwnProperty;
});
// -------------------------------------------------------------------
@@ -403,7 +402,7 @@ function ArrayPop() {
n--;
var value = array[n];
- %DeleteProperty_Strict(array, n);
+ delete array[n];
array.length = n;
return value;
}
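
Replacing %DeleteProperty_Strict with a plain delete preserves the observable semantics: the trailing slot is removed before the new length is written back. For example:

var a = [1, 2, 3];
a.pop();      // 3
a.length;     // 2
2 in a;       // false -- the old index really was deleted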
@@ -1001,57 +1000,6 @@ function ArraySort(comparefn) {
return InnerArraySort(array, length, comparefn);
}
-
-// The following functions cannot be made efficient on sparse arrays while
-// preserving the semantics, since the calls to the receiver function can add
-// or delete elements from the array.
-function InnerArrayFilter(f, receiver, array, length, result) {
- var result_length = 0;
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- if (%_Call(f, receiver, element, i, array)) {
- %CreateDataProperty(result, result_length, element);
- result_length++;
- }
- }
- }
- return result;
-}
-
-
-
-function ArrayFilter(f, receiver) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.filter");
-
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
- if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
- var result = ArraySpeciesCreate(array, 0);
- return InnerArrayFilter(f, receiver, array, length, result);
-}
-
-function ArrayMap(f, receiver) {
- CHECK_OBJECT_COERCIBLE(this, "Array.prototype.map");
-
- // Pull out the length so that modifications to the length in the
- // loop will not affect the looping and side effects are visible.
- var array = TO_OBJECT(this);
- var length = TO_LENGTH(array.length);
- if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
- var result = ArraySpeciesCreate(array, length);
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- %CreateDataProperty(result, i, %_Call(f, receiver, element, i, array));
- }
- }
- return result;
-}
-
-
function ArrayLastIndexOf(element, index) {
CHECK_OBJECT_COERCIBLE(this, "Array.prototype.lastIndexOf");
@@ -1334,8 +1282,6 @@ var unscopables = {
keys: true,
};
-%ToFastProperties(unscopables);
-
%AddNamedProperty(GlobalArray.prototype, unscopablesSymbol, unscopables,
DONT_ENUM | READ_ONLY);
@@ -1384,8 +1330,6 @@ utils.InstallFunctions(GlobalArray.prototype, DONT_ENUM, [
"slice", getFunction("slice", ArraySlice, 2),
"splice", getFunction("splice", ArraySplice, 2),
"sort", getFunction("sort", ArraySort),
- "filter", getFunction("filter", ArrayFilter, 1),
- "map", getFunction("map", ArrayMap, 1),
"indexOf", getFunction("indexOf", null, 1),
"lastIndexOf", getFunction("lastIndexOf", ArrayLastIndexOf, 1),
"copyWithin", getFunction("copyWithin", ArrayCopyWithin, 2),
@@ -1442,7 +1386,6 @@ utils.Export(function(to) {
to.ArrayPush = ArrayPush;
to.ArrayToString = ArrayToString;
to.ArrayValues = IteratorFunctions.values,
- to.InnerArrayFilter = InnerArrayFilter;
to.InnerArrayFind = InnerArrayFind;
to.InnerArrayFindIndex = InnerArrayFindIndex;
to.InnerArrayJoin = InnerArrayJoin;
diff --git a/deps/v8/src/js/harmony-string-padding.js b/deps/v8/src/js/harmony-string-padding.js
deleted file mode 100644
index 1af2359def..0000000000
--- a/deps/v8/src/js/harmony-string-padding.js
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-(function(global, utils) {
-
-%CheckIsBootstrapping();
-
-// -------------------------------------------------------------------
-// Imports
-
-var GlobalString = global.String;
-
-// -------------------------------------------------------------------
-// http://tc39.github.io/proposal-string-pad-start-end/
-
-function StringPad(thisString, maxLength, fillString) {
- maxLength = TO_LENGTH(maxLength);
- var stringLength = thisString.length;
-
- if (maxLength <= stringLength) return "";
-
- if (IS_UNDEFINED(fillString)) {
- fillString = " ";
- } else {
- fillString = TO_STRING(fillString);
- if (fillString === "") {
- // If filler is the empty String, return S.
- return "";
- }
- }
-
- var fillLength = maxLength - stringLength;
- var repetitions = (fillLength / fillString.length) | 0;
- var remainingChars = (fillLength - fillString.length * repetitions) | 0;
-
- var filler = "";
- while (true) {
- if (repetitions & 1) filler += fillString;
- repetitions >>= 1;
- if (repetitions === 0) break;
- fillString += fillString;
- }
-
- if (remainingChars) {
- filler += %_SubString(fillString, 0, remainingChars);
- }
-
- return filler;
-}
-
-function StringPadStart(maxLength, fillString) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.padStart")
- var thisString = TO_STRING(this);
-
- return StringPad(thisString, maxLength, fillString) + thisString;
-}
-%FunctionSetLength(StringPadStart, 1);
-
-function StringPadEnd(maxLength, fillString) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.padEnd")
- var thisString = TO_STRING(this);
-
- return thisString + StringPad(thisString, maxLength, fillString);
-}
-%FunctionSetLength(StringPadEnd, 1);
-
-utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
- "padStart", StringPadStart,
- "padEnd", StringPadEnd
-]);
-
-});
diff --git a/deps/v8/src/js/i18n.js b/deps/v8/src/js/intl.js
index aac38fe0d1..cb83cfc1f5 100644
--- a/deps/v8/src/js/i18n.js
+++ b/deps/v8/src/js/intl.js
@@ -33,7 +33,7 @@ var InstallFunctions = utils.InstallFunctions;
var InstallGetter = utils.InstallGetter;
var InternalArray = utils.InternalArray;
var MaxSimple;
-var ObjectHasOwnProperty = utils.ImportNow("ObjectHasOwnProperty");
+var ObjectHasOwnProperty = global.Object.prototype.hasOwnProperty;
var OverrideFunction = utils.OverrideFunction;
var patternSymbol = utils.ImportNow("intl_pattern_symbol");
var resolvedSymbol = utils.ImportNow("intl_resolved_symbol");
@@ -146,11 +146,18 @@ var AVAILABLE_LOCALES = {
*/
var DEFAULT_ICU_LOCALE = UNDEFINED;
-function GetDefaultICULocaleJS() {
+function GetDefaultICULocaleJS(service) {
if (IS_UNDEFINED(DEFAULT_ICU_LOCALE)) {
DEFAULT_ICU_LOCALE = %GetDefaultICULocale();
}
- return DEFAULT_ICU_LOCALE;
+ // Check that this is a valid default for this service,
+ // otherwise fall back to "und"
+ // TODO(littledan,jshin): AvailableLocalesOf sometimes excludes locales
+ // which don't require tailoring, but work fine with root data. Look into
+ // exposing this fact in ICU or the way Chrome bundles data.
+ return (IS_UNDEFINED(service) ||
+ HAS_OWN_PROPERTY(getAvailableLocalesOf(service), DEFAULT_ICU_LOCALE))
+ ? DEFAULT_ICU_LOCALE : "und";
}
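
With the per-service check above, a host default locale that a given service cannot actually serve now degrades to the root locale 'und' instead of being reported as resolved. Roughly, and depending on the ICU data bundled:

// If %GetDefaultICULocale() yields a locale absent from
// getAvailableLocalesOf('collator'), lookup resolution falls back to 'und'.
new Intl.Collator().resolvedOptions().locale;  // e.g. 'en-US', or 'und' as a last resort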
/**
@@ -294,19 +301,16 @@ function supportedLocalesOf(service, locales, options) {
var requestedLocales = initializeLocaleList(locales);
- // Cache these, they don't ever change per service.
- if (IS_UNDEFINED(AVAILABLE_LOCALES[service])) {
- AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
- }
+ var availableLocales = getAvailableLocalesOf(service);
// Use either best fit or lookup algorithm to match locales.
if (matcher === 'best fit') {
return initializeLocaleList(bestFitSupportedLocalesOf(
- requestedLocales, AVAILABLE_LOCALES[service]));
+ requestedLocales, availableLocales));
}
return initializeLocaleList(lookupSupportedLocalesOf(
- requestedLocales, AVAILABLE_LOCALES[service]));
+ requestedLocales, availableLocales));
}
@@ -433,22 +437,19 @@ function lookupMatcher(service, requestedLocales) {
throw %make_error(kWrongServiceType, service);
}
- // Cache these, they don't ever change per service.
- if (IS_UNDEFINED(AVAILABLE_LOCALES[service])) {
- AVAILABLE_LOCALES[service] = getAvailableLocalesOf(service);
- }
+ var availableLocales = getAvailableLocalesOf(service);
for (var i = 0; i < requestedLocales.length; ++i) {
// Remove all extensions.
var locale = %RegExpInternalReplace(
GetAnyExtensionRE(), requestedLocales[i], '');
do {
- if (!IS_UNDEFINED(AVAILABLE_LOCALES[service][locale])) {
+ if (!IS_UNDEFINED(availableLocales[locale])) {
// Return the resolved locale and extension.
var extensionMatch = %regexp_internal_match(
GetUnicodeExtensionRE(), requestedLocales[i]);
var extension = IS_NULL(extensionMatch) ? '' : extensionMatch[0];
- return {'locale': locale, 'extension': extension, 'position': i};
+ return {locale: locale, extension: extension, position: i};
}
// Truncate locale if possible.
var pos = %StringLastIndexOf(locale, '-');
@@ -460,7 +461,11 @@ function lookupMatcher(service, requestedLocales) {
}
// Didn't find a match, return default.
- return {'locale': GetDefaultICULocaleJS(), 'extension': '', 'position': -1};
+ return {
+ locale: GetDefaultICULocaleJS(service),
+ extension: '',
+ position: -1
+ };
}
@@ -621,39 +626,17 @@ function makeArray(input) {
}
/**
- * It's sometimes desirable to leave user requested locale instead of ICU
- * supported one (zh-TW is equivalent to zh-Hant-TW, so we should keep shorter
- * one, if that was what user requested).
- * This function returns user specified tag if its maximized form matches ICU
- * resolved locale. If not we return ICU result.
- */
-function getOptimalLanguageTag(original, resolved) {
- // Returns Array<Object>, where each object has maximized and base properties.
- // Maximized: zh -> zh-Hans-CN
- // Base: zh-CN-u-ca-gregory -> zh-CN
- // Take care of grandfathered or simple cases.
- if (original === resolved) {
- return original;
- }
-
- var locales = %GetLanguageTagVariants([original, resolved]);
- if (locales[0].maximized !== locales[1].maximized) {
- return resolved;
- }
-
- // Preserve extensions of resolved locale, but swap base tags with original.
- var resolvedBase = new GlobalRegExp('^' + locales[1].base, 'g');
- return %RegExpInternalReplace(resolvedBase, resolved, locales[0].base);
-}
-
-
-/**
* Returns an Object that contains all of supported locales for a given
* service.
* In addition to the supported locales we add xx-ZZ locale for each xx-Yyyy-ZZ
* that is supported. This is required by the spec.
*/
function getAvailableLocalesOf(service) {
+ // Cache these, they don't ever change per service.
+ if (!IS_UNDEFINED(AVAILABLE_LOCALES[service])) {
+ return AVAILABLE_LOCALES[service];
+ }
+
var available = %AvailableLocalesOf(service);
for (var i in available) {
@@ -668,6 +651,8 @@ function getAvailableLocalesOf(service) {
}
}
+ AVAILABLE_LOCALES[service] = available;
+
return available;
}
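
Hoisting the memoization into getAvailableLocalesOf means supportedLocalesOf and lookupMatcher now share one lazily built table per service. From script the effect is only visible as consistent filtering, for instance:

// Both calls consult the same cached per-service table.
Intl.Collator.supportedLocalesOf(['de-DE', 'xx-YY']);  // typically ['de-DE']
Intl.NumberFormat.supportedLocalesOf(['zh-TW']);       // typically ['zh-TW']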
@@ -719,8 +704,8 @@ function addWECPropertyIfDefined(object, property, value) {
* Returns titlecased word, aMeRricA -> America.
*/
function toTitleCaseWord(word) {
- return %StringToUpperCaseI18N(%_Call(StringSubstr, word, 0, 1)) +
- %StringToLowerCaseI18N(%_Call(StringSubstr, word, 1));
+ return %StringToUpperCaseIntl(%_Call(StringSubstr, word, 0, 1)) +
+ %StringToLowerCaseIntl(%_Call(StringSubstr, word, 1));
}
/**
@@ -741,7 +726,7 @@ function toTitleCaseTimezoneLocation(location) {
var parts = %StringSplit(match[2], separator, kMaxUint32);
for (var i = 1; i < parts.length; i++) {
var part = parts[i]
- var lowercasedPart = %StringToLowerCaseI18N(part);
+ var lowercasedPart = %StringToLowerCaseIntl(part);
result = result + separator +
((lowercasedPart !== 'es' &&
lowercasedPart !== 'of' && lowercasedPart !== 'au') ?
@@ -847,7 +832,7 @@ function isStructuallyValidLanguageTag(locale) {
return false;
}
- locale = %StringToLowerCaseI18N(locale);
+ locale = %StringToLowerCaseIntl(locale);
// Just return if it's a x- form. It's all private.
if (%StringIndexOf(locale, 'x-', 0) === 0) {
@@ -964,7 +949,7 @@ function CreateCollator(locales, options) {
// TODO(jshin): ICU now can take kb, kc, etc. Switch over to using ICU
// directly. See Collator::InitializeCollator and
- // Collator::CreateICUCollator in src/i18n.cc
+ // Collator::CreateICUCollator in src/objects/intl-objects.cc
// ICU can't take kb, kc... parameters through localeID, so we need to pass
// them as options.
// One exception is -co- which has to be part of the extension, but only for
@@ -1053,11 +1038,8 @@ function CollatorConstructor() {
InstallFunction(GlobalIntlCollator.prototype, 'resolvedOptions', function() {
var coll = Unwrap(this, 'collator', GlobalIntlCollator, 'resolvedOptions',
false);
- var locale = getOptimalLanguageTag(coll[resolvedSymbol].requestedLocale,
- coll[resolvedSymbol].locale);
-
return {
- locale: locale,
+ locale: coll[resolvedSymbol].locale,
usage: coll[resolvedSymbol].usage,
sensitivity: coll[resolvedSymbol].sensitivity,
ignorePunctuation: coll[resolvedSymbol].ignorePunctuation,
@@ -1191,7 +1173,7 @@ function CreateNumberFormat(locales, options) {
var currencyDisplay = getOption(
'currencyDisplay', 'string', ['code', 'symbol', 'name'], 'symbol');
if (internalOptions.style === 'currency') {
- defineWEProperty(internalOptions, 'currency', %StringToUpperCaseI18N(currency));
+ defineWEProperty(internalOptions, 'currency', %StringToUpperCaseIntl(currency));
defineWEProperty(internalOptions, 'currencyDisplay', currencyDisplay);
mnfdDefault = mxfdDefault = %CurrencyDigits(internalOptions.currency);
@@ -1276,11 +1258,8 @@ InstallFunction(GlobalIntlNumberFormat.prototype, 'resolvedOptions',
function() {
var format = Unwrap(this, 'numberformat', GlobalIntlNumberFormat,
'resolvedOptions', true);
- var locale = getOptimalLanguageTag(format[resolvedSymbol].requestedLocale,
- format[resolvedSymbol].locale);
-
var result = {
- locale: locale,
+ locale: format[resolvedSymbol].locale,
numberingSystem: format[resolvedSymbol].numberingSystem,
style: format[resolvedSymbol].style,
useGrouping: format[resolvedSymbol].useGrouping,
@@ -1658,11 +1637,8 @@ InstallFunction(GlobalIntlDateTimeFormat.prototype, 'resolvedOptions',
userCalendar = format[resolvedSymbol].calendar;
}
- var locale = getOptimalLanguageTag(format[resolvedSymbol].requestedLocale,
- format[resolvedSymbol].locale);
-
var result = {
- locale: locale,
+ locale: format[resolvedSymbol].locale,
numberingSystem: format[resolvedSymbol].numberingSystem,
calendar: userCalendar,
timeZone: format[resolvedSymbol].timeZone
@@ -1715,27 +1691,29 @@ function formatDate(formatter, dateValue) {
return %InternalDateFormat(formatter, new GlobalDate(dateMs));
}
-function FormatDateToParts(dateValue) {
- CHECK_OBJECT_COERCIBLE(this, "Intl.DateTimeFormat.prototype.formatToParts");
- if (!IS_OBJECT(this)) {
- throw %make_type_error(kCalledOnNonObject, this);
- }
- if (!%IsInitializedIntlObjectOfType(this, 'dateformat')) {
- throw %make_type_error(kIncompatibleMethodReceiver,
- 'Intl.DateTimeFormat.prototype.formatToParts',
- this);
- }
- var dateMs;
- if (IS_UNDEFINED(dateValue)) {
- dateMs = %DateCurrentTime();
- } else {
- dateMs = TO_NUMBER(dateValue);
- }
+InstallFunction(GlobalIntlDateTimeFormat.prototype, 'formatToParts',
+ function(dateValue) {
+ CHECK_OBJECT_COERCIBLE(this, "Intl.DateTimeFormat.prototype.formatToParts");
+ if (!IS_OBJECT(this)) {
+ throw %make_type_error(kCalledOnNonObject, this);
+ }
+ if (!%IsInitializedIntlObjectOfType(this, 'dateformat')) {
+ throw %make_type_error(kIncompatibleMethodReceiver,
+ 'Intl.DateTimeFormat.prototype.formatToParts',
+ this);
+ }
+ var dateMs;
+ if (IS_UNDEFINED(dateValue)) {
+ dateMs = %DateCurrentTime();
+ } else {
+ dateMs = TO_NUMBER(dateValue);
+ }
- if (!NUMBER_IS_FINITE(dateMs)) throw %make_range_error(kDateRange);
+ if (!NUMBER_IS_FINITE(dateMs)) throw %make_range_error(kDateRange);
- return %InternalDateFormatToParts(this, new GlobalDate(dateMs));
-}
+ return %InternalDateFormatToParts(this, new GlobalDate(dateMs));
+ }
+);
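
formatToParts is now installed directly on Intl.DateTimeFormat.prototype rather than exported and wired up during bootstrap; its output shape is unchanged:

// Result is an array of { type, value } records, e.g.
// [ { type: 'month', value: ... }, { type: 'literal', value: '/' },
//   { type: 'day', value: ... }, { type: 'literal', value: '/' },
//   { type: 'year', value: ... } ] for the 'en-US' default pattern
// (exact values depend on the locale, time zone, and options).
new Intl.DateTimeFormat('en-US').formatToParts(new Date());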
// Length is 1 as specified in ECMA 402 v2+
@@ -1757,7 +1735,7 @@ function canonicalizeTimeZoneID(tzID) {
tzID = TO_STRING(tzID);
// Special case handling (UTC, GMT).
- var upperID = %StringToUpperCaseI18N(tzID);
+ var upperID = %StringToUpperCaseIntl(tzID);
if (upperID === 'UTC' || upperID === 'GMT' ||
upperID === 'ETC/UTC' || upperID === 'ETC/GMT') {
return 'UTC';
@@ -1841,12 +1819,8 @@ InstallFunction(GlobalIntlv8BreakIterator.prototype, 'resolvedOptions',
var segmenter = Unwrap(this, 'breakiterator', GlobalIntlv8BreakIterator,
'resolvedOptions', false);
- var locale =
- getOptimalLanguageTag(segmenter[resolvedSymbol].requestedLocale,
- segmenter[resolvedSymbol].locale);
-
return {
- locale: locale,
+ locale: segmenter[resolvedSymbol].locale,
type: segmenter[resolvedSymbol].type
};
}
@@ -2014,31 +1988,19 @@ OverrideFunction(GlobalString.prototype, 'localeCompare', function(that) {
}
);
-
-// TODO(littledan): Rewrite these two functions as C++ builtins
-function ToLowerCaseI18N() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLowerCase");
- return %StringToLowerCaseI18N(TO_STRING(this));
-}
-
-function ToUpperCaseI18N() {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.toUpperCase");
- return %StringToUpperCaseI18N(TO_STRING(this));
-}
-
-function ToLocaleLowerCaseI18N(locales) {
+function ToLocaleLowerCaseIntl(locales) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleLowerCase");
return LocaleConvertCase(TO_STRING(this), locales, false);
}
-%FunctionSetLength(ToLocaleLowerCaseI18N, 0);
+%FunctionSetLength(ToLocaleLowerCaseIntl, 0);
-function ToLocaleUpperCaseI18N(locales) {
+function ToLocaleUpperCaseIntl(locales) {
CHECK_OBJECT_COERCIBLE(this, "String.prototype.toLocaleUpperCase");
return LocaleConvertCase(TO_STRING(this), locales, true);
}
-%FunctionSetLength(ToLocaleUpperCaseI18N, 0);
+%FunctionSetLength(ToLocaleUpperCaseIntl, 0);
/**
@@ -2119,24 +2081,15 @@ OverrideFunction(GlobalDate.prototype, 'toLocaleTimeString', function() {
}
);
-%FunctionRemovePrototype(FormatDateToParts);
-%FunctionRemovePrototype(ToLowerCaseI18N);
-%FunctionRemovePrototype(ToUpperCaseI18N);
-%FunctionRemovePrototype(ToLocaleLowerCaseI18N);
-%FunctionRemovePrototype(ToLocaleUpperCaseI18N);
+%FunctionRemovePrototype(ToLocaleLowerCaseIntl);
+%FunctionRemovePrototype(ToLocaleUpperCaseIntl);
-utils.SetFunctionName(FormatDateToParts, "formatToParts");
-utils.SetFunctionName(ToLowerCaseI18N, "toLowerCase");
-utils.SetFunctionName(ToUpperCaseI18N, "toUpperCase");
-utils.SetFunctionName(ToLocaleLowerCaseI18N, "toLocaleLowerCase");
-utils.SetFunctionName(ToLocaleUpperCaseI18N, "toLocaleUpperCase");
+utils.SetFunctionName(ToLocaleLowerCaseIntl, "toLocaleLowerCase");
+utils.SetFunctionName(ToLocaleUpperCaseIntl, "toLocaleUpperCase");
utils.Export(function(to) {
- to.FormatDateToParts = FormatDateToParts;
- to.ToLowerCaseI18N = ToLowerCaseI18N;
- to.ToUpperCaseI18N = ToUpperCaseI18N;
- to.ToLocaleLowerCaseI18N = ToLocaleLowerCaseI18N;
- to.ToLocaleUpperCaseI18N = ToLocaleUpperCaseI18N;
+ to.ToLocaleLowerCaseIntl = ToLocaleLowerCaseIntl;
+ to.ToLocaleUpperCaseIntl = ToLocaleUpperCaseIntl;
});
})
diff --git a/deps/v8/src/js/macros.py b/deps/v8/src/js/macros.py
index 3508cc8270..08f25b1f26 100644
--- a/deps/v8/src/js/macros.py
+++ b/deps/v8/src/js/macros.py
@@ -47,29 +47,28 @@ define kMaxUint32 = 4294967295;
macro IS_ARRAY(arg) = (%_IsArray(arg));
macro IS_ARRAYBUFFER(arg) = (%_ClassOf(arg) === 'ArrayBuffer');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
-macro IS_DATAVIEW(arg) = (%_ClassOf(arg) === 'DataView');
macro IS_DATE(arg) = (%IsDate(arg));
macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_FUNCTION(arg) = (%IsFunction(arg));
macro IS_GENERATOR(arg) = (%_ClassOf(arg) === 'Generator');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
-macro IS_MAP(arg) = (%_ClassOf(arg) === 'Map');
-macro IS_MAP_ITERATOR(arg) = (%_ClassOf(arg) === 'Map Iterator');
+macro IS_MAP(arg) = (%_IsJSMap(arg));
+macro IS_MAP_ITERATOR(arg) = (%_IsJSMapIterator(arg));
macro IS_NULL(arg) = (arg === null);
macro IS_NULL_OR_UNDEFINED(arg) = (arg == null);
macro IS_NUMBER(arg) = (typeof(arg) === 'number');
macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_PROXY(arg) = (%_IsJSProxy(arg));
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
-macro IS_SET(arg) = (%_ClassOf(arg) === 'Set');
-macro IS_SET_ITERATOR(arg) = (%_ClassOf(arg) === 'Set Iterator');
+macro IS_SET(arg) = (%_IsJSSet(arg));
+macro IS_SET_ITERATOR(arg) = (%_IsJSSetIterator(arg));
macro IS_SHAREDARRAYBUFFER(arg) = (%_ClassOf(arg) === 'SharedArrayBuffer');
macro IS_STRING(arg) = (typeof(arg) === 'string');
macro IS_SYMBOL(arg) = (typeof(arg) === 'symbol');
macro IS_TYPEDARRAY(arg) = (%_IsTypedArray(arg));
macro IS_UNDEFINED(arg) = (arg === (void 0));
-macro IS_WEAKMAP(arg) = (%_ClassOf(arg) === 'WeakMap');
-macro IS_WEAKSET(arg) = (%_ClassOf(arg) === 'WeakSet');
+macro IS_WEAKMAP(arg) = (%_IsJSWeakMap(arg));
+macro IS_WEAKSET(arg) = (%_IsJSWeakSet(arg));
# Macro for ES queries of the type: "Type(O) is Object."
macro IS_RECEIVER(arg) = (%_IsJSReceiver(arg));
diff --git a/deps/v8/src/js/max-min.js b/deps/v8/src/js/max-min.js
new file mode 100644
index 0000000000..4b7076ed22
--- /dev/null
+++ b/deps/v8/src/js/max-min.js
@@ -0,0 +1,28 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global, utils) {
+
+%CheckIsBootstrapping();
+
+function MaxSimple(a, b) {
+ return a > b ? a : b;
+}
+
+function MinSimple(a, b) {
+ return a > b ? b : a;
+}
+
+%SetForceInlineFlag(MaxSimple);
+%SetForceInlineFlag(MinSimple);
+
+// ----------------------------------------------------------------------------
+// Exports
+
+utils.Export(function(to) {
+ to.MaxSimple = MaxSimple;
+ to.MinSimple = MinSimple;
+});
+
+})
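
MaxSimple and MinSimple use a single comparison with none of Math.max/Math.min's NaN or -0 handling, which is what makes them cheap enough to force-inline. The difference, in plain JavaScript:

function MaxSimple(a, b) { return a > b ? a : b; }

MaxSimple(NaN, 1);               // 1   -- the comparison is false, so b wins
Math.max(NaN, 1);                // NaN -- Math.max propagates NaN
Object.is(MaxSimple(0, -0), -0); // true; Math.max(0, -0) returns +0 instead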
diff --git a/deps/v8/src/js/runtime.js b/deps/v8/src/js/runtime.js
deleted file mode 100644
index 550b3e4afb..0000000000
--- a/deps/v8/src/js/runtime.js
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This files contains runtime support implemented in JavaScript.
-
-// CAUTION: Some of the functions specified in this file are called
-// directly from compiled code. These are the functions with names in
-// ALL CAPS. The compiled code passes the first argument in 'this'.
-
-
-// The following declarations are shared with other native JS files.
-// They are all declared at this one spot to avoid redeclaration errors.
-
-(function(global, utils) {
-
-%CheckIsBootstrapping();
-
-var GlobalArray = global.Array;
-var GlobalBoolean = global.Boolean;
-var GlobalString = global.String;
-var speciesSymbol;
-
-utils.Import(function(from) {
- speciesSymbol = from.species_symbol;
-});
-
-// ----------------------------------------------------------------------------
-
-
-/* ---------------------------------
- - - - U t i l i t i e s - - -
- ---------------------------------
-*/
-
-
-function ToPositiveInteger(x, rangeErrorIndex) {
- var i = TO_INTEGER(x) + 0;
- if (i < 0) throw %make_range_error(rangeErrorIndex);
- return i;
-}
-
-
-function ToIndex(x, rangeErrorIndex) {
- var i = TO_INTEGER(x) + 0;
- if (i < 0 || i > kMaxSafeInteger) throw %make_range_error(rangeErrorIndex);
- return i;
-}
-
-
-function MaxSimple(a, b) {
- return a > b ? a : b;
-}
-
-
-function MinSimple(a, b) {
- return a > b ? b : a;
-}
-
-
-%SetForceInlineFlag(MaxSimple);
-%SetForceInlineFlag(MinSimple);
-
-
-// ES2015 7.3.20
-function SpeciesConstructor(object, defaultConstructor) {
- var constructor = object.constructor;
- if (IS_UNDEFINED(constructor)) {
- return defaultConstructor;
- }
- if (!IS_RECEIVER(constructor)) {
- throw %make_type_error(kConstructorNotReceiver);
- }
- var species = constructor[speciesSymbol];
- if (IS_NULL_OR_UNDEFINED(species)) {
- return defaultConstructor;
- }
- if (%IsConstructor(species)) {
- return species;
- }
- throw %make_type_error(kSpeciesNotConstructor);
-}
-
-//----------------------------------------------------------------------------
-
-// NOTE: Setting the prototype for Array must take place as early as
-// possible due to code generation for array literals. When
-// generating code for a array literal a boilerplate array is created
-// that is cloned when running the code. It is essential that the
-// boilerplate gets the right prototype.
-%FunctionSetPrototype(GlobalArray, new GlobalArray(0));
-
-// ----------------------------------------------------------------------------
-// Exports
-
-utils.Export(function(to) {
- to.MaxSimple = MaxSimple;
- to.MinSimple = MinSimple;
- to.ToPositiveInteger = ToPositiveInteger;
- to.ToIndex = ToIndex;
- to.SpeciesConstructor = SpeciesConstructor;
-});
-
-})
diff --git a/deps/v8/src/js/string.js b/deps/v8/src/js/string.js
index 1aa1700a1d..a3a59d5fde 100644
--- a/deps/v8/src/js/string.js
+++ b/deps/v8/src/js/string.js
@@ -52,48 +52,6 @@ function StringSearch(pattern) {
}
-// ECMA-262 section 15.5.4.13
-function StringSlice(start, end) {
- CHECK_OBJECT_COERCIBLE(this, "String.prototype.slice");
-
- var s = TO_STRING(this);
- var s_len = s.length;
- var start_i = TO_INTEGER(start);
- var end_i = s_len;
- if (!IS_UNDEFINED(end)) {
- end_i = TO_INTEGER(end);
- }
-
- if (start_i < 0) {
- start_i += s_len;
- if (start_i < 0) {
- start_i = 0;
- }
- } else {
- if (start_i > s_len) {
- return '';
- }
- }
-
- if (end_i < 0) {
- end_i += s_len;
- if (end_i < 0) {
- return '';
- }
- } else {
- if (end_i > s_len) {
- end_i = s_len;
- }
- }
-
- if (end_i <= start_i) {
- return '';
- }
-
- return %_SubString(s, start_i, end_i);
-}
-
-
// ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
function HtmlEscape(str) {
return %RegExpInternalReplace(/"/g, TO_STRING(str), "&quot;");
@@ -240,6 +198,60 @@ function StringCodePointAt(pos) {
return (first - 0xD800) * 0x400 + second + 0x2400;
}
+function StringPad(thisString, maxLength, fillString) {
+ maxLength = TO_LENGTH(maxLength);
+ var stringLength = thisString.length;
+
+ if (maxLength <= stringLength) return "";
+
+ if (IS_UNDEFINED(fillString)) {
+ fillString = " ";
+ } else {
+ fillString = TO_STRING(fillString);
+ if (fillString === "") {
+ // If filler is the empty String, return S.
+ return "";
+ }
+ }
+
+ var fillLength = maxLength - stringLength;
+ var repetitions = (fillLength / fillString.length) | 0;
+ var remainingChars = (fillLength - fillString.length * repetitions) | 0;
+
+ var filler = "";
+ while (true) {
+ if (repetitions & 1) filler += fillString;
+ repetitions >>= 1;
+ if (repetitions === 0) break;
+ fillString += fillString;
+ }
+
+ if (remainingChars) {
+ filler += %_SubString(fillString, 0, remainingChars);
+ }
+
+ return filler;
+}
+
+// ES#sec-string.prototype.padstart
+// String.prototype.padStart(maxLength [, fillString])
+function StringPadStart(maxLength, fillString) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.padStart");
+ var thisString = TO_STRING(this);
+
+ return StringPad(thisString, maxLength, fillString) + thisString;
+}
+%FunctionSetLength(StringPadStart, 1);
+
+// ES#sec-string.prototype.padend
+// String.prototype.padEnd(maxLength [, fillString])
+function StringPadEnd(maxLength, fillString) {
+ CHECK_OBJECT_COERCIBLE(this, "String.prototype.padEnd");
+ var thisString = TO_STRING(this);
+
+ return thisString + StringPad(thisString, maxLength, fillString);
+}
+%FunctionSetLength(StringPadEnd, 1);
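
StringPad assembles the filler by binary decomposition: each round doubles fillString and appends it whenever the low bit of the remaining repetition count is set, so building a filler of n repetitions costs O(log n) concatenations rather than n. Usage, as specified:

'5'.padStart(3, '0');    // '005'
'abc'.padEnd(7, '12');   // 'abc1212'
'abc'.padStart(2, '-');  // 'abc' -- maxLength <= length, no padding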
// -------------------------------------------------------------------
// String methods related to templates
@@ -276,10 +288,10 @@ utils.InstallFunctions(GlobalString, DONT_ENUM, [
utils.InstallFunctions(GlobalString.prototype, DONT_ENUM, [
"codePointAt", StringCodePointAt,
"match", StringMatchJS,
+ "padEnd", StringPadEnd,
+ "padStart", StringPadStart,
"repeat", StringRepeat,
"search", StringSearch,
- "slice", StringSlice,
-
"link", StringLink,
"anchor", StringAnchor,
"fontcolor", StringFontcolor,
diff --git a/deps/v8/src/js/typedarray.js b/deps/v8/src/js/typedarray.js
index 26f724b3f4..1c65c32dbd 100644
--- a/deps/v8/src/js/typedarray.js
+++ b/deps/v8/src/js/typedarray.js
@@ -13,14 +13,12 @@
// array.js has to come before typedarray.js for this to work
var ArrayToString = utils.ImportNow("ArrayToString");
-var ArrayValues;
var GetIterator;
var GetMethod;
var GlobalArray = global.Array;
var GlobalArrayBuffer = global.ArrayBuffer;
var GlobalArrayBufferPrototype = GlobalArrayBuffer.prototype;
var GlobalObject = global.Object;
-var InnerArrayFilter;
var InnerArrayFind;
var InnerArrayFindIndex;
var InnerArrayJoin;
@@ -29,10 +27,8 @@ var InnerArrayToLocaleString;
var InternalArray = utils.InternalArray;
var MaxSimple;
var MinSimple;
-var SpeciesConstructor;
-var ToPositiveInteger;
-var ToIndex;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
+var speciesSymbol = utils.ImportNow("species_symbol");
var toStringTagSymbol = utils.ImportNow("to_string_tag_symbol");
macro TYPED_ARRAYS(FUNCTION)
@@ -56,10 +52,8 @@ TYPED_ARRAYS(DECLARE_GLOBALS)
var GlobalTypedArray = %object_get_prototype_of(GlobalUint8Array);
utils.Import(function(from) {
- ArrayValues = from.ArrayValues;
GetIterator = from.GetIterator;
GetMethod = from.GetMethod;
- InnerArrayFilter = from.InnerArrayFilter;
InnerArrayFind = from.InnerArrayFind;
InnerArrayFindIndex = from.InnerArrayFindIndex;
InnerArrayJoin = from.InnerArrayJoin;
@@ -67,13 +61,37 @@ utils.Import(function(from) {
InnerArrayToLocaleString = from.InnerArrayToLocaleString;
MaxSimple = from.MaxSimple;
MinSimple = from.MinSimple;
- SpeciesConstructor = from.SpeciesConstructor;
- ToPositiveInteger = from.ToPositiveInteger;
- ToIndex = from.ToIndex;
});
+// ES2015 7.3.20
+function SpeciesConstructor(object, defaultConstructor) {
+ var constructor = object.constructor;
+ if (IS_UNDEFINED(constructor)) {
+ return defaultConstructor;
+ }
+ if (!IS_RECEIVER(constructor)) {
+ throw %make_type_error(kConstructorNotReceiver);
+ }
+ var species = constructor[speciesSymbol];
+ if (IS_NULL_OR_UNDEFINED(species)) {
+ return defaultConstructor;
+ }
+ if (%IsConstructor(species)) {
+ return species;
+ }
+ throw %make_type_error(kSpeciesNotConstructor);
+}
+
// --------------- Typed Arrays ---------------------
+// ES6 section 22.2.3.5.1 ValidateTypedArray ( O )
+function ValidateTypedArray(array, methodName) {
+ if (!IS_TYPEDARRAY(array)) throw %make_type_error(kNotTypedArray);
+
+ if (%_ArrayBufferViewWasNeutered(array))
+ throw %make_type_error(kDetachedOperation, methodName);
+}
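
ValidateTypedArray centralizes the two receiver checks the methods below now share; both failures surface as TypeErrors:

// A non-typed-array receiver fails the first check:
Uint8Array.prototype.forEach.call([], function(x) {});  // TypeError
// A detached (neutered) backing buffer fails the second check with
// kDetachedOperation, which names the method that was called.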
+
function TypedArrayDefaultConstructor(typedArray) {
switch (%_ClassOf(typedArray)) {
macro TYPED_ARRAY_CONSTRUCTOR_CASE(NAME, ELEMENT_SIZE)
@@ -94,20 +112,16 @@ function TypedArrayCreate(constructor, arg0, arg1, arg2) {
} else {
var newTypedArray = new constructor(arg0, arg1, arg2);
}
- if (!IS_TYPEDARRAY(newTypedArray)) throw %make_type_error(kNotTypedArray);
- // TODO(littledan): Check for being detached, here and elsewhere
- // All callers where the first argument is a Number have no additional
- // arguments.
+ ValidateTypedArray(newTypedArray, "TypedArrayCreate");
if (IS_NUMBER(arg0) && %_TypedArrayGetLength(newTypedArray) < arg0) {
throw %make_type_error(kTypedArrayTooShort);
}
return newTypedArray;
}
-function TypedArraySpeciesCreate(exemplar, arg0, arg1, arg2, conservative) {
+function TypedArraySpeciesCreate(exemplar, arg0, arg1, arg2) {
var defaultConstructor = TypedArrayDefaultConstructor(exemplar);
- var constructor = SpeciesConstructor(exemplar, defaultConstructor,
- conservative);
+ var constructor = SpeciesConstructor(exemplar, defaultConstructor);
return TypedArrayCreate(constructor, arg0, arg1, arg2);
}
@@ -212,10 +226,8 @@ function NAMESubArray(begin, end) {
var newLength = endInt - beginInt;
var beginByteOffset =
%_ArrayBufferViewGetByteOffset(this) + beginInt * ELEMENT_SIZE;
- // BUG(v8:4665): For web compatibility, subarray needs to always build an
- // instance of the default constructor.
- // TODO(littledan): Switch to the standard or standardize the fix
- return new GlobalNAME(%TypedArrayGetBuffer(this), beginByteOffset, newLength);
+ return TypedArraySpeciesCreate(this, %TypedArrayGetBuffer(this),
+ beginByteOffset, newLength);
}
endmacro
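
subarray now goes through TypedArraySpeciesCreate instead of always constructing the default GlobalNAME, undoing the v8:4665 web-compat workaround. The observable consequence for subclasses:

class MyU8 extends Uint8Array {
  static get [Symbol.species]() { return Uint8Array; }
}
var sub = new MyU8([1, 2, 3]).subarray(1);
sub instanceof Uint8Array;  // true
sub instanceof MyU8;        // false -- Symbol.species is honored now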
@@ -230,7 +242,7 @@ endmacro
TYPED_ARRAYS(TYPED_ARRAY_SUBARRAY_CASE)
}
throw %make_type_error(kIncompatibleMethodReceiver,
- "get TypedArray.prototype.subarray", this);
+ "get %TypedArray%.prototype.subarray", this);
}
%SetForceInlineFlag(TypedArraySubArray);
@@ -249,9 +261,6 @@ function TypedArraySetFromArrayLike(target, source, sourceLength, offset) {
}
}
-%InstallToContext([
- 'typed_array_set_from_array_like', TypedArraySetFromArrayLike]);
-
function TypedArraySetFromOverlappingTypedArray(target, source, offset) {
var sourceElementSize = source.BYTES_PER_ELEMENT;
var targetElementSize = target.BYTES_PER_ELEMENT;
@@ -312,6 +321,7 @@ function TypedArraySet(obj, offset) {
if (intOffset > %_MaxSmi()) {
throw %make_range_error(kTypedArraySetSourceTooLarge);
}
+
switch (%TypedArraySetFastCases(this, obj, intOffset)) {
// These numbers should be synchronized with runtime.cc.
case 0: // TYPED_ARRAY_SET_TYPED_ARRAY_SAME_TYPE
@@ -320,8 +330,12 @@ function TypedArraySet(obj, offset) {
TypedArraySetFromOverlappingTypedArray(this, obj, intOffset);
return;
case 2: // TYPED_ARRAY_SET_TYPED_ARRAY_NONOVERLAPPING
- TypedArraySetFromArrayLike(this,
- obj, %_TypedArrayGetLength(obj), intOffset);
+ if (intOffset === 0) {
+ %TypedArrayCopyElements(this, obj, %_TypedArrayGetLength(obj));
+ } else {
+ TypedArraySetFromArrayLike(
+ this, obj, %_TypedArrayGetLength(obj), intOffset);
+ }
return;
case 3: // TYPED_ARRAY_SET_NON_TYPED_ARRAY
var l = obj.length;
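
The new zero-offset branch lets a non-overlapping typed-array source be copied in one runtime call instead of the element-by-element loop. The two paths correspond to calls like:

var dst = new Float64Array(4);
var src = new Int32Array([9, 8, 7, 6]);  // different type, non-overlapping
dst.set(src);                    // offset 0: %TypedArrayCopyElements fast path
dst.set(src.subarray(0, 2), 2);  // nonzero offset: TypedArraySetFromArrayLike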
@@ -329,7 +343,7 @@ function TypedArraySet(obj, offset) {
if (IS_NUMBER(obj)) {
// For number as a first argument, throw TypeError
// instead of silently ignoring the call, so that
- // the user knows (s)he did something wrong.
+ // users know they did something wrong.
// (Consistent with Firefox and Blink/WebKit)
throw %make_type_error(kInvalidArgument);
}
@@ -352,28 +366,6 @@ function TypedArrayGetToStringTag() {
return name;
}
-function InnerTypedArrayEvery(f, receiver, array, length) {
- if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
-
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- if (!%_Call(f, receiver, element, i, array)) return false;
- }
- }
- return true;
-}
-
-// ES6 draft 05-05-15, section 22.2.3.7
-function TypedArrayEvery(f, receiver) {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerTypedArrayEvery(f, receiver, this, length);
-}
-%FunctionSetLength(TypedArrayEvery, 1);
-
function InnerTypedArrayForEach(f, receiver, array, length) {
if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
@@ -396,7 +388,7 @@ function InnerTypedArrayForEach(f, receiver, array, length) {
// ES6 draft 08-24-14, section 22.2.3.12
function TypedArrayForEach(f, receiver) {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
+ ValidateTypedArray(this, "%TypedArray%.prototype.forEach");
var length = %_TypedArrayGetLength(this);
@@ -404,15 +396,32 @@ function TypedArrayForEach(f, receiver) {
}
%FunctionSetLength(TypedArrayForEach, 1);
+// The following functions cannot be made efficient on sparse arrays while
+// preserving the semantics, since the calls to the receiver function can add
+// or delete elements from the array.
+function InnerTypedArrayFilter(f, receiver, array, length, result) {
+ var result_length = 0;
+ for (var i = 0; i < length; i++) {
+ if (i in array) {
+ var element = array[i];
+ if (%_Call(f, receiver, element, i, array)) {
+ %CreateDataProperty(result, result_length, element);
+ result_length++;
+ }
+ }
+ }
+ return result;
+}
+
// ES6 draft 07-15-13, section 22.2.3.9
function TypedArrayFilter(f, thisArg) {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
+  ValidateTypedArray(this, "%TypedArray%.prototype.filter");
var length = %_TypedArrayGetLength(this);
if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
var result = new InternalArray();
- InnerArrayFilter(f, thisArg, this, length, result);
+ InnerTypedArrayFilter(f, thisArg, this, length, result);
var captured = result.length;
var output = TypedArraySpeciesCreate(this, captured);
for (var i = 0; i < captured; i++) {
@@ -425,7 +434,7 @@ function TypedArrayFilter(f, thisArg) {
// ES6 draft 07-15-13, section 22.2.3.10
function TypedArrayFind(predicate, thisArg) {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
+ ValidateTypedArray(this, "%TypedArray%.prototype.find");
var length = %_TypedArrayGetLength(this);
@@ -436,7 +445,7 @@ function TypedArrayFind(predicate, thisArg) {
// ES6 draft 07-15-13, section 22.2.3.11
function TypedArrayFindIndex(predicate, thisArg) {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
+ ValidateTypedArray(this, "%TypedArray%.prototype.findIndex");
var length = %_TypedArrayGetLength(this);
@@ -447,7 +456,7 @@ function TypedArrayFindIndex(predicate, thisArg) {
// ES6 draft 05-18-15, section 22.2.3.25
function TypedArraySort(comparefn) {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
+ ValidateTypedArray(this, "%TypedArray%.prototype.sort");
var length = %_TypedArrayGetLength(this);
@@ -459,47 +468,9 @@ function TypedArraySort(comparefn) {
}
-// ES6 draft 07-15-13, section 22.2.3.18
-function TypedArrayMap(f, thisArg) {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
- var result = TypedArraySpeciesCreate(this, length);
- if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
- for (var i = 0; i < length; i++) {
- var element = this[i];
- result[i] = %_Call(f, thisArg, element, i, this);
- }
- return result;
-}
-%FunctionSetLength(TypedArrayMap, 1);
-
-function InnerTypedArraySome(f, receiver, array, length) {
- if (!IS_CALLABLE(f)) throw %make_type_error(kCalledNonCallable, f);
-
- for (var i = 0; i < length; i++) {
- if (i in array) {
- var element = array[i];
- if (%_Call(f, receiver, element, i, array)) return true;
- }
- }
- return false;
-}
-
-// ES6 draft 05-05-15, section 22.2.3.24
-function TypedArraySome(f, receiver) {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
-
- return InnerTypedArraySome(f, receiver, this, length);
-}
-%FunctionSetLength(TypedArraySome, 1);
-
-
// ES6 section 22.2.3.27
function TypedArrayToLocaleString() {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
+ ValidateTypedArray(this, "%TypedArray%.prototype.toLocaleString");
var length = %_TypedArrayGetLength(this);
@@ -509,85 +480,13 @@ function TypedArrayToLocaleString() {
// ES6 section 22.2.3.14
function TypedArrayJoin(separator) {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
+ ValidateTypedArray(this, "%TypedArray%.prototype.join");
var length = %_TypedArrayGetLength(this);
return InnerArrayJoin(separator, this, length);
}
-function InnerTypedArrayReduce(
- callback, current, array, length, argumentsLength) {
- if (!IS_CALLABLE(callback)) {
- throw %make_type_error(kCalledNonCallable, callback);
- }
-
- var i = 0;
- find_initial: if (argumentsLength < 2) {
- for (; i < length; i++) {
- if (i in array) {
- current = array[i++];
- break find_initial;
- }
- }
- throw %make_type_error(kReduceNoInitial);
- }
-
- for (; i < length; i++) {
- if (i in array) {
- var element = array[i];
- current = callback(current, element, i, array);
- }
- }
- return current;
-}
-
-// ES6 draft 07-15-13, section 22.2.3.19
-function TypedArrayReduce(callback, current) {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
- return InnerTypedArrayReduce(
- callback, current, this, length, arguments.length);
-}
-%FunctionSetLength(TypedArrayReduce, 1);
-
-function InnerArrayReduceRight(callback, current, array, length,
- argumentsLength) {
- if (!IS_CALLABLE(callback)) {
- throw %make_type_error(kCalledNonCallable, callback);
- }
-
- var i = length - 1;
- find_initial: if (argumentsLength < 2) {
- for (; i >= 0; i--) {
- if (i in array) {
- current = array[i--];
- break find_initial;
- }
- }
- throw %make_type_error(kReduceNoInitial);
- }
-
- for (; i >= 0; i--) {
- if (i in array) {
- var element = array[i];
- current = callback(current, element, i, array);
- }
- }
- return current;
-}
-
-// ES6 draft 07-15-13, section 22.2.3.19
-function TypedArrayReduceRight(callback, current) {
- if (!IS_TYPEDARRAY(this)) throw %make_type_error(kNotTypedArray);
-
- var length = %_TypedArrayGetLength(this);
- return InnerArrayReduceRight(callback, current, this, length,
- arguments.length);
-}
-%FunctionSetLength(TypedArrayReduceRight, 1);
-
// ES6 draft 08-24-14, section 22.2.2.2
function TypedArrayOf() {
@@ -664,16 +563,11 @@ utils.InstallGetter(GlobalTypedArray.prototype, toStringTagSymbol,
utils.InstallFunctions(GlobalTypedArray.prototype, DONT_ENUM, [
"subarray", TypedArraySubArray,
"set", TypedArraySet,
- "every", TypedArrayEvery,
"filter", TypedArrayFilter,
"find", TypedArrayFind,
"findIndex", TypedArrayFindIndex,
"join", TypedArrayJoin,
"forEach", TypedArrayForEach,
- "map", TypedArrayMap,
- "reduce", TypedArrayReduce,
- "reduceRight", TypedArrayReduceRight,
- "some", TypedArraySome,
"sort", TypedArraySort,
"toLocaleString", TypedArrayToLocaleString
]);
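
The hunk above drops the self-hosted every, map, reduce, reduceRight and some from the installed list (presumably reimplemented natively elsewhere in this update), while the surviving methods now validate their receiver through ValidateTypedArray. For reference, a rough standalone C++ analogue of the reduce loop that the deleted InnerTypedArrayReduce implemented — a sketch of the preserved semantics, not V8 code:

    #include <cstddef>
    #include <functional>
    #include <stdexcept>
    #include <vector>

    // Mirrors the deleted JS: with no initial value the first element seeds
    // the accumulator, and reducing an empty array without one is an error.
    double Reduce(const std::vector<double>& elems,
                  const std::function<double(double, double, std::size_t)>& cb,
                  const double* initial) {  // null means "no initial value"
      std::size_t i = 0;
      double current;
      if (initial == nullptr) {
        if (elems.empty()) throw std::invalid_argument("reduce of empty array");
        current = elems[i++];
      } else {
        current = *initial;
      }
      for (; i < elems.size(); i++) current = cb(current, elems[i], i);
      return current;
    }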
diff --git a/deps/v8/src/js/v8natives.js b/deps/v8/src/js/v8natives.js
index 60a78d0ca9..5ae75bae22 100644
--- a/deps/v8/src/js/v8natives.js
+++ b/deps/v8/src/js/v8natives.js
@@ -11,14 +11,6 @@
var GlobalObject = global.Object;
var iteratorSymbol = utils.ImportNow("iterator_symbol");
-var ObjectToString = utils.ImportNow("object_to_string");
-
-// ----------------------------------------------------------------------------
-
-
-// Set up global object.
-var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
-
// ----------------------------------------------------------------------------
// Object
@@ -67,7 +59,7 @@ function ObjectConstructor(x) {
// Set up non-enumerable functions on the Object.prototype object.
utils.InstallFunctions(GlobalObject.prototype, DONT_ENUM, [
- "toString", ObjectToString,
+ // toString is added in bootstrapper.cc
"toLocaleString", ObjectToLocaleString,
// valueOf is added in bootstrapper.cc.
"isPrototypeOf", ObjectIsPrototypeOf,
@@ -103,7 +95,6 @@ function GetIterator(obj, method) {
utils.Export(function(to) {
to.GetIterator = GetIterator;
to.GetMethod = GetMethod;
- to.ObjectHasOwnProperty = GlobalObject.prototype.hasOwnProperty;
});
})
diff --git a/deps/v8/src/json-parser.cc b/deps/v8/src/json-parser.cc
index de404e4c62..93d305df7a 100644
--- a/deps/v8/src/json-parser.cc
+++ b/deps/v8/src/json-parser.cc
@@ -13,6 +13,7 @@
#include "src/objects-inl.h"
#include "src/parsing/token.h"
#include "src/property-descriptor.h"
+#include "src/string-hasher.h"
#include "src/transitions.h"
#include "src/unicode-cache.h"
@@ -487,6 +488,46 @@ void JsonParser<seq_one_byte>::CommitStateToJsonObject(
}
}
+class ElementKindLattice {
+ private:
+ enum {
+ SMI_ELEMENTS,
+ NUMBER_ELEMENTS,
+ OBJECT_ELEMENTS,
+ };
+
+ public:
+ ElementKindLattice() : value_(SMI_ELEMENTS) {}
+
+ void Update(Handle<Object> o) {
+ if (o->IsSmi()) {
+ return;
+ } else if (o->IsHeapNumber()) {
+ if (value_ < NUMBER_ELEMENTS) value_ = NUMBER_ELEMENTS;
+ } else {
+ DCHECK(!o->IsNumber());
+ value_ = OBJECT_ELEMENTS;
+ }
+ }
+
+ ElementsKind GetElementsKind() const {
+ switch (value_) {
+ case SMI_ELEMENTS:
+ return FAST_SMI_ELEMENTS;
+ case NUMBER_ELEMENTS:
+ return FAST_DOUBLE_ELEMENTS;
+ case OBJECT_ELEMENTS:
+ return FAST_ELEMENTS;
+ default:
+ UNREACHABLE();
+ return FAST_ELEMENTS;
+ }
+ }
+
+ private:
+ int value_;
+};
+
// Parse a JSON array. Position must be right at '['.
template <bool seq_one_byte>
Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
@@ -494,26 +535,49 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
ZoneList<Handle<Object> > elements(4, zone());
DCHECK_EQ(c0_, '[');
+ ElementKindLattice lattice;
+
AdvanceSkipWhitespace();
if (c0_ != ']') {
do {
Handle<Object> element = ParseJsonValue();
if (element.is_null()) return ReportUnexpectedCharacter();
elements.Add(element, zone());
+ lattice.Update(element);
} while (MatchSkipWhiteSpace(','));
if (c0_ != ']') {
return ReportUnexpectedCharacter();
}
}
AdvanceSkipWhitespace();
+
// Allocate a fixed array with all the elements.
- Handle<FixedArray> fast_elements =
- factory()->NewFixedArray(elements.length(), pretenure_);
- for (int i = 0, n = elements.length(); i < n; i++) {
- fast_elements->set(i, *elements[i]);
+
+ Handle<Object> json_array;
+ const ElementsKind kind = lattice.GetElementsKind();
+
+ switch (kind) {
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS: {
+ Handle<FixedArray> elems =
+ factory()->NewFixedArray(elements.length(), pretenure_);
+ for (int i = 0; i < elements.length(); i++) elems->set(i, *elements[i]);
+ json_array = factory()->NewJSArrayWithElements(elems, kind, pretenure_);
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ Handle<FixedDoubleArray> elems = Handle<FixedDoubleArray>::cast(
+ factory()->NewFixedDoubleArray(elements.length(), pretenure_));
+ for (int i = 0; i < elements.length(); i++) {
+ elems->set(i, elements[i]->Number());
+ }
+ json_array = factory()->NewJSArrayWithElements(elems, kind, pretenure_);
+ break;
+ }
+ default:
+ UNREACHABLE();
}
- Handle<Object> json_array = factory()->NewJSArrayWithElements(
- fast_elements, FAST_ELEMENTS, pretenure_);
+
return scope.CloseAndEscape(json_array);
}
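
The lattice above only ever moves upward, so a single pass over the parsed elements suffices to pick the most specific elements kind for the whole array. A minimal standalone restatement (not V8 code):

    // SMI < NUMBER < OBJECT; Update() is a join with the element's own kind.
    enum Kind { kSmiOnly, kNumber, kObject };

    Kind Join(Kind a, Kind b) { return a < b ? b : a; }

    // Example: [1, 2.5, 3] joins kSmiOnly, kNumber, kSmiOnly -> kNumber,
    // which the parser above maps to FAST_DOUBLE_ELEMENTS.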
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 93dff69709..6629072683 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -41,9 +41,15 @@ v8::Platform* CreateDefaultPlatform(
return platform;
}
+bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate,
+ MessageLoopBehavior behavior) {
+ return reinterpret_cast<DefaultPlatform*>(platform)->PumpMessageLoop(
+ isolate, behavior);
+}
-bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate) {
- return reinterpret_cast<DefaultPlatform*>(platform)->PumpMessageLoop(isolate);
+void EnsureEventLoopInitialized(v8::Platform* platform, v8::Isolate* isolate) {
+ return reinterpret_cast<DefaultPlatform*>(platform)
+ ->EnsureEventLoopInitialized(isolate);
}
void RunIdleTasks(v8::Platform* platform, v8::Isolate* isolate,
@@ -158,7 +164,30 @@ IdleTask* DefaultPlatform::PopTaskInMainThreadIdleQueue(v8::Isolate* isolate) {
return task;
}
-bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate) {
+void DefaultPlatform::EnsureEventLoopInitialized(v8::Isolate* isolate) {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ if (event_loop_control_.count(isolate) == 0) {
+ event_loop_control_.insert(std::make_pair(
+ isolate, std::unique_ptr<base::Semaphore>(new base::Semaphore(0))));
+ }
+}
+
+void DefaultPlatform::WaitForForegroundWork(v8::Isolate* isolate) {
+ base::Semaphore* semaphore = nullptr;
+ {
+ base::LockGuard<base::Mutex> guard(&lock_);
+ DCHECK_EQ(event_loop_control_.count(isolate), 1);
+ semaphore = event_loop_control_[isolate].get();
+ }
+ DCHECK_NOT_NULL(semaphore);
+ semaphore->Wait();
+}
+
+bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate,
+ MessageLoopBehavior behavior) {
+ if (behavior == MessageLoopBehavior::kWaitForWork) {
+ WaitForForegroundWork(isolate);
+ }
Task* task = NULL;
{
base::LockGuard<base::Mutex> guard(&lock_);
@@ -166,14 +195,14 @@ bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate) {
// Move delayed tasks that hit their deadline to the main queue.
task = PopTaskInMainThreadDelayedQueue(isolate);
while (task != NULL) {
- main_thread_queue_[isolate].push(task);
+ ScheduleOnForegroundThread(isolate, task);
task = PopTaskInMainThreadDelayedQueue(isolate);
}
task = PopTaskInMainThreadQueue(isolate);
if (task == NULL) {
- return false;
+ return behavior == MessageLoopBehavior::kWaitForWork;
}
}
task->Run();
@@ -206,10 +235,17 @@ void DefaultPlatform::CallOnBackgroundThread(Task* task,
queue_.Append(task);
}
+void DefaultPlatform::ScheduleOnForegroundThread(v8::Isolate* isolate,
+ Task* task) {
+ main_thread_queue_[isolate].push(task);
+ if (event_loop_control_.count(isolate) != 0) {
+ event_loop_control_[isolate]->Signal();
+ }
+}
void DefaultPlatform::CallOnForegroundThread(v8::Isolate* isolate, Task* task) {
base::LockGuard<base::Mutex> guard(&lock_);
- main_thread_queue_[isolate].push(task);
+ ScheduleOnForegroundThread(isolate, task);
}
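
With the per-isolate semaphore in place, an embedder can block on foreground work instead of busy-polling. A hypothetical usage sketch against the public wrappers declared in include/libplatform/libplatform.h in this update; platform, isolate and keep_running are assumed embedder state:

    v8::platform::EnsureEventLoopInitialized(platform, isolate);
    while (keep_running) {
      // Sleeps on the semaphore until ScheduleOnForegroundThread signals it,
      // then runs one task; in kWaitForWork mode this returns true even when
      // the queue was empty on wake-up, per the hunk above.
      v8::platform::PumpMessageLoop(
          platform, isolate, v8::platform::MessageLoopBehavior::kWaitForWork);
    }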
diff --git a/deps/v8/src/libplatform/default-platform.h b/deps/v8/src/libplatform/default-platform.h
index c786d85aae..4026864749 100644
--- a/deps/v8/src/libplatform/default-platform.h
+++ b/deps/v8/src/libplatform/default-platform.h
@@ -41,7 +41,10 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
void EnsureInitialized();
- bool PumpMessageLoop(v8::Isolate* isolate);
+ bool PumpMessageLoop(
+ v8::Isolate* isolate,
+ MessageLoopBehavior behavior = MessageLoopBehavior::kDoNotWait);
+ void EnsureEventLoopInitialized(v8::Isolate* isolate);
void RunIdleTasks(v8::Isolate* isolate, double idle_time_in_seconds);
@@ -81,6 +84,9 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
Task* PopTaskInMainThreadDelayedQueue(v8::Isolate* isolate);
IdleTask* PopTaskInMainThreadIdleQueue(v8::Isolate* isolate);
+ void WaitForForegroundWork(v8::Isolate* isolate);
+ void ScheduleOnForegroundThread(v8::Isolate* isolate, Task* task);
+
base::Mutex lock_;
bool initialized_;
int thread_pool_size_;
@@ -89,6 +95,7 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
TaskQueue queue_;
std::map<v8::Isolate*, std::queue<Task*>> main_thread_queue_;
std::map<v8::Isolate*, std::queue<IdleTask*>> main_thread_idle_queue_;
+ std::map<v8::Isolate*, std::unique_ptr<base::Semaphore>> event_loop_control_;
typedef std::pair<double, Task*> DelayedEntry;
std::map<v8::Isolate*,
diff --git a/deps/v8/src/libplatform/task-queue.h b/deps/v8/src/libplatform/task-queue.h
index 330527a09e..441b5b2d7f 100644
--- a/deps/v8/src/libplatform/task-queue.h
+++ b/deps/v8/src/libplatform/task-queue.h
@@ -11,7 +11,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
-#include "testing/gtest/include/gtest/gtest_prod.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index 5ef6d6e340..c29a420169 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -245,48 +245,6 @@ void List<T, P>::StableSort() {
ToVector().StableSort();
}
-
-template <typename T, typename P>
-int SortedListBSearch(const List<T>& list, P cmp) {
- int low = 0;
- int high = list.length() - 1;
- while (low <= high) {
- int mid = low + (high - low) / 2;
- T mid_elem = list[mid];
-
- if (cmp(&mid_elem) > 0) {
- high = mid - 1;
- continue;
- }
- if (cmp(&mid_elem) < 0) {
- low = mid + 1;
- continue;
- }
- // Found the element.
- return mid;
- }
- return -1;
-}
-
-
-template<typename T>
-class ElementCmp {
- public:
- explicit ElementCmp(T e) : elem_(e) {}
- int operator()(const T* other) {
- return PointerValueCompare(other, &elem_);
- }
- private:
- T elem_;
-};
-
-
-template <typename T>
-int SortedListBSearch(const List<T>& list, T elem) {
- return SortedListBSearch<T, ElementCmp<T> > (list, ElementCmp<T>(elem));
-}
-
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h
index b59ece463e..24b96e31a5 100644
--- a/deps/v8/src/list.h
+++ b/deps/v8/src/list.h
@@ -217,17 +217,6 @@ typedef List<Handle<Map> > MapHandleList;
typedef List<Handle<FieldType> > TypeHandleList;
typedef List<Handle<Code> > CodeHandleList;
-// Perform binary search for an element in an already sorted
-// list. Returns the index of the element or -1 if it was not found.
-// |cmp| is a predicate that takes a pointer to an element of the List
-// and returns +1 if it is greater, -1 if it is less than the element
-// being searched.
-template <typename T, class P>
-int SortedListBSearch(const List<T>& list, P cmp);
-template <typename T>
-int SortedListBSearch(const List<T>& list, T elem);
-
-
} // namespace internal
} // namespace v8
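
SortedListBSearch had a direct standard-library equivalent, which presumably made its removal painless; a sketch of the replacement contract:

    #include <algorithm>
    #include <vector>

    // Same contract as the removed helper: the index of |elem| in the sorted
    // |list|, or -1 when absent.
    template <typename T>
    int SortedBSearch(const std::vector<T>& list, const T& elem) {
      auto it = std::lower_bound(list.begin(), list.end(), elem);
      if (it == list.end() || elem < *it) return -1;
      return static_cast<int>(it - list.begin());
    }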
diff --git a/deps/v8/src/lookup.h b/deps/v8/src/lookup.h
index 710010bd03..9f8f64e85d 100644
--- a/deps/v8/src/lookup.h
+++ b/deps/v8/src/lookup.h
@@ -10,6 +10,7 @@
#include "src/isolate.h"
#include "src/objects.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
@@ -260,10 +261,11 @@ class V8_EXPORT_PRIVATE LookupIterator final BASE_EMBEDDED {
void WriteDataValue(Handle<Object> value, bool initializing_store);
inline void UpdateProtector() {
if (IsElement()) return;
+ // This list must be kept in sync with
+ // CodeStubAssembler::HasAssociatedProtector!
if (*name_ == heap()->is_concat_spreadable_symbol() ||
*name_ == heap()->constructor_string() ||
*name_ == heap()->species_symbol() ||
- *name_ == heap()->has_instance_symbol() ||
*name_ == heap()->iterator_symbol()) {
InternalUpdateProtector();
}
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index a6e0f2a2b8..77a402d5ca 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -65,6 +65,9 @@ enum AllocationFlags {
namespace v8 {
namespace internal {
+// Simulators only support C calls with up to kMaxCParameters parameters.
+static constexpr int kMaxCParameters = 9;
+
class FrameScope {
public:
explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
@@ -145,22 +148,22 @@ class FrameAndConstantPoolScope {
// Class for scoping the unavailability of constant pool access.
class ConstantPoolUnavailableScope {
public:
- explicit ConstantPoolUnavailableScope(MacroAssembler* masm)
- : masm_(masm),
+ explicit ConstantPoolUnavailableScope(Assembler* assembler)
+ : assembler_(assembler),
old_constant_pool_available_(FLAG_enable_embedded_constant_pool &&
- masm->is_constant_pool_available()) {
+ assembler->is_constant_pool_available()) {
if (FLAG_enable_embedded_constant_pool) {
- masm_->set_constant_pool_available(false);
+ assembler->set_constant_pool_available(false);
}
}
~ConstantPoolUnavailableScope() {
if (FLAG_enable_embedded_constant_pool) {
- masm_->set_constant_pool_available(old_constant_pool_available_);
+ assembler_->set_constant_pool_available(old_constant_pool_available_);
}
}
private:
- MacroAssembler* masm_;
+ Assembler* assembler_;
int old_constant_pool_available_;
DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
@@ -196,11 +199,11 @@ class NoCurrentFrameScope {
class Comment {
public:
- Comment(MacroAssembler* masm, const char* msg);
+ Comment(Assembler* assembler, const char* msg);
~Comment();
private:
- MacroAssembler* masm_;
+ Assembler* assembler_;
const char* msg_;
};
@@ -208,7 +211,7 @@ class Comment {
class Comment {
public:
- Comment(MacroAssembler*, const char*) {}
+ Comment(Assembler*, const char*) {}
};
#endif // DEBUG
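
Both scopes now accept the Assembler base class rather than requiring a full MacroAssembler, so they can wrap any code generator. Usage sketch, assuming some Assembler-derived generator assm:

    {
      ConstantPoolUnavailableScope no_pool(&assm);  // any Assembler now works
      Comment cmnt(&assm, "inlined fast path");
      // ... emit code that must not touch the constant pool ...
    }  // pool availability restored by the destructor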
diff --git a/deps/v8/src/map-updater.cc b/deps/v8/src/map-updater.cc
index bfd2f80443..9157f8d7aa 100644
--- a/deps/v8/src/map-updater.cc
+++ b/deps/v8/src/map-updater.cc
@@ -123,6 +123,9 @@ Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
new_field_type_ = field_type;
}
+ GeneralizeIfTransitionableFastElementsKind(
+ &new_constness_, &new_representation_, &new_field_type_);
+
if (TryRecofigureToDataFieldInplace() == kEnd) return result_map_;
if (FindRootMap() == kEnd) return result_map_;
if (FindTargetMap() == kEnd) return result_map_;
@@ -134,6 +137,8 @@ Handle<Map> MapUpdater::ReconfigureToDataField(int descriptor,
Handle<Map> MapUpdater::ReconfigureElementsKind(ElementsKind elements_kind) {
DCHECK_EQ(kInitialized, state_);
new_elements_kind_ = elements_kind;
+ is_transitionable_fast_elements_kind_ =
+ IsTransitionableFastElementsKind(new_elements_kind_);
if (FindRootMap() == kEnd) return result_map_;
if (FindTargetMap() == kEnd) return result_map_;
@@ -153,6 +158,28 @@ Handle<Map> MapUpdater::Update() {
return result_map_;
}
+void MapUpdater::GeneralizeIfTransitionableFastElementsKind(
+ PropertyConstness* constness, Representation* representation,
+ Handle<FieldType>* field_type) {
+ DCHECK_EQ(is_transitionable_fast_elements_kind_,
+ IsTransitionableFastElementsKind(new_elements_kind_));
+ if (is_transitionable_fast_elements_kind_ &&
+ Map::IsInplaceGeneralizableField(*constness, *representation,
+ **field_type)) {
+ // We don't support propagation of field generalization through elements
+ // kind transitions because they are inserted into the transition tree
+ // before field transitions. In order to avoid complexity of handling
+ // such a case we ensure that all maps with transitionable elements kinds
+ // do not have fields that can be generalized in-place (without creation
+ // of a new map).
+ if (FLAG_track_constant_fields && FLAG_modify_map_inplace) {
+ *constness = kMutable;
+ }
+ DCHECK(representation->IsHeapObject());
+ *field_type = FieldType::Any(isolate_);
+ }
+}
+
void MapUpdater::GeneralizeField(Handle<Map> map, int modify_index,
PropertyConstness new_constness,
Representation new_representation,
@@ -489,6 +516,9 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
old_details.representation(), old_field_type, next_representation,
target_field_type, isolate_);
+ GeneralizeIfTransitionableFastElementsKind(
+ &next_constness, &next_representation, &next_field_type);
+
Handle<Object> wrapped_type(Map::WrapFieldType(next_field_type));
Descriptor d;
if (next_kind == kData) {
@@ -532,10 +562,17 @@ Handle<DescriptorArray> MapUpdater::BuildDescriptorArray() {
Descriptor d;
if (next_location == kField) {
- Handle<FieldType> old_field_type =
+ Handle<FieldType> next_field_type =
GetOrComputeFieldType(i, old_details.location(), next_representation);
- Handle<Object> wrapped_type(Map::WrapFieldType(old_field_type));
+ // If the |new_elements_kind_| is still transitionable then the old map's
+ // elements kind is also transitionable and therefore the old descriptors
+ // array must already have non in-place generalizable fields.
+ CHECK_IMPLIES(is_transitionable_fast_elements_kind_,
+ !Map::IsInplaceGeneralizableField(
+ next_constness, next_representation, *next_field_type));
+
+ Handle<Object> wrapped_type(Map::WrapFieldType(next_field_type));
Descriptor d;
if (next_kind == kData) {
DCHECK_IMPLIES(!FLAG_track_constant_fields, next_constness == kMutable);
diff --git a/deps/v8/src/map-updater.h b/deps/v8/src/map-updater.h
index 215aa07573..a1d052261c 100644
--- a/deps/v8/src/map-updater.h
+++ b/deps/v8/src/map-updater.h
@@ -2,13 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_MAP_RECONFIGURER_H_
-#define V8_MAP_RECONFIGURER_H_
+#ifndef V8_MAP_UPDATER_H_
+#define V8_MAP_UPDATER_H_
#include "src/elements-kind.h"
+#include "src/field-type.h"
#include "src/globals.h"
#include "src/handles.h"
-#include "src/objects.h"
+#include "src/objects/map.h"
#include "src/property-details.h"
namespace v8 {
@@ -48,7 +49,9 @@ class MapUpdater {
old_map_(old_map),
old_descriptors_(old_map->instance_descriptors(), isolate_),
old_nof_(old_map_->NumberOfOwnDescriptors()),
- new_elements_kind_(old_map_->elements_kind()) {
+ new_elements_kind_(old_map_->elements_kind()),
+ is_transitionable_fast_elements_kind_(
+ IsTransitionableFastElementsKind(new_elements_kind_)) {
// We shouldn't try to update remote objects.
DCHECK(!old_map->FindRootMap()->GetConstructor()->IsFunctionTemplateInfo());
}
@@ -145,6 +148,10 @@ class MapUpdater {
Handle<DescriptorArray> descriptors, int descriptor,
PropertyLocation location, Representation representation);
+ inline void GeneralizeIfTransitionableFastElementsKind(
+ PropertyConstness* constness, Representation* representation,
+ Handle<FieldType>* field_type);
+
void GeneralizeField(Handle<Map> map, int modify_index,
PropertyConstness new_constness,
Representation new_representation,
@@ -160,8 +167,9 @@ class MapUpdater {
State state_ = kInitialized;
ElementsKind new_elements_kind_;
+ bool is_transitionable_fast_elements_kind_;
- // If |modified_descriptor_| is not equal to -1 them the fields below form
+ // If |modified_descriptor_| is not equal to -1 then the fields below form
// an "update" of the |old_map_|'s descriptors.
int modified_descriptor_ = -1;
PropertyKind new_kind_ = kData;
@@ -180,4 +188,4 @@ class MapUpdater {
} // namespace internal
} // namespace v8
-#endif // V8_MAP_RECONFIGURER_H_
+#endif // V8_MAP_UPDATER_H_
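
Condensed, the new guard enforces one invariant: a map with a transitionable fast elements kind must never carry a field that could later be generalized in place, so such fields are widened eagerly (a restatement of the code above, not new logic):

    // On maps with transitionable fast elements kinds:
    //   constness:      kConst     -> kMutable  (when in-place tracking is on)
    //   representation: must already be HeapObject (asserted)
    //   field type:     exact type -> FieldType::Any
    // In-place generalization then has nothing left to widen, so elements-kind
    // transitions never need to propagate field updates across the tree.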
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index e61ce90791..b71b9afce4 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -51,7 +51,7 @@ void MessageHandler::DefaultMessageReport(Isolate* isolate,
Handle<JSMessageObject> MessageHandler::MakeMessageObject(
Isolate* isolate, MessageTemplate::Template message,
const MessageLocation* location, Handle<Object> argument,
- Handle<JSArray> stack_frames) {
+ Handle<FixedArray> stack_frames) {
Factory* factory = isolate->factory();
int start = -1;
@@ -890,13 +890,14 @@ MaybeHandle<Object> AppendErrorString(Isolate* isolate, Handle<Object> error,
Handle<Object> pending_exception =
handle(isolate->pending_exception(), isolate);
isolate->clear_pending_exception();
+ isolate->set_external_caught_exception(false);
err_str = ErrorUtils::ToString(isolate, pending_exception);
if (err_str.is_null()) {
// Formatting the thrown exception threw again, give up.
DCHECK(isolate->has_pending_exception());
isolate->clear_pending_exception();
-
+ isolate->set_external_caught_exception(false);
builder->AppendCString("<error>");
} else {
// Formatted thrown exception successfully, append it.
@@ -991,6 +992,7 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<Object> pending_exception =
handle(isolate->pending_exception(), isolate);
isolate->clear_pending_exception();
+ isolate->set_external_caught_exception(false);
maybe_frame_string = ErrorUtils::ToString(isolate, pending_exception);
if (maybe_frame_string.is_null()) {
@@ -1222,6 +1224,7 @@ Handle<String> FormatMessage(Isolate* isolate, int template_index,
.ToHandle(&msg)) {
DCHECK(isolate->has_pending_exception());
isolate->clear_pending_exception();
+ isolate->set_external_caught_exception(false);
return isolate->factory()->NewStringFromAsciiChecked("<error>");
}
diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h
index 46ed6fce85..7df7288662 100644
--- a/deps/v8/src/messages.h
+++ b/deps/v8/src/messages.h
@@ -341,8 +341,7 @@ class ErrorUtils : public AllStatic {
T(NotAPromise, "% is not a promise") \
T(NotConstructor, "% is not a constructor") \
T(NotDateObject, "this is not a Date object.") \
- T(NotIntlObject, "% is not an i18n object.") \
- T(NotGeneric, "% is not generic") \
+ T(NotGeneric, "% requires that 'this' be a %") \
T(NotIterable, "% is not iterable") \
T(NotPropertyName, "% is not a valid property name") \
T(NotTypedArray, "this is not a typed array.") \
@@ -461,7 +460,6 @@ class ErrorUtils : public AllStatic {
T(RegExpInvalidReplaceString, "Invalid replacement string: '%'") \
T(RegExpNonObject, "% getter called on non-object %") \
T(RegExpNonRegExp, "% getter called on non-RegExp object") \
- T(ReinitializeIntl, "Trying to re-initialize % object.") \
T(ResolverNotAFunction, "Promise resolver % is not a function") \
T(RestrictedFunctionProperties, \
"'caller' and 'arguments' are restricted function properties and cannot " \
@@ -546,7 +544,9 @@ class ErrorUtils : public AllStatic {
T(ConstructorIsAccessor, "Class constructor may not be an accessor") \
T(ConstructorIsGenerator, "Class constructor may not be a generator") \
T(ConstructorIsAsync, "Class constructor may not be an async method") \
- T(DerivedConstructorReturn, \
+ T(ClassConstructorReturnedNonObject, \
+ "Class constructors may only return object or undefined") \
+ T(DerivedConstructorReturnedNonObject, \
"Derived constructors may only return object or undefined") \
T(DuplicateConstructor, "A class may only have one constructor") \
T(DuplicateExport, "Duplicate export of '%'") \
@@ -590,6 +590,7 @@ class ErrorUtils : public AllStatic {
T(MalformedRegExp, "Invalid regular expression: /%/: %") \
T(MalformedRegExpFlags, "Invalid regular expression flags") \
T(ModuleExportUndefined, "Export '%' is not defined in module") \
+ T(HtmlCommentInModule, "HTML comments are not allowed in modules") \
T(MultipleDefaultsInSwitch, \
"More than one default clause in switch statement") \
T(NewlineAfterThrow, "Illegal newline after throw") \
@@ -607,6 +608,8 @@ class ErrorUtils : public AllStatic {
T(ArgStringTerminatesParametersEarly, \
"Arg string terminates parameters early") \
T(UnexpectedEndOfArgString, "Unexpected end of arg string") \
+ T(RestDefaultInitializer, \
+ "Rest parameter may not have a default initializer") \
T(RuntimeWrongNumArgs, "Runtime function given wrong number of arguments") \
T(SuperNotCalled, \
"Must call super constructor in derived class before accessing 'this' or " \
@@ -691,6 +694,7 @@ class ErrorUtils : public AllStatic {
T(AsmJsInvalid, "Invalid asm.js: %") \
T(AsmJsCompiled, "Converted asm.js to WebAssembly: %") \
T(AsmJsInstantiated, "Instantiated asm.js: %") \
+ T(AsmJsLinkingFailed, "Linking failure in asm.js: %") \
/* DataCloneError messages */ \
T(DataCloneError, "% could not be cloned.") \
T(DataCloneErrorOutOfMemory, "Data cannot be cloned, out of memory.") \
@@ -733,7 +737,7 @@ class MessageHandler {
static Handle<JSMessageObject> MakeMessageObject(
Isolate* isolate, MessageTemplate::Template type,
const MessageLocation* location, Handle<Object> argument,
- Handle<JSArray> stack_frames);
+ Handle<FixedArray> stack_frames);
// Report a formatted message (needs JS allocation).
static void ReportMessage(Isolate* isolate, const MessageLocation* loc,
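
The reworded NotGeneric template takes two substitutions instead of one; an illustrative before/after (a sketch — the exact method and type names depend on the call site):

    // old:  T(NotGeneric, "% is not generic")
    //       -> "Date.prototype.getTime is not generic"
    // new:  T(NotGeneric, "% requires that 'this' be a %")
    //       -> "Date.prototype.getTime requires that 'this' be a Date"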
diff --git a/deps/v8/src/mips/OWNERS b/deps/v8/src/mips/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/mips/OWNERS
+++ b/deps/v8/src/mips/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 215c0efd88..9233913528 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -121,8 +121,17 @@ Address RelocInfo::target_address_address() {
// place, ready to be patched with the target. After jump optimization,
// that is the address of the instruction that follows J/JAL/JR/JALR
// instruction.
- return reinterpret_cast<Address>(
- pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+ if (IsMipsArchVariant(kMips32r6)) {
+ // On R6 we don't move to the end of the instructions to be patched, but one
+ // instruction before, because if these instructions are at the end of the
+ // code object it can cause errors in the deserializer.
+ return reinterpret_cast<Address>(
+ pc_ +
+ (Assembler::kInstructionsFor32BitConstant - 1) * Assembler::kInstrSize);
+ } else {
+ return reinterpret_cast<Address>(
+ pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+ }
}
@@ -357,23 +366,23 @@ template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
+ visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
+ visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
+ visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
- visitor->VisitInternalReference(this);
+ visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
+ visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(this);
+ visitor->VisitDebugTarget(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
+ visitor->VisitRuntimeEntry(host(), this);
}
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index ed3f50a817..3a37c16e5a 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -2077,6 +2077,7 @@ void Assembler::lui(Register rd, int32_t j) {
void Assembler::aui(Register rt, Register rs, int32_t j) {
// This instruction uses same opcode as 'lui'. The difference in encoding is
// 'lui' has zero reg. for rs field.
+ DCHECK(IsMipsArchVariant(kMips32r6));
DCHECK(!(rs.is(zero_reg)));
DCHECK(is_uint16(j));
GenInstrImmediate(LUI, rs, rt, j);
@@ -3548,13 +3549,20 @@ void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
- CodeDesc desc; // The new buffer.
+ CodeDesc desc; // the new buffer
if (buffer_size_ < 1 * MB) {
desc.buffer_size = 2*buffer_size_;
} else {
desc.buffer_size = buffer_size_ + 1*MB;
}
- CHECK_GT(desc.buffer_size, 0); // No overflow.
+
+ // Some internal data structures overflow for very large buffers,
+ // they must ensure that kMaximalBufferSize is not too large.
+ if (desc.buffer_size > kMaximalBufferSize ||
+ static_cast<size_t>(desc.buffer_size) >
+ isolate_data().max_old_generation_size_) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
@@ -3746,11 +3754,17 @@ void Assembler::CheckTrampolinePool() {
Address Assembler::target_address_at(Address pc) {
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
- // Interpret 2 instructions generated by li: lui/ori
- if (IsLui(instr1) && IsOri(instr2)) {
- // Assemble the 32 bit value.
- return reinterpret_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
- GetImmediate16(instr2));
+ // Interpret 2 instructions generated by li (lui/ori) or optimized pairs
+ // lui/jic, aui/jic or lui/jialc.
+ if (IsLui(instr1)) {
+ if (IsOri(instr2)) {
+ // Assemble the 32 bit value.
+ return reinterpret_cast<Address>((GetImmediate16(instr1) << kLuiShift) |
+ GetImmediate16(instr2));
+ } else if (IsJicOrJialc(instr2)) {
+ // Assemble the 32 bit value.
+ return reinterpret_cast<Address>(CreateTargetAddress(instr1, instr2));
+ }
}
// We should never get here, force a bad address if we do.
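
target_address_at now recognizes two encodings behind a leading lui. The lui/ori case is a plain recombination of two 16-bit halves; a standalone sketch:

    #include <cstdint>

    uint32_t AssembleLuiOri(uint16_t lui_imm, uint16_t ori_imm) {
      return (static_cast<uint32_t>(lui_imm) << 16) | ori_imm;  // kLuiShift == 16
    }

    // The lui/jic (or lui/jialc) pair cannot reuse this directly: jic's 16-bit
    // field is sign-extended at execution time, so CreateTargetAddress has to
    // undo the compensation applied to the lui half when the pair was emitted.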
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 61043eff64..7df318b9ab 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -356,6 +356,9 @@ constexpr DoubleRegister kLithiumScratchDouble = f30;
constexpr DoubleRegister kDoubleRegZero = f28;
// Used on mips32r6 for compare operations.
constexpr DoubleRegister kDoubleCompareReg = f26;
+// MSA zero and scratch regs must have the same numbers as FPU zero and scratch
+constexpr Simd128Register kSimd128RegZero = w28;
+constexpr Simd128Register kSimd128ScratchReg = w30;
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
@@ -436,6 +439,8 @@ class Operand BASE_EMBEDDED {
Register rm() const { return rm_; }
+ RelocInfo::Mode rmode() const { return rmode_; }
+
private:
Register rm_;
int32_t imm32_; // Valid if rm_ == no_reg.
@@ -591,10 +596,19 @@ class Assembler : public AssemblerBase {
inline static void deserialization_set_special_target_at(
Isolate* isolate, Address instruction_payload, Code* code,
Address target) {
- set_target_address_at(
- isolate,
- instruction_payload - kInstructionsFor32BitConstant * kInstrSize, code,
- target);
+ if (IsMipsArchVariant(kMips32r6)) {
+ // On R6 the address location is shifted by one instruction
+ set_target_address_at(
+ isolate,
+ instruction_payload -
+ (kInstructionsFor32BitConstant - 1) * kInstrSize,
+ code, target);
+ } else {
+ set_target_address_at(
+ isolate,
+ instruction_payload - kInstructionsFor32BitConstant * kInstrSize,
+ code, target);
+ }
}
// This sets the internal reference at the pc.
@@ -625,7 +639,7 @@ class Assembler : public AssemblerBase {
// Distance between the instruction referring to the address of the call
// target and the return address.
#ifdef _MIPS_ARCH_MIPS32R6
- static constexpr int kCallTargetAddressOffset = 3 * kInstrSize;
+ static constexpr int kCallTargetAddressOffset = 2 * kInstrSize;
#else
static constexpr int kCallTargetAddressOffset = 4 * kInstrSize;
#endif
@@ -1913,6 +1927,9 @@ class Assembler : public AssemblerBase {
inline void CheckBuffer();
private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
inline static void set_target_internal_reference_encoded_at(Address pc,
Address target);
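
The new kMaximalBufferSize cap turns silent overflow of internal displacement bookkeeping into an explicit OOM. A sketch of the growth policy from the GrowBuffer hunk above, under these constants:

    #include <cstddef>
    #include <cstdlib>

    std::size_t NextBufferSize(std::size_t current) {
      const std::size_t kMB = 1024 * 1024;
      // Double while small, then grow linearly.
      std::size_t next = current < 1 * kMB ? 2 * current : current + 1 * kMB;
      if (next > 512 * kMB) std::abort();  // FatalProcessOutOfMemory in V8
      return next;
    }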
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 2f5fa2cec1..0fcdafca21 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -1102,9 +1102,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ lw(a1, MemOperand(a1));
__ li(a2, Operand(pending_handler_offset_address));
__ lw(a2, MemOperand(a2));
- __ Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Addu(t9, a1, a2);
- __ Jump(t9);
+ __ Jump(t9, Code::kHeaderSize - kHeapObjectTag);
}
@@ -1237,8 +1236,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ lw(t9, MemOperand(t0)); // Deref address.
// Call JSEntryTrampoline.
- __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
- __ Call(t9);
+ __ Call(t9, Code::kHeaderSize - kHeapObjectTag);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@@ -1271,87 +1269,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Jump(ra);
}
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- // This case is handled prior to the RegExpExecStub call.
- __ Abort(kUnexpectedRegExpExecCall);
-#else // V8_INTERPRETED_REGEXP
- // Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 9;
- const int kParameterRegisters = 4;
- __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
-
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are before that on the stack or in registers, meaning we
- // treat the return address as argument 5. Thus every argument after that
- // needs to be shifted back by 1. Since DirectCEntryStub will handle
- // allocating space for the c argument slots, we don't need to calculate
- // that into the argument positions on the stack. This is how the stack will
- // look (sp meaning the value of sp at this moment):
- // [sp + 5] - Argument 9
- // [sp + 4] - Argument 8
- // [sp + 3] - Argument 7
- // [sp + 2] - Argument 6
- // [sp + 1] - Argument 5
- // [sp + 0] - saved ra
-
- // Argument 9: Pass current isolate address.
- // CFunctionArgumentOperand handles MIPS stack argument slots.
- __ li(t1, Operand(ExternalReference::isolate_address(isolate())));
- __ sw(t1, MemOperand(sp, 5 * kPointerSize));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ li(t1, Operand(1));
- __ sw(t1, MemOperand(sp, 4 * kPointerSize));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ li(t1, Operand(address_of_regexp_stack_memory_address));
- __ lw(t1, MemOperand(t1, 0));
- __ li(t2, Operand(address_of_regexp_stack_memory_size));
- __ lw(t2, MemOperand(t2, 0));
- __ addu(t1, t1, t2);
- __ sw(t1, MemOperand(sp, 3 * kPointerSize));
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(t1, zero_reg);
- __ sw(t1, MemOperand(sp, 2 * kPointerSize));
-
- // Argument 5: static offsets vector buffer.
- __ li(
- t1,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
- __ sw(t1, MemOperand(sp, 1 * kPointerSize));
-
- // Argument 4, a3: End of string data
- // Argument 3, a2: Start of string data
- CHECK(a3.is(RegExpExecDescriptor::StringEndRegister()));
- CHECK(a2.is(RegExpExecDescriptor::StringStartRegister()));
-
- // Argument 2 (a1): Previous index.
- CHECK(a1.is(RegExpExecDescriptor::LastIndexRegister()));
-
- // Argument 1 (a0): Subject string.
- CHECK(a0.is(RegExpExecDescriptor::StringRegister()));
-
- // Locate the code entry and call it.
- Register code_reg = RegExpExecDescriptor::CodeRegister();
- __ Addu(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm, code_reg);
-
- __ LeaveExitFrame(false, no_reg, true);
-
- // Return the smi-tagged result.
- __ SmiTag(v0);
- __ Ret();
-#endif // V8_INTERPRETED_REGEXP
-}
-
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// a0 : number of arguments to the construct function
@@ -1502,8 +1419,7 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// context at this point).
__ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kConstructStubOffset));
- __ Addu(at, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(at);
+ __ Jump(at, t0, Code::kHeaderSize - kHeapObjectTag);
__ bind(&non_function);
__ mov(a3, a1);
@@ -3057,9 +2973,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
}
Register scratch = call_data;
- if (!call_data_undefined()) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- }
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
// Push return value and default return value.
__ Push(scratch, scratch);
__ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
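
Several call sites above switch to the offset-taking Jump/Call overloads so the Code header adjustment folds into the jump itself. A before/after sketch:

    // old: materialize the entry point, then jump
    //   __ Addu(t9, a1, a2);
    //   __ Addu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
    //   __ Jump(t9);
    //
    // new: let Jump fold the constant offset into jic on r6
    //   __ Addu(t9, a1, a2);
    //   __ Jump(t9, Code::kHeaderSize - kHeapObjectTag);
    //
    // Pre-r6 cores still get the explicit Addu inside the new overload, so the
    // sequence only shortens on r6.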
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index c96eb67724..0c2d2c7544 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -30,25 +30,22 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- if (FLAG_zap_code_space) {
- // Fail hard and early if we enter this code object again.
- byte* pointer = code->FindCodeAgeSequence();
- if (pointer != NULL) {
- pointer += kNoCodeAgeSequenceLength;
- } else {
- pointer = code->instruction_start();
- }
- CodePatcher patcher(isolate, pointer, 1);
- patcher.masm()->break_(0xCC);
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int osr_offset = data->OsrPcOffset()->value();
- if (osr_offset > 0) {
- CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
- 1);
- osr_patcher.masm()->break_(0xCC);
- }
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(isolate, pointer, 1);
+ patcher.masm()->break_(0xCC);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 1);
+ osr_patcher.masm()->break_(0xCC);
}
DeoptimizationInputData* deopt_data =
@@ -326,14 +323,14 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 3 * Assembler::kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
- Label table_start, done, done_special, trampoline_jump;
+ Label table_start, done, trampoline_jump;
__ bind(&table_start);
int kMaxEntriesBranchReach = (1 << (kImm16Bits - 2))/
(table_entry_size_ / Assembler::kInstrSize);
@@ -346,6 +343,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK(is_int16(i));
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
+ __ nop();
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
@@ -356,34 +354,29 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ Push(at);
} else {
// Uncommon case, the branch cannot reach.
- // Create mini trampoline and adjust id constants to get proper value at
- // the end of table.
- for (int i = kMaxEntriesBranchReach; i > 1; i--) {
+ // Create mini trampoline to reach the end of the table
+ for (int i = 0, j = 0; i < count(); i++, j++) {
Label start;
__ bind(&start);
DCHECK(is_int16(i));
- __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
- __ li(at, - i); // In the delay slot.
+ if (j >= kMaxEntriesBranchReach) {
+ j = 0;
+ __ li(at, i);
+ __ bind(&trampoline_jump);
+ trampoline_jump = Label();
+ __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
+ __ nop();
+ } else {
+ __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
+ __ li(at, i); // In the delay slot.
+ __ nop();
+ }
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
- // Entry with id == kMaxEntriesBranchReach - 1.
- __ bind(&trampoline_jump);
- __ BranchShort(USE_DELAY_SLOT, &done_special);
- __ li(at, -1);
-
- for (int i = kMaxEntriesBranchReach ; i < count(); i++) {
- Label start;
- __ bind(&start);
- DCHECK(is_int16(i));
- __ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
- __ li(at, i); // In the delay slot.
- }
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
- __ bind(&done_special);
- __ addiu(at, at, kMaxEntriesBranchReach);
- __ bind(&done);
+ __ bind(&trampoline_jump);
__ Push(at);
}
}
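
Each deopt table entry is now branch + li + nop, i.e. three instructions, which changes how many entries a short branch can span before a mini trampoline is needed. A quick arithmetic check using the constants from the file above:

    #include <cstdio>

    int main() {
      const int kImm16Bits = 16;
      const int kInstrSize = 4;
      const int kTableEntrySize = 3 * kInstrSize;  // branch + li + nop
      // A short branch reaches 1 << (kImm16Bits - 2) instructions forward.
      int max_entries = (1 << (kImm16Bits - 2)) / (kTableEntrySize / kInstrSize);
      std::printf("%d\n", max_entries);  // 5461; past this, mini trampolines
      return 0;
    }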
diff --git a/deps/v8/src/mips/interface-descriptors-mips.cc b/deps/v8/src/mips/interface-descriptors-mips.cc
index 6e77ee835a..c1e8229e22 100644
--- a/deps/v8/src/mips/interface-descriptors-mips.cc
+++ b/deps/v8/src/mips/interface-descriptors-mips.cc
@@ -56,11 +56,6 @@ const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
-const Register RegExpExecDescriptor::StringRegister() { return a0; }
-const Register RegExpExecDescriptor::LastIndexRegister() { return a1; }
-const Register RegExpExecDescriptor::StringStartRegister() { return a2; }
-const Register RegExpExecDescriptor::StringEndRegister() { return a3; }
-const Register RegExpExecDescriptor::CodeRegister() { return t0; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
@@ -161,8 +156,19 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: the target to call
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ Register registers[] = {a1, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: the target to call
+ // a3: new target
+ // a0: number of arguments
// a2: start index (to support rest parameters)
- Register registers[] = {a1, a2};
+ Register registers[] = {a1, a3, a0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 80d0505d70..6dd611e1f6 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -219,7 +219,7 @@ void MacroAssembler::RecordWriteField(
Addu(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
+ And(t8, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -287,7 +287,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
+ And(at, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, at, Operand(zero_reg));
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -564,8 +564,13 @@ void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
subu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+ if (is_int16(-rt.imm32_) && !MustUseReg(rt.rmode_)) {
addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
+ } else if (!(-rt.imm32_ & kHiMask) && !MustUseReg(rt.rmode_)) { // Use load
+ // -imm and addu for cases where loading -imm generates one instruction.
+ DCHECK(!rs.is(at));
+ li(at, -rt.imm32_);
+ addu(rd, rs, at);
} else {
// li handles the relocation.
DCHECK(!rs.is(at));
@@ -3600,22 +3605,87 @@ bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return false;
}
+void MacroAssembler::Jump(Register target, int16_t offset, Condition cond,
+ Register rs, const Operand& rt, BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(is_int16(offset));
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
+ if (cond == cc_always) {
+ jic(target, offset);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jic(target, offset);
+ }
+ } else {
+ if (offset != 0) {
+ Addu(target, target, offset);
+ }
+ if (cond == cc_always) {
+ jr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT) nop();
+ }
+}
-void MacroAssembler::Jump(Register target,
- Condition cond,
- Register rs,
- const Operand& rt,
+void MacroAssembler::Jump(Register target, Register base, int16_t offset,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
+ DCHECK(is_int16(offset));
BlockTrampolinePoolScope block_trampoline_pool(this);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
if (cond == cc_always) {
- jic(target, 0);
+ jic(base, offset);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jic(base, offset);
+ }
+ } else {
+ if (offset != 0) {
+ Addu(target, base, offset);
+ } else { // Call through target
+ if (!target.is(base)) mov(target, base);
+ }
+ if (cond == cc_always) {
+ jr(target);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
- jic(target, 0);
+ jr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT) nop();
+ }
+}
+
+void MacroAssembler::Jump(Register target, const Operand& offset,
+ Condition cond, Register rs, const Operand& rt,
+ BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT &&
+ !is_int16(offset.immediate())) {
+ uint32_t aui_offset, jic_offset;
+ Assembler::UnpackTargetAddressUnsigned(offset.immediate(), aui_offset,
+ jic_offset);
+ RecordRelocInfo(RelocInfo::EXTERNAL_REFERENCE, offset.immediate());
+ aui(target, target, aui_offset);
+ if (cond == cc_always) {
+ jic(target, jic_offset);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jic(target, jic_offset);
}
} else {
+ if (offset.immediate() != 0) {
+ Addu(target, target, offset);
+ }
if (cond == cc_always) {
jr(target);
} else {
@@ -3635,14 +3705,24 @@ void MacroAssembler::Jump(intptr_t target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
Label skip;
if (cond != cc_always) {
Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
}
// The first instruction of 'li' may be placed in the delay slot.
// This is not an issue, t9 is expected to be clobbered anyway.
- li(t9, Operand(target, rmode));
- Jump(t9, al, zero_reg, Operand(zero_reg), bd);
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
+ uint32_t lui_offset, jic_offset;
+ UnpackTargetAddressUnsigned(target, lui_offset, jic_offset);
+ DCHECK(MustUseReg(rmode));
+ RecordRelocInfo(rmode, target);
+ lui(t9, lui_offset);
+ Jump(t9, jic_offset, al, zero_reg, Operand(zero_reg), bd);
+ } else {
+ li(t9, Operand(target, rmode));
+ Jump(t9, 0, al, zero_reg, Operand(zero_reg), bd);
+ }
bind(&skip);
}
@@ -3669,11 +3749,8 @@ void MacroAssembler::Jump(Handle<Code> code,
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
}
-
-int MacroAssembler::CallSize(Register target,
- Condition cond,
- Register rs,
- const Operand& rt,
+int MacroAssembler::CallSize(Register target, int16_t offset, Condition cond,
+ Register rs, const Operand& rt,
BranchDelaySlot bd) {
int size = 0;
@@ -3685,16 +3762,59 @@ int MacroAssembler::CallSize(Register target,
if (bd == PROTECT && !IsMipsArchVariant(kMips32r6)) size += 1;
+ if (!IsMipsArchVariant(kMips32r6) && offset != 0) {
+ size += 1;
+ }
+
return size * kInstrSize;
}
// Note: To call gcc-compiled C code on mips, you must call thru t9.
-void MacroAssembler::Call(Register target,
- Condition cond,
- Register rs,
- const Operand& rt,
+void MacroAssembler::Call(Register target, int16_t offset, Condition cond,
+ Register rs, const Operand& rt, BranchDelaySlot bd) {
+ DCHECK(is_int16(offset));
+#ifdef DEBUG
+ int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
+#endif
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label start;
+ bind(&start);
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
+ if (cond == cc_always) {
+ jialc(target, offset);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jialc(target, offset);
+ }
+ } else {
+ if (offset != 0) {
+ Addu(target, target, offset);
+ }
+ if (cond == cc_always) {
+ jalr(target);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
+ jalr(target);
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bd == PROTECT) nop();
+ }
+
+#ifdef DEBUG
+ CHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
+ SizeOfCodeGeneratedSince(&start));
+#endif
+}
+
+// Note: To call gcc-compiled C code on mips, you must call thru t9.
+void MacroAssembler::Call(Register target, Register base, int16_t offset,
+ Condition cond, Register rs, const Operand& rt,
BranchDelaySlot bd) {
+ DCHECK(is_uint16(offset));
#ifdef DEBUG
int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
#endif
@@ -3704,13 +3824,18 @@ void MacroAssembler::Call(Register target,
bind(&start);
if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
if (cond == cc_always) {
- jialc(target, 0);
+ jialc(base, offset);
} else {
BRANCH_ARGS_CHECK(cond, rs, rt);
Branch(2, NegateCondition(cond), rs, rt);
- jialc(target, 0);
+ jialc(base, offset);
}
} else {
+ if (offset != 0) {
+ Addu(target, base, offset);
+ } else { // Call through target
+ if (!target.is(base)) mov(target, base);
+ }
if (cond == cc_always) {
jalr(target);
} else {
@@ -3723,7 +3848,7 @@ void MacroAssembler::Call(Register target,
}
#ifdef DEBUG
- CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
+ CHECK_EQ(size + CallSize(target, offset, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
#endif
}
@@ -3735,8 +3860,11 @@ int MacroAssembler::CallSize(Address target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
- int size = CallSize(t9, cond, rs, rt, bd);
- return size + 2 * kInstrSize;
+ int size = CallSize(t9, 0, cond, rs, rt, bd);
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always)
+ return size + 1 * kInstrSize;
+ else
+ return size + 2 * kInstrSize;
}
@@ -3746,12 +3874,23 @@ void MacroAssembler::Call(Address target,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+ CheckBuffer();
BlockTrampolinePoolScope block_trampoline_pool(this);
Label start;
bind(&start);
int32_t target_int = reinterpret_cast<int32_t>(target);
- li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
- Call(t9, cond, rs, rt, bd);
+ if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) {
+ uint32_t lui_offset, jialc_offset;
+ UnpackTargetAddressUnsigned(target_int, lui_offset, jialc_offset);
+ if (MustUseReg(rmode)) {
+ RecordRelocInfo(rmode, target_int);
+ }
+ lui(t9, lui_offset);
+ Call(t9, jialc_offset, cond, rs, rt, bd);
+ } else {
+ li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
+ Call(t9, 0, cond, rs, rt, bd);
+ }
DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
@@ -3796,7 +3935,7 @@ void MacroAssembler::Ret(Condition cond,
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
- Jump(ra, cond, rs, rt, bd);
+ Jump(ra, 0, cond, rs, rt, bd);
}
@@ -3825,9 +3964,8 @@ void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
- // references
- // until associated instructions are emitted and available to be
- // patched.
+ // references until associated instructions are emitted and
+ // available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
@@ -3850,8 +3988,8 @@ void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
uint32_t imm32;
imm32 = jump_address(L);
if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
- uint32_t lui_offset, jic_offset;
- UnpackTargetAddressUnsigned(imm32, lui_offset, jic_offset);
+ uint32_t lui_offset, jialc_offset;
+ UnpackTargetAddressUnsigned(imm32, lui_offset, jialc_offset);
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
@@ -3859,16 +3997,15 @@ void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
// available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(at, lui_offset);
- jialc(at, jic_offset);
+ jialc(at, jialc_offset);
}
CheckBuffer();
} else {
{
BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
- // references
- // until associated instructions are emitted and available to be
- // patched.
+ // references until associated instructions are emitted and
+ // available to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
lui(at, (imm32 & kHiMask) >> kLuiShift);
ori(at, at, (imm32 & kImm16Mask));
@@ -6047,15 +6184,27 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void MacroAssembler::CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments) {
- li(t8, Operand(function));
- CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
+ if (IsMipsArchVariant(kMips32r6)) {
+ uint32_t lui_offset, jialc_offset;
+ UnpackTargetAddressUnsigned(Operand(function).immediate(), lui_offset,
+ jialc_offset);
+ if (MustUseReg(Operand(function).rmode())) {
+ RecordRelocInfo(Operand(function).rmode(), Operand(function).immediate());
+ }
+ lui(t9, lui_offset);
+ CallCFunctionHelper(t9, jialc_offset, num_reg_arguments,
+ num_double_arguments);
+ } else {
+ li(t9, Operand(function));
+ CallCFunctionHelper(t9, 0, num_reg_arguments, num_double_arguments);
+ }
}
void MacroAssembler::CallCFunction(Register function,
int num_reg_arguments,
int num_double_arguments) {
- CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+ CallCFunctionHelper(function, 0, num_reg_arguments, num_double_arguments);
}
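
For comparison, a rough emission count for the two CallCFunction paths above (a sketch; the exact sequence depends on how li expands the constant):

// pre-r6:  lui  t9, hi         r6:  lui   t9, hi
//          ori  t9, t9, lo          jialc t9, lo   // no delay slot
//          jalr t9
//          nop                 // branch delay slot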
@@ -6070,10 +6219,11 @@ void MacroAssembler::CallCFunction(Register function,
CallCFunction(function, num_arguments, 0);
}
-
-void MacroAssembler::CallCFunctionHelper(Register function,
+void MacroAssembler::CallCFunctionHelper(Register function_base,
+ int16_t function_offset,
int num_reg_arguments,
int num_double_arguments) {
+ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
@@ -6102,12 +6252,12 @@ void MacroAssembler::CallCFunctionHelper(Register function,
// allow preemption, so the return address in the link register
// stays correct.
- if (!function.is(t9)) {
- mov(t9, function);
- function = t9;
+ if (!function_base.is(t9)) {
+ mov(t9, function_base);
+ function_base = t9;
}
- Call(function);
+ Call(function_base, function_offset);
int stack_passed_arguments = CalculateStackPassedWords(
num_reg_arguments, num_double_arguments);
@@ -6442,6 +6592,7 @@ CodePatcher::~CodePatcher() {
}
// Check that the code was patched as expected.
+
DCHECK(masm_.pc_ == address_ + size_);
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 94802f8858..3b2539e408 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -180,12 +180,15 @@ class MacroAssembler: public Assembler {
#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
- void Jump(Register target, COND_ARGS);
+ void Jump(Register target, int16_t offset = 0, COND_ARGS);
+ void Jump(Register target, Register base, int16_t offset = 0, COND_ARGS);
+ void Jump(Register target, const Operand& offset, COND_ARGS);
void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
- static int CallSize(Register target, COND_ARGS);
- void Call(Register target, COND_ARGS);
+ static int CallSize(Register target, int16_t offset = 0, COND_ARGS);
+ void Call(Register target, int16_t offset = 0, COND_ARGS);
+ void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
int CallSize(Handle<Code> code,
@@ -1664,9 +1667,8 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
private:
- void CallCFunctionHelper(Register function,
- int num_reg_arguments,
- int num_double_arguments);
+ void CallCFunctionHelper(Register function_base, int16_t function_offset,
+ int num_reg_arguments, int num_double_arguments);
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
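
A hedged usage sketch for the offset-taking overloads declared above (the call sites are illustrative assumptions; on pre-r6 variants the extra offset presumably has to stay zero, since only jic/jialc can fold it into the jump):

// MacroAssembler* masm assumed in scope:
masm->Call(t9, 0x1234);  // r6: the lui/jialc pair absorbs the 16-bit offset
masm->Jump(ra, 0);       // Ret() now routes through this overload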
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 94c3112ff4..38816e9e0d 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -1016,6 +1016,8 @@ void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
+ base::LockGuard<base::Mutex> lock_guard(
+ isolate->simulator_redirection_mutex());
Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
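
Redirection::Get walks and possibly extends an isolate-owned list, so two simulator threads sharing an isolate could otherwise append duplicate entries. A sketch of the scoped-lock idiom used above:

{
  // Constructor acquires the mutex, destructor releases it, so every
  // exit path from the block is covered.
  base::LockGuard<base::Mutex> guard(isolate->simulator_redirection_mutex());
  // ... look up or append the Redirection for external_function ...
}  // released here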
@@ -2028,12 +2030,11 @@ void Simulator::Format(Instruction* instr, const char* format) {
// 64-bit value. With the code below we assume that all runtime calls return
// 64 bits of result. If they don't, the v1 result register contains a bogus
// value, which is fine because it is caller-saved.
-typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3,
- int32_t arg4,
- int32_t arg5);
+typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0, int32_t arg1,
+ int32_t arg2, int32_t arg3,
+ int32_t arg4, int32_t arg5,
+ int32_t arg6, int32_t arg7,
+ int32_t arg8);
typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int32_t arg0, int32_t arg1,
int32_t arg2, int32_t arg3,
@@ -2076,6 +2077,10 @@ void Simulator::SoftwareInterrupt() {
// Args 4 and 5 are on the stack after the reserved space for args 0..3.
int32_t arg4 = stack_pointer[4];
int32_t arg5 = stack_pointer[5];
+ int32_t arg6 = stack_pointer[6];
+ int32_t arg7 = stack_pointer[7];
+ int32_t arg8 = stack_pointer[8];
+ STATIC_ASSERT(kMaxCParameters == 9);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
@@ -2282,11 +2287,12 @@ void Simulator::SoftwareInterrupt() {
if (::v8::internal::FLAG_trace_sim) {
PrintF(
"Call to host function at %p "
- "args %08x, %08x, %08x, %08x, %08x, %08x\n",
+ "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x\n",
static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
- arg4, arg5);
+ arg4, arg5, arg6, arg7, arg8);
}
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ int64_t result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
set_register(v0, static_cast<int32_t>(result));
set_register(v1, static_cast<int32_t>(result >> 32));
}
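
With kMaxCParameters raised to nine, arguments 4..8 travel on the stack; a0-a3 carry the first four, whose home slots occupy the bottom of the frame under the O32 convention. A sketch of the layout the simulator reads above, assuming 4-byte stack slots:

// sp +  0..12 : home slots for a0..a3 (arg0..arg3)
// sp + 16     : arg4      sp + 24 : arg6      sp + 32 : arg8
// sp + 20     : arg5      sp + 28 : arg7
int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
int32_t arg8 = stack_pointer[8];  // 8 words above sp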
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 2785f913c9..1ed96bd003 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -26,18 +26,15 @@ namespace internal {
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4)
-typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
- void*, int*, int, Address, int, Isolate*);
-
+typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*, int*,
+ int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
-// The fifth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
- (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6, \
- p7, p8))
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
+ p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
@@ -466,7 +463,7 @@ class Simulator {
// Exceptions.
void SignalException(Exception e);
- // Runtime call support.
+ // Runtime call support. Uses the isolate in a thread-safe way.
static void* RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type);
@@ -530,9 +527,8 @@ class Simulator {
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
- Simulator::current(isolate) \
- ->Call(entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
-
+ Simulator::current(isolate)->Call(entry, 9, p0, p1, p2, p3, p4, p5, p6, p7, \
+ p8)
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. The JS-based limit normally points near the end of
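
Both macros above now agree with the nine-parameter mips_regexp_matcher signature; the void* dummy that once reserved a return-address slot is gone from the direct call and from the simulator call alike. An illustrative invocation (the argument names are assumptions):

// p0..p8 map onto (String*, int, const byte*, const byte*, int*,
//                  int, Address, int, Isolate*):
int result = FUNCTION_CAST<mips_regexp_matcher>(entry)(
    subject, previous_index, input_start, input_end,
    output, output_size, stack_base, direct_call, isolate);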
diff --git a/deps/v8/src/mips64/OWNERS b/deps/v8/src/mips64/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/mips64/OWNERS
+++ b/deps/v8/src/mips64/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/mips64/assembler-mips64-inl.h b/deps/v8/src/mips64/assembler-mips64-inl.h
index 470394334d..e873e04e13 100644
--- a/deps/v8/src/mips64/assembler-mips64-inl.h
+++ b/deps/v8/src/mips64/assembler-mips64-inl.h
@@ -341,23 +341,23 @@ template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
+ visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
+ visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
+ visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
- visitor->VisitInternalReference(this);
+ visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
+ visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(this);
+ visitor->VisitDebugTarget(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
+ visitor->VisitRuntimeEntry(host(), this);
}
}
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index f1b6f9bb00..084d5db036 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -236,7 +236,6 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
-static const int kNegOffset = 0x00008000;
// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
@@ -246,10 +245,10 @@ const Instr kPopInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
const Instr kPushInstruction = DADDIU | (Register::kCode_sp << kRsShift) |
(Register::kCode_sp << kRtShift) |
(-kPointerSize & kImm16Mask); // NOLINT
-// sd(r, MemOperand(sp, 0))
+// Sd(r, MemOperand(sp, 0))
const Instr kPushRegPattern =
SD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
-// ld(r, MemOperand(sp, 0))
+// Ld(r, MemOperand(sp, 0))
const Instr kPopRegPattern =
LD | (Register::kCode_sp << kRsShift) | (0 & kImm16Mask); // NOLINT
@@ -2090,92 +2089,33 @@ void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
}
}
-// Helper for base-reg + upper part of offset, when offset is larger than int16.
-// Loads higher part of the offset to AT register.
-// Returns lower part of the offset to be used as offset
-// in Load/Store instructions
-int32_t Assembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
- DCHECK(!src.rm().is(at));
- DCHECK(is_int32(src.offset_));
- int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
- // If the highest bit of the lower part of the offset is 1, this would make
- // the offset in the load/store instruction negative. We need to compensate
- // for this by adding 1 to the upper part of the offset.
- if (src.offset_ & kNegOffset) {
- if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
- LoadRegPlusOffsetToAt(src);
- return 0;
- }
-
- hi += 1;
- }
-
- if (kArchVariant == kMips64r6) {
- daui(at, src.rm(), hi);
- } else {
- lui(at, hi);
- daddu(at, at, src.rm());
- }
- return (src.offset_ & kImm16Mask);
-}
-
void Assembler::lb(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LB, at, rd, off16);
- }
+ GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
}
void Assembler::lbu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LBU, at, rd, off16);
- }
+ GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
}
void Assembler::lh(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LH, at, rd, off16);
- }
+ GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
}
void Assembler::lhu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LHU, at, rd, off16);
- }
+ GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
}
void Assembler::lw(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LW, at, rd, off16);
- }
+ GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
}
void Assembler::lwu(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LWU, at, rd, off16);
- }
+ GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
}
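
Each raw load above now assumes an int16-range offset; splitting wider offsets has moved out of the Assembler (LoadRegPlusUpperOffsetPartToAt is deleted) and up into the capitalized MacroAssembler wrappers. A sketch of such a wrapper, assuming the surviving LoadRegPlusOffsetToAt helper (the real wrapper body is not shown in this diff):

void MacroAssembler::LwSketch(Register rd, const MemOperand& rs) {
  if (is_int16(rs.offset())) {
    lw(rd, rs);                 // in range: a single instruction
  } else {
    LoadRegPlusOffsetToAt(rs);  // at = base + offset
    lw(rd, MemOperand(at, 0));
  }
}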
@@ -2194,32 +2134,17 @@ void Assembler::lwr(Register rd, const MemOperand& rs) {
void Assembler::sb(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(SB, at, rd, off16);
- }
+ GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
}
void Assembler::sh(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(SH, at, rd, off16);
- }
+ GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
}
void Assembler::sw(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(SW, at, rd, off16);
- }
+ GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
}
@@ -2299,22 +2224,12 @@ void Assembler::sdr(Register rd, const MemOperand& rs) {
void Assembler::ld(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(LD, at, rd, off16);
- }
+ GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
}
void Assembler::sd(Register rd, const MemOperand& rs) {
- if (is_int16(rs.offset_)) {
- GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
- } else { // Offset > 16 bits, use multiple instructions to store.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
- GenInstrImmediate(SD, at, rd, off16);
- }
+ GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
}
@@ -2582,7 +2497,7 @@ void Assembler::selnez(Register rd, Register rs, Register rt) {
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
if (kArchVariant != kMips64r6) {
- // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+    // clz instr requires the same GPR number in 'rd' and 'rt' fields.
GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
} else {
GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
@@ -2602,7 +2517,7 @@ void Assembler::dclz(Register rd, Register rs) {
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
- // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
+ // ins instr has 'rt' field as dest, and two uint5: msb, lsb.
DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
@@ -2610,15 +2525,28 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::dins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Dins.
- // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ // dins instr has 'rt' field as dest, and two uint5: msb, lsb.
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, DINS);
}
+void Assembler::dinsm_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Dins.
+ // dinsm instr has 'rt' field as dest, and two uint5: msbminus32, lsb.
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1 - 32, pos, DINSM);
+}
+
+void Assembler::dinsu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Dins.
+ // dinsu instr has 'rt' field as dest, and two uint5: msbminus32, lsbminus32.
+ DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+ GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1 - 32, pos - 32, DINSU);
+}
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
- // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ // ext instr has 'rt' field as dest, and two uint5: msbd, lsb.
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
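
dins can only encode msb = pos + size - 1 when it fits in five bits, so the two new variants cover 64-bit fields: dinsm when the field's msb crosses bit 31 while its lsb stays below 32, dinsu when the whole field sits in the upper word. A dispatch sketch one level up (the real MacroAssembler::Dins selection logic is an assumption here):

void DinsSketch(Assembler* a, Register rt, Register rs,
                uint16_t pos, uint16_t size) {
  uint16_t msb = pos + size - 1;
  if (msb < 32 && pos < 32) {
    a->dins_(rt, rs, pos, size);   // field entirely in bits 0..31
  } else if (msb >= 32 && pos < 32) {
    a->dinsm_(rt, rs, pos, size);  // field straddles bit 31
  } else {
    a->dinsu_(rt, rs, pos, size);  // field entirely in bits 32..63
  }
}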
@@ -2626,23 +2554,21 @@ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Dext.
- // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ // dext instr has 'rt' field as dest, and two uint5: msbd, lsb.
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
}
-
-void Assembler::dextm(Register rt, Register rs, uint16_t pos, uint16_t size) {
+void Assembler::dextm_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Dextm.
- // Dextm instr has 'rt' field as dest, and two uint5: msb, lsb.
+ // dextm instr has 'rt' field as dest, and two uint5: msbdminus32, lsb.
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, size - 1 - 32, pos, DEXTM);
}
-
-void Assembler::dextu(Register rt, Register rs, uint16_t pos, uint16_t size) {
+void Assembler::dextu_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Dextu.
- // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ // dextu instr has 'rt' field as dest, and two uint5: msbd, lsbminus32.
DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos - 32, DEXTU);
}
@@ -2712,43 +2638,20 @@ void Assembler::seb(Register rd, Register rt) {
// Load, store, move.
void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
- if (is_int16(src.offset_)) {
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- GenInstrImmediate(LWC1, at, fd, off16);
- }
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
}
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
- if (is_int16(src.offset_)) {
- GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- GenInstrImmediate(LDC1, at, fd, off16);
- }
+ GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
}
-
-void Assembler::swc1(FPURegister fd, const MemOperand& src) {
- if (is_int16(src.offset_)) {
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- GenInstrImmediate(SWC1, at, fd, off16);
- }
+void Assembler::swc1(FPURegister fs, const MemOperand& src) {
+ GenInstrImmediate(SWC1, src.rm(), fs, src.offset_);
}
-
-void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
- DCHECK(!src.rm().is(at));
- if (is_int16(src.offset_)) {
- GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
- } else { // Offset > 16 bits, use multiple instructions to load.
- int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
- GenInstrImmediate(SDC1, at, fd, off16);
- }
+void Assembler::sdc1(FPURegister fs, const MemOperand& src) {
+ GenInstrImmediate(SDC1, src.rm(), fs, src.offset_);
}
@@ -3903,13 +3806,20 @@ void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
- CodeDesc desc; // The new buffer.
+ CodeDesc desc; // the new buffer
if (buffer_size_ < 1 * MB) {
desc.buffer_size = 2*buffer_size_;
} else {
desc.buffer_size = buffer_size_ + 1*MB;
}
- CHECK_GT(desc.buffer_size, 0); // No overflow.
+
+ // Some internal data structures overflow for very large buffers;
+ // kMaximalBufferSize must be kept small enough to avoid that.
+ if (desc.buffer_size > kMaximalBufferSize ||
+ static_cast<size_t>(desc.buffer_size) >
+ isolate_data().max_old_generation_size_) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
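
The growth policy doubles small buffers and grows large ones linearly, now bounded by the cap added above. A worked trace under these constants (1 MB doubling threshold, 512 MB kMaximalBufferSize):

// 64 KB -> 128 KB -> ... -> 1 MB        (doubling below 1 MB)
// 1 MB -> 2 MB -> 3 MB -> ... -> 512 MB, then FatalProcessOutOfMemory
int NextBufferSize(int size) {  // sketch of the policy above
  return size < 1 * MB ? 2 * size : size + 1 * MB;
}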
diff --git a/deps/v8/src/mips64/assembler-mips64.h b/deps/v8/src/mips64/assembler-mips64.h
index a57a566b21..dc78b890ed 100644
--- a/deps/v8/src/mips64/assembler-mips64.h
+++ b/deps/v8/src/mips64/assembler-mips64.h
@@ -361,6 +361,9 @@ constexpr DoubleRegister kDoubleRegZero = f28;
// Used on mips64r6 for compare operations.
// We use the last non-callee saved odd register for N64 ABI
constexpr DoubleRegister kDoubleCompareReg = f23;
+// MSA zero and scratch registers must have the same numbers as the FPU zero
+// and scratch registers.
+constexpr Simd128Register kSimd128RegZero = w28;
+constexpr Simd128Register kSimd128ScratchReg = w30;
// FPU (coprocessor 1) control registers.
// Currently only FCSR (#31) is implemented.
@@ -1002,9 +1005,11 @@ class Assembler : public AssemblerBase {
void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
void dext_(Register rt, Register rs, uint16_t pos, uint16_t size);
- void dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
- void dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dextm_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dextu_(Register rt, Register rs, uint16_t pos, uint16_t size);
void dins_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dinsm_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void dinsu_(Register rt, Register rs, uint16_t pos, uint16_t size);
void bitswap(Register rd, Register rt);
void dbitswap(Register rd, Register rt);
void align(Register rd, Register rs, Register rt, uint8_t bp);
@@ -1898,7 +1903,6 @@ class Assembler : public AssemblerBase {
// Helpers.
void LoadRegPlusOffsetToAt(const MemOperand& src);
- int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
@@ -1974,6 +1978,9 @@ class Assembler : public AssemblerBase {
inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes.
static constexpr int kBufferCheckInterval = 1 * KB / 2;
diff --git a/deps/v8/src/mips64/code-stubs-mips64.cc b/deps/v8/src/mips64/code-stubs-mips64.cc
index 1738ef432e..1b6b502522 100644
--- a/deps/v8/src/mips64/code-stubs-mips64.cc
+++ b/deps/v8/src/mips64/code-stubs-mips64.cc
@@ -25,7 +25,7 @@ namespace internal {
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ dsll(t9, a0, kPointerSizeLog2);
__ Daddu(t9, sp, t9);
- __ sd(a1, MemOperand(t9, 0));
+ __ Sd(a1, MemOperand(t9, 0));
__ Push(a1);
__ Push(a2);
__ Daddu(a0, a0, 3);
@@ -61,7 +61,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
__ Dsubu(sp, sp, Operand(param_count * kPointerSize));
for (int i = 0; i < param_count; ++i) {
// Store argument to stack.
- __ sd(descriptor.GetRegisterParameter(i),
+ __ Sd(descriptor.GetRegisterParameter(i),
MemOperand(sp, (param_count - 1 - i) * kPointerSize));
}
__ CallExternalReference(miss, param_count);
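
Throughout this file the mechanical rename from lowercase to capitalized mnemonics (sd -> Sd, ld -> Ld, lbu -> Lbu, ldc1 -> Ldc1, and so on) moves call sites from raw single instructions onto MacroAssembler wrappers, which are free to emit extra instructions when an offset no longer fits in 16 bits; the raw forms, per the assembler changes earlier in this diff, now simply assume it fits. An illustrative pair (the wrapper behavior is assumed from those changes):

__ sd(a1, MemOperand(t9, 0));  // raw: exactly one SD, int16 offset only
__ Sd(a1, MemOperand(t9, 0));  // macro: may split a wide offset through at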
@@ -91,7 +91,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ Push(scratch, scratch2, scratch3);
if (!skip_fastpath()) {
// Load double input.
- __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
+ __ Ldc1(double_scratch, MemOperand(input_reg, double_offset));
// Clear cumulative exception flags and save the FCSR.
__ cfc1(scratch2, FCSR);
@@ -123,9 +123,9 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Register input_high = scratch2;
Register input_low = scratch3;
- __ lw(input_low,
+ __ Lw(input_low,
MemOperand(input_reg, double_offset + Register::kMantissaOffset));
- __ lw(input_high,
+ __ Lw(input_high,
MemOperand(input_reg, double_offset + Register::kExponentOffset));
Label normal_exponent, restore_sign;
@@ -281,7 +281,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
// Read top bits of double representation (second word of value).
- __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ __ Lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
// Test that exponent bits are all set.
__ And(a7, a6, Operand(exp_mask_reg));
// If all bits not set (ne cond), then not a NaN, objects are equal.
@@ -290,7 +290,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Shift out flag and all exponent bits, retaining only mantissa.
__ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
// Or with all low-bits of mantissa.
- __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ Lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
__ Or(v0, a7, Operand(a6));
// For equal we already have the right value in v0: Return zero (equal)
// if all bits in mantissa are zero (it's an Infinity) and non-zero if
@@ -343,7 +343,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ SmiUntag(at, rhs);
__ mtc1(at, f14);
__ cvt_d_w(f14, f14);
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
// We now have both loaded as doubles.
__ jmp(both_loaded_as_doubles);
@@ -367,7 +367,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ SmiUntag(at, lhs);
__ mtc1(at, f12);
__ cvt_d_w(f12, f12);
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
// Fall through to both_loaded_as_doubles.
}
@@ -418,14 +418,14 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
Label* slow) {
__ GetObjectType(lhs, a3, a2);
__ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
- __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
// If first was a heap number & second wasn't, go to slow case.
__ Branch(slow, ne, a3, Operand(a2));
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
__ jmp(both_loaded_as_doubles);
}
@@ -458,10 +458,10 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ mov(v0, a0); // In delay slot.
__ bind(&object_test);
- __ ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
- __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
+ __ Ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ Lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
__ And(at, t0, Operand(1 << Map::kIsUndetectable));
__ Branch(&undetectable, ne, at, Operand(zero_reg));
__ And(at, t1, Operand(1 << Map::kIsUndetectable));
@@ -760,7 +760,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ ldc1(double_exponent,
+ __ Ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
@@ -996,7 +996,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&find_ra);
// This spot was reserved in EnterExitFrame.
- __ sd(ra, MemOperand(sp, result_stack_size));
+ __ Sd(ra, MemOperand(sp, result_stack_size));
// Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
@@ -1012,9 +1012,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (result_size() > 2) {
DCHECK_EQ(3, result_size());
// Read result values stored on stack.
- __ ld(a0, MemOperand(v0, 2 * kPointerSize));
- __ ld(v1, MemOperand(v0, 1 * kPointerSize));
- __ ld(v0, MemOperand(v0, 0 * kPointerSize));
+ __ Ld(a0, MemOperand(v0, 2 * kPointerSize));
+ __ Ld(v1, MemOperand(v0, 1 * kPointerSize));
+ __ Ld(v0, MemOperand(v0, 0 * kPointerSize));
}
// Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
@@ -1030,7 +1030,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
__ li(a2, Operand(pending_exception_address));
- __ ld(a2, MemOperand(a2));
+ __ Ld(a2, MemOperand(a2));
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
// Cannot use check here as it attempts to generate call into runtime.
__ Branch(&okay, eq, a4, Operand(a2));
@@ -1081,24 +1081,24 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Retrieve the handler context, SP and FP.
__ li(cp, Operand(pending_handler_context_address));
- __ ld(cp, MemOperand(cp));
+ __ Ld(cp, MemOperand(cp));
__ li(sp, Operand(pending_handler_sp_address));
- __ ld(sp, MemOperand(sp));
+ __ Ld(sp, MemOperand(sp));
__ li(fp, Operand(pending_handler_fp_address));
- __ ld(fp, MemOperand(fp));
+ __ Ld(fp, MemOperand(fp));
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (cp == 0) for non-JS frames.
Label zero;
__ Branch(&zero, eq, cp, Operand(zero_reg));
- __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
// Compute the handler entry address and jump to it.
__ li(a1, Operand(pending_handler_code_address));
- __ ld(a1, MemOperand(a1));
+ __ Ld(a1, MemOperand(a1));
__ li(a2, Operand(pending_handler_offset_address));
- __ ld(a2, MemOperand(a2));
+ __ Ld(a2, MemOperand(a2));
__ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Daddu(t9, a1, a2);
__ Jump(t9);
@@ -1143,7 +1143,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ li(a5, Operand(StackFrame::TypeToMarker(marker)));
ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
__ li(a4, Operand(c_entry_fp));
- __ ld(a4, MemOperand(a4));
+ __ Ld(a4, MemOperand(a4));
__ Push(a7, a6, a5, a4);
// Set up frame pointer for the frame to be pushed.
__ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
@@ -1168,9 +1168,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
Label non_outermost_js;
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
__ li(a5, Operand(ExternalReference(js_entry_sp)));
- __ ld(a6, MemOperand(a5));
+ __ Ld(a6, MemOperand(a5));
__ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
- __ sd(fp, MemOperand(a5));
+ __ Sd(fp, MemOperand(a5));
__ li(a4, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
@@ -1191,7 +1191,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// signal the existence of the JSEntry frame.
__ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
- __ sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
+ __ Sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
__ LoadRoot(v0, Heap::kExceptionRootIndex);
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
@@ -1230,7 +1230,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
__ li(a4, Operand(entry));
}
- __ ld(t9, MemOperand(a4)); // Deref address.
+ __ Ld(t9, MemOperand(a4)); // Deref address.
// Call JSEntryTrampoline.
__ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
__ Call(t9);
@@ -1245,14 +1245,14 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Branch(&non_outermost_js_2, ne, a5,
Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ li(a5, Operand(ExternalReference(js_entry_sp)));
- __ sd(zero_reg, MemOperand(a5));
+ __ Sd(zero_reg, MemOperand(a5));
__ bind(&non_outermost_js_2);
// Restore the top frame descriptors from the stack.
__ pop(a5);
__ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
isolate)));
- __ sd(a5, MemOperand(a4));
+ __ Sd(a5, MemOperand(a4));
// Reset the stack to the callee saved registers.
__ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
@@ -1266,86 +1266,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Jump(ra);
}
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- // This case is handled prior to the RegExpExecStub call.
- __ Abort(kUnexpectedRegExpExecCall);
-#else // V8_INTERPRETED_REGEXP
- // Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 9;
- const int kParameterRegisters = 8;
- __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
-
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are before that on the stack or in registers, meaning we
- // treat the return address as argument 5. Thus every argument after that
- // needs to be shifted back by 1. Since DirectCEntryStub will handle
- // allocating space for the c argument slots, we don't need to calculate
- // that into the argument positions on the stack. This is how the stack will
- // look (sp meaning the value of sp at this moment):
- // Abi n64:
- // [sp + 1] - Argument 9
- // [sp + 0] - saved ra
- // Abi O32:
- // [sp + 5] - Argument 9
- // [sp + 4] - Argument 8
- // [sp + 3] - Argument 7
- // [sp + 2] - Argument 6
- // [sp + 1] - Argument 5
- // [sp + 0] - saved ra
-
- // Argument 9: Pass current isolate address.
- __ li(t1, Operand(ExternalReference::isolate_address(isolate())));
- __ sd(t1, MemOperand(sp, 1 * kPointerSize));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ li(a7, Operand(1));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ li(t1, Operand(address_of_regexp_stack_memory_address));
- __ ld(t1, MemOperand(t1, 0));
- __ li(t2, Operand(address_of_regexp_stack_memory_size));
- __ ld(t2, MemOperand(t2, 0));
- __ daddu(a6, t1, t2);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(a5, zero_reg);
-
- // Argument 5: static offsets vector buffer.
- __ li(
- a4,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
-
- // Argument 4, a3: End of string data
- // Argument 3, a2: Start of string data
- CHECK(a3.is(RegExpExecDescriptor::StringEndRegister()));
- CHECK(a2.is(RegExpExecDescriptor::StringStartRegister()));
-
- // Argument 2 (a1): Previous index.
- CHECK(a1.is(RegExpExecDescriptor::LastIndexRegister()));
-
- // Argument 1 (a0): Subject string.
- CHECK(a0.is(RegExpExecDescriptor::StringRegister()));
-
- // Locate the code entry and call it.
- Register code_reg = RegExpExecDescriptor::CodeRegister();
- __ Daddu(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm, code_reg);
-
- __ LeaveExitFrame(false, no_reg, true);
-
- // Return the smi-tagged result.
- __ SmiTag(v0);
- __ Ret();
-#endif // V8_INTERPRETED_REGEXP
-}
-
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// a0 : number of arguments to the construct function
@@ -1388,7 +1308,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Load the cache state into a5.
__ dsrl(a5, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a5));
- __ ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ Ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
@@ -1397,11 +1317,11 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Label check_allocation_site;
Register feedback_map = a6;
Register weak_value = t0;
- __ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
+ __ Ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
__ Branch(&done, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&done, eq, a5, Operand(at));
- __ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
+ __ Ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
__ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
@@ -1434,7 +1354,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ dsrl(a5, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a5));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ Sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
__ jmp(&done);
// An uninitialized cache is patched with the function.
@@ -1460,9 +1380,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Increment the call count for all function calls.
__ SmiScale(a4, a3, kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a4));
- __ ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
+ __ Ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
- __ sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
+ __ Sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
}
@@ -1485,8 +1405,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Daddu(a5, a2, at);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into a2, or undefined.
- __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
- __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ Ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ Ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&feedback_register_initialized, eq, a5, Operand(at));
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
@@ -1499,8 +1419,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
- __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
__ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
@@ -1521,8 +1441,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ __ Ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
__ And(a4, result_, Operand(kIsNotStringMask));
__ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
@@ -1534,7 +1454,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ bind(&got_smi_index_);
// Check for index out of range.
- __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
+ __ Ld(a4, FieldMemOperand(object_, String::kLengthOffset));
__ Branch(index_out_of_range_, ls, a4, Operand(index_));
__ SmiUntag(index_);
@@ -1583,8 +1503,8 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ pop(object_);
}
// Reload the instance type.
- __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ __ Ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
__ JumpIfNotSmi(index_, index_out_of_range_);
@@ -1615,8 +1535,8 @@ void StringHelper::GenerateFlatOneByteStringEquals(
// Compare lengths.
Label strings_not_equal, check_zero_length;
- __ ld(length, FieldMemOperand(left, String::kLengthOffset));
- __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Ld(length, FieldMemOperand(left, String::kLengthOffset));
+ __ Ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ Branch(&check_zero_length, eq, length, Operand(scratch2));
__ bind(&strings_not_equal);
// Can not put li in delayslot, it has multi instructions.
@@ -1649,8 +1569,8 @@ void StringHelper::GenerateCompareFlatOneByteStrings(
Register scratch2, Register scratch3, Register scratch4) {
Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
- __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ Ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ Dsubu(scratch3, scratch1, Operand(scratch2));
Register length_delta = scratch3;
__ slt(scratch4, scratch2, scratch1);
@@ -1704,9 +1624,9 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
Label loop;
__ bind(&loop);
__ Daddu(scratch3, left, index);
- __ lbu(scratch1, MemOperand(scratch3));
+ __ Lbu(scratch1, MemOperand(scratch3));
__ Daddu(scratch3, right, index);
- __ lbu(scratch2, MemOperand(scratch3));
+ __ Lbu(scratch2, MemOperand(scratch3));
__ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
__ Daddu(index, index, 1);
__ Branch(&loop, ne, index, Operand(zero_reg));
@@ -1729,7 +1649,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ And(at, a2, Operand(kSmiTagMask));
__ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
- __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ Ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
}
@@ -1748,9 +1668,9 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (!Token::IsEqualityOp(op())) {
- __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
+ __ Ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
__ AssertSmi(a1);
- __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+ __ Ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
__ AssertSmi(a0);
}
__ Ret(USE_DELAY_SLOT);
@@ -1806,7 +1726,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
DONT_DO_SMI_CHECK);
__ Dsubu(a2, a0, Operand(kHeapObjectTag));
- __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
__ Branch(&left);
__ bind(&right_smi);
__ SmiUntag(a2, a0); // Can't clobber a0 yet.
@@ -1819,7 +1739,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
DONT_DO_SMI_CHECK);
__ Dsubu(a2, a1, Operand(kHeapObjectTag));
- __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
__ Branch(&done);
__ bind(&left_smi);
__ SmiUntag(a2, a1); // Can't clobber a1 yet.
@@ -1891,10 +1811,10 @@ void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ JumpIfEitherSmi(left, right, &miss);
// Check that both operands are internalized strings.
- __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ __ Ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ Ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ Lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ Lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ Or(tmp1, tmp1, Operand(tmp2));
__ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
@@ -1933,10 +1853,10 @@ void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ __ Ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ Ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ Lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ Lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
__ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
@@ -1981,10 +1901,10 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Check that both operands are strings. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ __ Ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ Ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ Lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ Lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kNotStringTag != 0);
__ Or(tmp3, tmp1, tmp2);
__ And(tmp5, tmp3, Operand(kIsNotStringMask));
@@ -2081,8 +2001,8 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
__ And(a2, a1, a0);
__ JumpIfSmi(a2, &miss);
__ GetWeakValue(a4, cell);
- __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ Ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&miss, ne, a2, Operand(a4));
__ Branch(&miss, ne, a3, Operand(a4));
@@ -2114,7 +2034,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ daddiu(sp, sp, -kPointerSize);
__ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
USE_DELAY_SLOT);
- __ sd(a4, MemOperand(sp)); // In the delay slot.
+ __ Sd(a4, MemOperand(sp)); // In the delay slot.
// Compute the entry point of the rewritten stub.
__ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -2133,9 +2053,9 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
__ daddiu(sp, sp, -kCArgsSlotsSize);
// Place the return address on the stack, making the call
// GC safe. The RegExp backend also relies on this.
- __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
+ __ Sd(ra, MemOperand(sp, kCArgsSlotsSize));
__ Call(t9); // Call the C++ function.
- __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
+ __ Ld(t9, MemOperand(sp, kCArgsSlotsSize));
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
// In case of an error the return address may point to a memory area
@@ -2192,7 +2112,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register tmp = properties;
__ Dlsa(tmp, properties, index, kPointerSizeLog2);
- __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+ __ Ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
DCHECK(!tmp.is(entity_name));
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
@@ -2208,15 +2128,13 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Branch(&good, eq, entity_name, Operand(tmp));
// Check if the entry name is not a unique name.
- __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ lbu(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ Ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ Lbu(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ bind(&good);
// Restore the properties.
- __ ld(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
}
const int spill_mask =
@@ -2224,7 +2142,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
a2.bit() | a1.bit() | a0.bit() | v0.bit());
__ MultiPush(spill_mask);
- __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ li(a1, Operand(Handle<Name>(name)));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
@@ -2258,11 +2176,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
- __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
+ __ Ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
__ SmiUntag(mask);
__ Dsubu(mask, mask, Operand(1));
- __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ Lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
@@ -2290,7 +2208,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize == 1);
__ Dlsa(index, dictionary, index, kPointerSizeLog2);
- __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
+ __ Ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
// Having undefined at this place means the name is not contained.
__ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
@@ -2300,9 +2218,8 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
- __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ lbu(entry_key,
- FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ Ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ Lbu(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
@@ -2384,7 +2301,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
- __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ Ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
__ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
regs_.scratch0(),
&dont_need_remembered_set);
@@ -2462,7 +2379,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
__ bind(&on_black);
// Get the value from the slot.
- __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ Ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
@@ -2517,7 +2434,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
- __ ld(a1, MemOperand(fp, parameter_count_offset));
+ __ Ld(a1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Daddu(a1, a1, Operand(1));
}
@@ -2645,7 +2562,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ Branch(&normal_sequence, ne, at, Operand(zero_reg));
}
// look at the first argument
- __ ld(a5, MemOperand(sp, 0));
+ __ Ld(a5, MemOperand(sp, 0));
__ Branch(&normal_sequence, eq, a5, Operand(zero_reg));
if (mode == DISABLE_ALLOCATION_SITES) {
@@ -2668,7 +2585,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ Daddu(a3, a3, Operand(1));
if (FLAG_debug_code) {
- __ ld(a5, FieldMemOperand(a2, 0));
+ __ Ld(a5, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
}
@@ -2677,10 +2594,9 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ Ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
- __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
-
+ __ Sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -2764,7 +2680,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ SmiTst(a4, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
@@ -2778,7 +2694,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
}
// Enter the context of the Array function.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
Label subclassing;
__ Branch(&subclassing, ne, a1, Operand(a3));
@@ -2788,7 +2704,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
- __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ Ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
@@ -2800,7 +2716,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
__ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ sd(a1, MemOperand(at));
+ __ Sd(a1, MemOperand(at));
__ li(at, Operand(3));
__ Daddu(a0, a0, at);
__ Push(a3, a2);
@@ -2820,7 +2736,7 @@ void InternalArrayConstructorStub::GenerateCase(
if (IsFastPackedElementsKind(kind)) {
  // We might need to create a holey array;
  // look at the first argument.
- __ ld(at, MemOperand(sp, 0));
+ __ Ld(at, MemOperand(sp, 0));
InternalArraySingleArgumentConstructorStub
stub1_holey(isolate(), GetHoleyElementsKind(kind));
@@ -2845,7 +2761,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
  // A zero value will indicate both a NULL and a Smi.
__ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
@@ -2856,11 +2772,11 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
// Figure out the right elements kind.
- __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into a3. We only need the first byte,
// but the following bit field extraction takes care of that anyway.
- __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+ __ Lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ DecodeField<Map::ElementsKindBits>(a3);
@@ -2910,7 +2826,7 @@ static void CallApiFunctionAndReturn(
Label profiler_disabled;
Label end_profiler_check;
__ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
- __ lb(t9, MemOperand(t9, 0));
+ __ Lb(t9, MemOperand(t9, 0));
__ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
// Additional parameter is the address of the actual callback.
@@ -2923,11 +2839,11 @@ static void CallApiFunctionAndReturn(
// Allocate HandleScope in callee-save registers.
__ li(s3, Operand(next_address));
- __ ld(s0, MemOperand(s3, kNextOffset));
- __ ld(s1, MemOperand(s3, kLimitOffset));
- __ lw(s2, MemOperand(s3, kLevelOffset));
+ __ Ld(s0, MemOperand(s3, kNextOffset));
+ __ Ld(s1, MemOperand(s3, kLimitOffset));
+ __ Lw(s2, MemOperand(s3, kLevelOffset));
__ Addu(s2, s2, Operand(1));
- __ sw(s2, MemOperand(s3, kLevelOffset));
+ __ Sw(s2, MemOperand(s3, kLevelOffset));
if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL);
@@ -2961,19 +2877,19 @@ static void CallApiFunctionAndReturn(
Label return_value_loaded;
// Load value from ReturnValue.
- __ ld(v0, return_value_operand);
+ __ Ld(v0, return_value_operand);
__ bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
- __ sd(s0, MemOperand(s3, kNextOffset));
+ __ Sd(s0, MemOperand(s3, kNextOffset));
if (__ emit_debug_code()) {
- __ lw(a1, MemOperand(s3, kLevelOffset));
+ __ Lw(a1, MemOperand(s3, kLevelOffset));
__ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
}
__ Subu(s2, s2, Operand(1));
- __ sw(s2, MemOperand(s3, kLevelOffset));
- __ ld(at, MemOperand(s3, kLimitOffset));
+ __ Sw(s2, MemOperand(s3, kLevelOffset));
+ __ Ld(at, MemOperand(s3, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(at));
// Leave the API exit frame.
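
For context, a rough C++ sketch of the HandleScope bookkeeping this stub wraps
around the callback, assuming the three isolate fields mirrored into s0/s1/s2
above (struct and function names are illustrative):

    struct HandleScopeData {
      void** next;   // kNextOffset
      void** limit;  // kLimitOffset
      int level;     // kLevelOffset (32-bit, hence the Lw/Sw accesses)
    };

    void CallApiSketch(HandleScopeData* d, void (*callback)()) {
      void** prev_next = d->next;    // s0
      void** prev_limit = d->limit;  // s1
      int prev_level = d->level;     // s2
      d->level = prev_level + 1;
      callback();
      d->next = prev_next;           // drop handles the callback created
      // With debug code enabled, d->level must still equal
      // prev_level + 1 here (the Check against s2).
      d->level = prev_level;
      if (d->limit != prev_limit) {
        // The callback extended the scope: take the
        // delete_allocated_handles slow path.
      }
    }
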
@@ -2981,11 +2897,11 @@ static void CallApiFunctionAndReturn(
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
- __ ld(cp, *context_restore_operand);
+ __ Ld(cp, *context_restore_operand);
}
if (stack_space_offset != kInvalidStackOffset) {
DCHECK(kCArgsSlotsSize == 0);
- __ ld(s0, MemOperand(sp, stack_space_offset));
+ __ Ld(s0, MemOperand(sp, stack_space_offset));
} else {
__ li(s0, Operand(stack_space));
}
@@ -2995,7 +2911,7 @@ static void CallApiFunctionAndReturn(
// Check if the function scheduled an exception.
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
__ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
- __ ld(a5, MemOperand(at));
+ __ Ld(a5, MemOperand(at));
__ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
__ Ret();
@@ -3006,7 +2922,7 @@ static void CallApiFunctionAndReturn(
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
- __ sd(s1, MemOperand(s3, kLimitOffset));
+ __ Sd(s1, MemOperand(s3, kLimitOffset));
__ mov(s0, v0);
__ mov(a0, v0);
__ PrepareCallCFunction(1, s1);
@@ -3056,13 +2972,11 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ Push(context, callee, call_data);
if (!is_lazy()) {
// Load context from callee.
- __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ __ Ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
Register scratch = call_data;
- if (!call_data_undefined()) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- }
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
// Push return value and default return value.
__ Push(scratch, scratch);
__ li(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
@@ -3084,16 +2998,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
  // Arguments are after the return address.
__ Daddu(a0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
- __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
+ __ Sd(scratch, MemOperand(a0, 0 * kPointerSize));
// FunctionCallbackInfo::values_
__ Daddu(at, scratch,
Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
- __ sd(at, MemOperand(a0, 1 * kPointerSize));
+ __ Sd(at, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
  // Stored as an int field; 32-bit integers within a struct on the stack are
  // always left-justified by the n64 ABI.
__ li(at, Operand(argc()));
- __ sw(at, MemOperand(a0, 2 * kPointerSize));
+ __ Sw(at, MemOperand(a0, 2 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
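
The three stores above lay out what the C++ side reads back as, roughly, the
following struct (a sketch of the FunctionCallbackInfo header; the API indexes
arguments downward from values_):

    struct FunctionCallbackInfoSketch {
      void** implicit_args;  // Sd at +0: start of the FCA array
      void** values;   // Sd at +8: implicit_args + (kArgsLength - 1 + argc) slots
      int length;      // Sw at +16: argc, a 32-bit int per the n64 ABI
    };
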
@@ -3143,22 +3057,22 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Here and below +1 is for name() pushed after the args_ array.
typedef PropertyCallbackArguments PCA;
__ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
- __ sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
- __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
- __ sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
kPointerSize));
__ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
- __ sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
- __ sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
// should_throw_on_error -> false
DCHECK(Smi::kZero == nullptr);
- __ sd(zero_reg,
+ __ Sd(zero_reg,
MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
- __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
- __ sd(scratch, MemOperand(sp, 0 * kPointerSize));
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ Sd(scratch, MemOperand(sp, 0 * kPointerSize));
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
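
Taken together, the stores above build this frame, listed here in store order
(indices are the symbolic PCA constants used in the code; name() sits in slot
0, which is why every args_ slot is offset by +1):

    sp + (PCA::kThisIndex + 1) * kPointerSize                    -> receiver
    sp + (PCA::kDataIndex + 1) * kPointerSize                    -> callback data
    sp + (PCA::kReturnValueOffset + 1) * kPointerSize            -> undefined
    sp + (PCA::kReturnValueDefaultValueIndex + 1) * kPointerSize -> undefined
    sp + (PCA::kIsolateIndex + 1) * kPointerSize                 -> isolate address
    sp + (PCA::kHolderIndex + 1) * kPointerSize                  -> holder
    sp + (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize      -> Smi::kZero (false)
    sp + 0 * kPointerSize                                        -> name handle
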
@@ -3173,15 +3087,15 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Create v8::PropertyCallbackInfo object on the stack and initialize
  // its args_ field.
- __ sd(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Sd(a1, MemOperand(sp, 1 * kPointerSize));
__ Daddu(a1, sp, Operand(1 * kPointerSize));
// a1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
- __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ ld(api_function_address,
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ Ld(api_function_address,
FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.
diff --git a/deps/v8/src/mips64/codegen-mips64.cc b/deps/v8/src/mips64/codegen-mips64.cc
index 4508760a8a..6bd0b7a7d9 100644
--- a/deps/v8/src/mips64/codegen-mips64.cc
+++ b/deps/v8/src/mips64/codegen-mips64.cc
@@ -126,53 +126,53 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
}
__ bind(&loop16w);
- __ lw(a4, MemOperand(a1));
+ __ Lw(a4, MemOperand(a1));
if (pref_hint_store == kPrefHintPrepareForStore) {
__ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch.
__ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
}
- __ lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
+ __ Lw(a5, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot.
__ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
__ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
__ bind(&skip_pref);
- __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
- __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
- __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
- __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
- __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
- __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
+ __ Lw(a6, MemOperand(a1, 2, loadstore_chunk));
+ __ Lw(a7, MemOperand(a1, 3, loadstore_chunk));
+ __ Lw(t0, MemOperand(a1, 4, loadstore_chunk));
+ __ Lw(t1, MemOperand(a1, 5, loadstore_chunk));
+ __ Lw(t2, MemOperand(a1, 6, loadstore_chunk));
+ __ Lw(t3, MemOperand(a1, 7, loadstore_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
- __ sw(a4, MemOperand(a0));
- __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
- __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
- __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
- __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
-
- __ lw(a4, MemOperand(a1, 8, loadstore_chunk));
- __ lw(a5, MemOperand(a1, 9, loadstore_chunk));
- __ lw(a6, MemOperand(a1, 10, loadstore_chunk));
- __ lw(a7, MemOperand(a1, 11, loadstore_chunk));
- __ lw(t0, MemOperand(a1, 12, loadstore_chunk));
- __ lw(t1, MemOperand(a1, 13, loadstore_chunk));
- __ lw(t2, MemOperand(a1, 14, loadstore_chunk));
- __ lw(t3, MemOperand(a1, 15, loadstore_chunk));
+ __ Sw(a4, MemOperand(a0));
+ __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
+ __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
+ __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
+ __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
+ __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
+ __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
+ __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
+
+ __ Lw(a4, MemOperand(a1, 8, loadstore_chunk));
+ __ Lw(a5, MemOperand(a1, 9, loadstore_chunk));
+ __ Lw(a6, MemOperand(a1, 10, loadstore_chunk));
+ __ Lw(a7, MemOperand(a1, 11, loadstore_chunk));
+ __ Lw(t0, MemOperand(a1, 12, loadstore_chunk));
+ __ Lw(t1, MemOperand(a1, 13, loadstore_chunk));
+ __ Lw(t2, MemOperand(a1, 14, loadstore_chunk));
+ __ Lw(t3, MemOperand(a1, 15, loadstore_chunk));
__ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
- __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
- __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
- __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
- __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
- __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
+ __ Sw(a4, MemOperand(a0, 8, loadstore_chunk));
+ __ Sw(a5, MemOperand(a0, 9, loadstore_chunk));
+ __ Sw(a6, MemOperand(a0, 10, loadstore_chunk));
+ __ Sw(a7, MemOperand(a0, 11, loadstore_chunk));
+ __ Sw(t0, MemOperand(a0, 12, loadstore_chunk));
+ __ Sw(t1, MemOperand(a0, 13, loadstore_chunk));
+ __ Sw(t2, MemOperand(a0, 14, loadstore_chunk));
+ __ Sw(t3, MemOperand(a0, 15, loadstore_chunk));
__ addiu(a0, a0, 16 * loadstore_chunk);
__ bne(a0, a3, &loop16w);
__ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
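
The fast path above is a classic blocked copy. A runnable C++ sketch of its
shape (loadstore_chunk is 4 bytes as in the stub; the prefetch hints and the
unaligned lwr/lwl variant further below are omitted):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    void MemCopySketch(uint8_t* dst, const uint8_t* src, size_t n) {
      const size_t chunk = 4;       // loadstore_chunk
      while (n >= 16 * chunk) {     // &loop16w: 16 loads, then 16 stores
        uint32_t t[16];
        std::memcpy(t, src, sizeof(t));
        std::memcpy(dst, t, sizeof(t));
        src += sizeof(t); dst += sizeof(t); n -= sizeof(t);
      }
      if (n >= 8 * chunk) {         // one 8-word block ("Less than 32?")
        uint32_t t[8];
        std::memcpy(t, src, sizeof(t));
        std::memcpy(dst, t, sizeof(t));
        src += sizeof(t); dst += sizeof(t); n -= sizeof(t);
      }
      while (n >= chunk) {          // &wordCopy_loop
        std::memcpy(dst, src, chunk);
        src += chunk; dst += chunk; n -= chunk;
      }
      while (n--) *dst++ = *src++;  // &lastbloop
    }
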
@@ -186,23 +186,23 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ andi(t8, a2, 0x1f);
__ beq(a2, t8, &chk1w); // Less than 32?
__ nop(); // In delay slot.
- __ lw(a4, MemOperand(a1));
- __ lw(a5, MemOperand(a1, 1, loadstore_chunk));
- __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
- __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
- __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
- __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
- __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
- __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
+ __ Lw(a4, MemOperand(a1));
+ __ Lw(a5, MemOperand(a1, 1, loadstore_chunk));
+ __ Lw(a6, MemOperand(a1, 2, loadstore_chunk));
+ __ Lw(a7, MemOperand(a1, 3, loadstore_chunk));
+ __ Lw(t0, MemOperand(a1, 4, loadstore_chunk));
+ __ Lw(t1, MemOperand(a1, 5, loadstore_chunk));
+ __ Lw(t2, MemOperand(a1, 6, loadstore_chunk));
+ __ Lw(t3, MemOperand(a1, 7, loadstore_chunk));
__ addiu(a1, a1, 8 * loadstore_chunk);
- __ sw(a4, MemOperand(a0));
- __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
- __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
- __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
- __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+ __ Sw(a4, MemOperand(a0));
+ __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
+ __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
+ __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
+ __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
+ __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
+ __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
+ __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
__ addiu(a0, a0, 8 * loadstore_chunk);
// Here we have less than 32 bytes to copy. Set up for a loop to copy
@@ -217,22 +217,22 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ addu(a3, a0, a3);
__ bind(&wordCopy_loop);
- __ lw(a7, MemOperand(a1));
+ __ Lw(a7, MemOperand(a1));
__ addiu(a0, a0, loadstore_chunk);
__ addiu(a1, a1, loadstore_chunk);
__ bne(a0, a3, &wordCopy_loop);
- __ sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+ __ Sw(a7, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
__ bind(&lastb);
__ Branch(&leave, le, a2, Operand(zero_reg));
__ addu(a3, a0, a2);
__ bind(&lastbloop);
- __ lb(v1, MemOperand(a1));
+ __ Lb(v1, MemOperand(a1));
__ addiu(a0, a0, 1);
__ addiu(a1, a1, 1);
__ bne(a0, a3, &lastbloop);
- __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+ __ Sb(v1, MemOperand(a0, -1)); // In delay slot.
__ bind(&leave);
__ jr(ra);
@@ -362,14 +362,14 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
}
__ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
- __ sw(a4, MemOperand(a0));
- __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
- __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
- __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
- __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+ __ Sw(a4, MemOperand(a0));
+ __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
+ __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
+ __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
+ __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
+ __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
+ __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
+ __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
if (kArchEndian == kLittle) {
__ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
__ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
@@ -422,14 +422,14 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
}
__ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
- __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
- __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
- __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
- __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
- __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
+ __ Sw(a4, MemOperand(a0, 8, loadstore_chunk));
+ __ Sw(a5, MemOperand(a0, 9, loadstore_chunk));
+ __ Sw(a6, MemOperand(a0, 10, loadstore_chunk));
+ __ Sw(a7, MemOperand(a0, 11, loadstore_chunk));
+ __ Sw(t0, MemOperand(a0, 12, loadstore_chunk));
+ __ Sw(t1, MemOperand(a0, 13, loadstore_chunk));
+ __ Sw(t2, MemOperand(a0, 14, loadstore_chunk));
+ __ Sw(t3, MemOperand(a0, 15, loadstore_chunk));
__ addiu(a0, a0, 16 * loadstore_chunk);
__ bne(a0, a3, &ua_loop16w);
__ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot.
@@ -496,14 +496,14 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
}
__ addiu(a1, a1, 8 * loadstore_chunk);
- __ sw(a4, MemOperand(a0));
- __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
- __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
- __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
- __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
- __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
- __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
- __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+ __ Sw(a4, MemOperand(a0));
+ __ Sw(a5, MemOperand(a0, 1, loadstore_chunk));
+ __ Sw(a6, MemOperand(a0, 2, loadstore_chunk));
+ __ Sw(a7, MemOperand(a0, 3, loadstore_chunk));
+ __ Sw(t0, MemOperand(a0, 4, loadstore_chunk));
+ __ Sw(t1, MemOperand(a0, 5, loadstore_chunk));
+ __ Sw(t2, MemOperand(a0, 6, loadstore_chunk));
+ __ Sw(t3, MemOperand(a0, 7, loadstore_chunk));
__ addiu(a0, a0, 8 * loadstore_chunk);
// Less than 32 bytes to copy. Set up for a loop to
@@ -527,7 +527,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ addiu(a0, a0, loadstore_chunk);
__ addiu(a1, a1, loadstore_chunk);
__ bne(a0, a3, &ua_wordCopy_loop);
- __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
+ __ Sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
// Copy the last 8 bytes.
__ bind(&ua_smallCopy);
@@ -535,11 +535,11 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ addu(a3, a0, a2); // In delay slot.
__ bind(&ua_smallCopy_loop);
- __ lb(v1, MemOperand(a1));
+ __ Lb(v1, MemOperand(a1));
__ addiu(a0, a0, 1);
__ addiu(a1, a1, 1);
__ bne(a0, a3, &ua_smallCopy_loop);
- __ sb(v1, MemOperand(a0, -1)); // In delay slot.
+ __ Sb(v1, MemOperand(a0, -1)); // In delay slot.
__ jr(ra);
__ nop();
@@ -616,8 +616,8 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&indirect_string_loaded);
// Fetch the instance type of the receiver into result register.
- __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ Ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
@@ -631,15 +631,15 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(&thin_string, eq, at, Operand(kThinStringTag));
// Handle slices.
- __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ Ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ Ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ dsra32(at, result, 0);
__ Daddu(index, index, at);
__ jmp(&indirect_string_loaded);
// Handle thin strings.
__ bind(&thin_string);
- __ ld(string, FieldMemOperand(string, ThinString::kActualOffset));
+ __ Ld(string, FieldMemOperand(string, ThinString::kActualOffset));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
@@ -648,11 +648,11 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the case we would rather go to the runtime system now to flatten
// the string.
__ bind(&cons_string);
- __ ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ Ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ LoadRoot(at, Heap::kempty_stringRootIndex);
__ Branch(call_runtime, ne, result, Operand(at));
// Get the first of the two strings and load its instance type.
- __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
+ __ Ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ jmp(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
@@ -684,7 +684,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
STATIC_ASSERT(kShortExternalStringTag != 0);
__ And(at, result, Operand(kShortExternalStringMask));
__ Branch(call_runtime, ne, at, Operand(zero_reg));
- __ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+ __ Ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label one_byte, done;
__ bind(&check_encoding);
@@ -693,12 +693,12 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(&one_byte, ne, at, Operand(zero_reg));
// Two-byte string.
__ Dlsa(at, string, index, 1);
- __ lhu(result, MemOperand(at));
+ __ Lhu(result, MemOperand(at));
__ jmp(&done);
__ bind(&one_byte);
  // One-byte string.
__ Daddu(at, string, index);
- __ lbu(result, MemOperand(at));
+ __ Lbu(result, MemOperand(at));
__ bind(&done);
}
diff --git a/deps/v8/src/mips64/constants-mips64.h b/deps/v8/src/mips64/constants-mips64.h
index a12acca06d..eb9fe4573d 100644
--- a/deps/v8/src/mips64/constants-mips64.h
+++ b/deps/v8/src/mips64/constants-mips64.h
@@ -1179,9 +1179,9 @@ inline Hint NegateHint(Hint hint) {
extern const Instr kPopInstruction;
// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
extern const Instr kPushInstruction;
-// sw(r, MemOperand(sp, 0))
+// Sw(r, MemOperand(sp, 0))
extern const Instr kPushRegPattern;
-// lw(r, MemOperand(sp, 0))
+// Lw(r, MemOperand(sp, 0))
extern const Instr kPopRegPattern;
extern const Instr kLwRegFpOffsetPattern;
extern const Instr kSwRegFpOffsetPattern;
@@ -1684,6 +1684,8 @@ const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2;
const int kInvalidStackOffset = -1;
const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
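+// Sign bit of a 16-bit immediate: set when the low half of a 32-bit
+// offset would sign-extend to a negative value.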
+static const int kNegOffset = 0x00008000;
+
InstructionBase::Type InstructionBase::InstructionType() const {
switch (OpcodeFieldRaw()) {
case SPECIAL:
@@ -1706,6 +1708,8 @@ InstructionBase::Type InstructionBase::InstructionType() const {
switch (FunctionFieldRaw()) {
case INS:
case DINS:
+ case DINSM:
+ case DINSU:
case EXT:
case DEXT:
case DEXTM:
diff --git a/deps/v8/src/mips64/deoptimizer-mips64.cc b/deps/v8/src/mips64/deoptimizer-mips64.cc
index 7243e8e9e7..804a176bce 100644
--- a/deps/v8/src/mips64/deoptimizer-mips64.cc
+++ b/deps/v8/src/mips64/deoptimizer-mips64.cc
@@ -30,25 +30,22 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- if (FLAG_zap_code_space) {
- // Fail hard and early if we enter this code object again.
- byte* pointer = code->FindCodeAgeSequence();
- if (pointer != NULL) {
- pointer += kNoCodeAgeSequenceLength;
- } else {
- pointer = code->instruction_start();
- }
- CodePatcher patcher(isolate, pointer, 1);
- patcher.masm()->break_(0xCC);
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int osr_offset = data->OsrPcOffset()->value();
- if (osr_offset > 0) {
- CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
- 1);
- osr_patcher.masm()->break_(0xCC);
- }
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(isolate, pointer, 1);
+ patcher.masm()->break_(0xCC);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 1);
+ osr_patcher.masm()->break_(0xCC);
}
DeoptimizationInputData* deopt_data =
@@ -123,7 +120,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
int offset = code * kDoubleSize;
- __ sdc1(fpu_reg, MemOperand(sp, offset));
+ __ Sdc1(fpu_reg, MemOperand(sp, offset));
}
// Save all float FPU registers before messing with them.
@@ -132,7 +129,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableFloatCode(i);
const FloatRegister fpu_reg = FloatRegister::from_code(code);
int offset = code * kFloatSize;
- __ swc1(fpu_reg, MemOperand(sp, offset));
+ __ Swc1(fpu_reg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
@@ -140,18 +137,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
if ((saved_regs & (1 << i)) != 0) {
- __ sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ __ Sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
}
}
__ li(a2, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- __ sd(fp, MemOperand(a2));
+ __ Sd(fp, MemOperand(a2));
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
// Get the bailout id from the stack.
- __ ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
+ __ Ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
// Get the address of the location in the code object (a3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
@@ -167,9 +164,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Pass six arguments, according to n64 ABI.
__ mov(a0, zero_reg);
Label context_check;
- __ ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ Ld(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(a1, &context_check);
- __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ li(a1, Operand(type())); // Bailout type.
// a2: bailout id already loaded.
@@ -187,18 +184,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// frame descriptor pointer to a1 (deoptimizer->input_);
// Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
__ mov(a0, v0);
- __ ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
+ __ Ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((saved_regs & (1 << i)) != 0) {
- __ ld(a2, MemOperand(sp, i * kPointerSize));
- __ sd(a2, MemOperand(a1, offset));
+ __ Ld(a2, MemOperand(sp, i * kPointerSize));
+ __ Sd(a2, MemOperand(a1, offset));
} else if (FLAG_debug_code) {
__ li(a2, kDebugZapValue);
- __ sd(a2, MemOperand(a1, offset));
+ __ Sd(a2, MemOperand(a1, offset));
}
}
@@ -210,8 +207,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int dst_offset = code * kDoubleSize + double_regs_offset;
int src_offset =
code * kDoubleSize + kNumberOfRegisters * kPointerSize + kFloatRegsSize;
- __ ldc1(f0, MemOperand(sp, src_offset));
- __ sdc1(f0, MemOperand(a1, dst_offset));
+ __ Ldc1(f0, MemOperand(sp, src_offset));
+ __ Sdc1(f0, MemOperand(a1, dst_offset));
}
int float_regs_offset = FrameDescription::float_registers_offset();
@@ -221,8 +218,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
int code = config->GetAllocatableFloatCode(i);
int dst_offset = code * kFloatSize + float_regs_offset;
int src_offset = code * kFloatSize + kNumberOfRegisters * kPointerSize;
- __ lwc1(f0, MemOperand(sp, src_offset));
- __ swc1(f0, MemOperand(a1, dst_offset));
+ __ Lwc1(f0, MemOperand(sp, src_offset));
+ __ Swc1(f0, MemOperand(a1, dst_offset));
}
// Remove the bailout id and the saved registers from the stack.
@@ -230,7 +227,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Compute a pointer to the unwinding limit in register a2; that is
// the first stack slot not part of the input frame.
- __ ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ Ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
__ Daddu(a2, a2, sp);
// Unwind the stack down to - but not including - the unwinding
@@ -242,7 +239,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ BranchShort(&pop_loop_header);
__ bind(&pop_loop);
__ pop(a4);
- __ sd(a4, MemOperand(a3, 0));
+ __ Sd(a4, MemOperand(a3, 0));
__ daddiu(a3, a3, sizeof(uint64_t));
__ bind(&pop_loop_header);
__ BranchShort(&pop_loop, ne, a2, Operand(sp));
@@ -258,26 +255,26 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
__ pop(a0); // Restore deoptimizer object (class Deoptimizer).
- __ ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+ __ Ld(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
// Replace the current (input) frame with the output frames.
Label outer_push_loop, inner_push_loop,
outer_loop_header, inner_loop_header;
// Outer loop state: a4 = current "FrameDescription** output_",
// a1 = one past the last FrameDescription**.
- __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
- __ ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
+ __ Lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ Ld(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
__ Dlsa(a1, a4, a1, kPointerSizeLog2);
__ BranchShort(&outer_loop_header);
__ bind(&outer_push_loop);
// Inner loop state: a2 = current FrameDescription*, a3 = loop index.
- __ ld(a2, MemOperand(a4, 0)); // output_[ix]
- __ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ Ld(a2, MemOperand(a4, 0)); // output_[ix]
+ __ Ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
__ BranchShort(&inner_loop_header);
__ bind(&inner_push_loop);
__ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
__ Daddu(a6, a2, Operand(a3));
- __ ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
+ __ Ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
__ push(a7);
__ bind(&inner_loop_header);
__ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
@@ -286,21 +283,21 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ bind(&outer_loop_header);
__ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
- __ ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ __ Ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
int src_offset = code * kDoubleSize + double_regs_offset;
- __ ldc1(fpu_reg, MemOperand(a1, src_offset));
+ __ Ldc1(fpu_reg, MemOperand(a1, src_offset));
}
// Push state, pc, and continuation from the last output frame.
- __ ld(a6, MemOperand(a2, FrameDescription::state_offset()));
+ __ Ld(a6, MemOperand(a2, FrameDescription::state_offset()));
__ push(a6);
- __ ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
+ __ Ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
__ push(a6);
- __ ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ Ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
__ push(a6);
@@ -312,7 +309,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
if ((restored_regs & (1 << i)) != 0) {
- __ ld(ToRegister(i), MemOperand(at, offset));
+ __ Ld(ToRegister(i), MemOperand(at, offset));
}
}
@@ -326,14 +323,14 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 3 * Assembler::kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
- Label table_start, done, done_special, trampoline_jump;
+ Label table_start, done, trampoline_jump;
__ bind(&table_start);
int kMaxEntriesBranchReach =
(1 << (kImm16Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
@@ -346,6 +343,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
DCHECK(is_int16(i));
__ BranchShort(USE_DELAY_SLOT, &done); // Expose delay slot.
__ li(at, i); // In the delay slot.
+ __ nop();
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
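
With the short branch, the li(at, i) in its delay slot, and the padding nop,
each table entry occupies exactly three instructions, which is what
table_entry_size_ = 3 * Assembler::kInstrSize above accounts for (and what the
DCHECK_EQ at the end of each iteration verifies).
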
@@ -356,34 +354,29 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ Push(at);
} else {
// Uncommon case, the branch cannot reach.
- // Create mini trampoline and adjust id constants to get proper value at
- // the end of table.
- for (int i = kMaxEntriesBranchReach; i > 1; i--) {
+    // Create a mini trampoline to reach the end of the table.
+ for (int i = 0, j = 0; i < count(); i++, j++) {
Label start;
__ bind(&start);
DCHECK(is_int16(i));
- __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
- __ li(at, -i); // In the delay slot.
+ if (j >= kMaxEntriesBranchReach) {
+ j = 0;
+ __ li(at, i);
+ __ bind(&trampoline_jump);
+ trampoline_jump = Label();
+ __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);
+ __ nop();
+ } else {
+ __ BranchShort(USE_DELAY_SLOT, &trampoline_jump); // Expose delay slot.
+ __ li(at, i); // In the delay slot.
+ __ nop();
+ }
DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
}
- // Entry with id == kMaxEntriesBranchReach - 1.
- __ bind(&trampoline_jump);
- __ BranchShort(USE_DELAY_SLOT, &done_special);
- __ li(at, -1);
-
- for (int i = kMaxEntriesBranchReach; i < count(); i++) {
- Label start;
- __ bind(&start);
- DCHECK(is_int16(i));
- __ Branch(USE_DELAY_SLOT, &done); // Expose delay slot.
- __ li(at, i); // In the delay slot.
- }
DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
count() * table_entry_size_);
- __ bind(&done_special);
- __ daddiu(at, at, kMaxEntriesBranchReach);
- __ bind(&done);
+ __ bind(&trampoline_jump);
__ Push(at);
}
}
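
In the far-table case, the loop rebinds trampoline_jump every
kMaxEntriesBranchReach entries and immediately emits a fresh short branch
toward the next trampoline, so no BranchShort ever has to span more than one
block of entries; the entry id travels through the chain in at, loaded either
in the delay slot or, at a trampoline, just before the rebound label.
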
diff --git a/deps/v8/src/mips64/disasm-mips64.cc b/deps/v8/src/mips64/disasm-mips64.cc
index a6911daa86..2ebd0ead13 100644
--- a/deps/v8/src/mips64/disasm-mips64.cc
+++ b/deps/v8/src/mips64/disasm-mips64.cc
@@ -92,6 +92,9 @@ class Decoder {
void PrintSd(Instruction* instr);
void PrintSs1(Instruction* instr);
void PrintSs2(Instruction* instr);
+ void PrintSs3(Instruction* instr);
+ void PrintSs4(Instruction* instr);
+ void PrintSs5(Instruction* instr);
void PrintBc(Instruction* instr);
void PrintCc(Instruction* instr);
void PrintFunction(Instruction* instr);
@@ -289,20 +292,41 @@ void Decoder::PrintSd(Instruction* instr) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
}
-
-// Print the integer value of the rd field, when used as 'ext' size.
+// Print the integer value of ext/dext/dextu size from the msbd field.
void Decoder::PrintSs1(Instruction* instr) {
- int ss = instr->RdValue();
- out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
+ int msbd = instr->RdValue();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", msbd + 1);
}
-
-// Print the integer value of the rd field, when used as 'ins' size.
+// Print the integer value of ins/dins/dinsu size from the msb and lsb fields
+// (for dinsu it is msbminus32 and lsbminus32 fields).
void Decoder::PrintSs2(Instruction* instr) {
- int ss = instr->RdValue();
- int pos = instr->SaValue();
+ int msb = instr->RdValue();
+ int lsb = instr->SaValue();
out_buffer_pos_ +=
- SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", msb - lsb + 1);
+}
+
+// Print the integer value of dextm size from the msbdminus32 field.
+void Decoder::PrintSs3(Instruction* instr) {
+ int msbdminus32 = instr->RdValue();
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", msbdminus32 + 32 + 1);
+}
+
+// Print the integer value of dinsm size from the msbminus32 and lsb fields.
+void Decoder::PrintSs4(Instruction* instr) {
+ int msbminus32 = instr->RdValue();
+ int lsb = instr->SaValue();
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", msbminus32 + 32 - lsb + 1);
+}
+
+// Print the integer value of dextu/dinsu pos from the lsbminus32 field.
+void Decoder::PrintSs5(Instruction* instr) {
+ int lsbminus32 = instr->SaValue();
+ out_buffer_pos_ +=
+ SNPrintF(out_buffer_ + out_buffer_pos_, "%d", lsbminus32 + 32);
}
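
A minimal C++ sketch of the bias these printers undo (the doubleword variants
encode the parts of size and position that do not fit in the 5-bit msb/lsb
fields by subtracting 32; function names are illustrative):

    #include <cassert>

    int ExtSize(int msbd) { return msbd + 1; }                 // ext/dext/dextu
    int DextmSize(int msbdminus32) { return msbdminus32 + 32 + 1; }
    int InsSize(int msb, int lsb) { return msb - lsb + 1; }    // ins/dins/dinsu
    int DinsmSize(int msbminus32, int lsb) { return msbminus32 + 32 - lsb + 1; }
    int DextuDinsuPos(int lsbminus32) { return lsbminus32 + 32; }

    int main() {
      assert(DextmSize(7) == 40);      // size 40 encoded as msbdminus32 == 7
      assert(DextuDinsuPos(4) == 36);  // pos 36 encoded as lsbminus32 == 4
    }
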
@@ -954,14 +978,22 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
case 's': {
if (format[2] == '1') {
- DCHECK(STRING_STARTS_WITH(format, "ss1")); /* ext size */
- PrintSs1(instr);
- return 3;
+ DCHECK(STRING_STARTS_WITH(format, "ss1")); // ext, dext, dextu size
+ PrintSs1(instr);
+ } else if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "ss2")); // ins, dins, dinsu size
+ PrintSs2(instr);
+ } else if (format[2] == '3') {
+ DCHECK(STRING_STARTS_WITH(format, "ss3")); // dextm size
+ PrintSs3(instr);
+ } else if (format[2] == '4') {
+ DCHECK(STRING_STARTS_WITH(format, "ss4")); // dinsm size
+ PrintSs4(instr);
} else {
- DCHECK(STRING_STARTS_WITH(format, "ss2")); /* ins size */
- PrintSs2(instr);
- return 3;
+ DCHECK(STRING_STARTS_WITH(format, "ss5")); // dextu, dinsu pos
+ PrintSs5(instr);
}
+ return 3;
}
}
}
@@ -1694,10 +1726,6 @@ void Decoder::DecodeTypeRegisterSPECIAL2(Instruction* instr) {
void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
switch (instr->FunctionFieldRaw()) {
- case INS: {
- Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
- break;
- }
case EXT: {
Format(instr, "ext 'rt, 'rs, 'sa, 'ss1");
break;
@@ -1707,11 +1735,27 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
break;
}
case DEXTM: {
- Format(instr, "dextm 'rt, 'rs, 'sa, 'ss1");
+ Format(instr, "dextm 'rt, 'rs, 'sa, 'ss3");
break;
}
case DEXTU: {
- Format(instr, "dextu 'rt, 'rs, 'sa, 'ss1");
+ Format(instr, "dextu 'rt, 'rs, 'ss5, 'ss1");
+ break;
+ }
+ case INS: {
+ Format(instr, "ins 'rt, 'rs, 'sa, 'ss2");
+ break;
+ }
+ case DINS: {
+ Format(instr, "dins 'rt, 'rs, 'sa, 'ss2");
+ break;
+ }
+ case DINSM: {
+ Format(instr, "dinsm 'rt, 'rs, 'sa, 'ss4");
+ break;
+ }
+ case DINSU: {
+ Format(instr, "dinsu 'rt, 'rs, 'ss5, 'ss2");
break;
}
case BSHFL: {
@@ -1749,10 +1793,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
break;
}
- case DINS: {
- Format(instr, "dins 'rt, 'rs, 'sa, 'ss2");
- break;
- }
case DBSHFL: {
int sa = instr->SaFieldRaw() >> kSaShift;
switch (sa) {
diff --git a/deps/v8/src/mips64/interface-descriptors-mips64.cc b/deps/v8/src/mips64/interface-descriptors-mips64.cc
index 8deb518c3b..73889d2d34 100644
--- a/deps/v8/src/mips64/interface-descriptors-mips64.cc
+++ b/deps/v8/src/mips64/interface-descriptors-mips64.cc
@@ -56,11 +56,6 @@ const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
-const Register RegExpExecDescriptor::StringRegister() { return a0; }
-const Register RegExpExecDescriptor::LastIndexRegister() { return a1; }
-const Register RegExpExecDescriptor::StringStartRegister() { return a2; }
-const Register RegExpExecDescriptor::StringEndRegister() { return a3; }
-const Register RegExpExecDescriptor::CodeRegister() { return t0; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
@@ -161,8 +156,19 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// a1: the target to call
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ Register registers[] = {a1, a0, a2};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // a1: the target to call
+ // a3: new target
+ // a0: number of arguments
// a2: start index (to support rest parameters)
- Register registers[] = {a1, a2};
+ Register registers[] = {a1, a3, a0, a2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.cc b/deps/v8/src/mips64/macro-assembler-mips64.cc
index ca0f0c1a0c..84a55d46e6 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/mips64/macro-assembler-mips64.cc
@@ -47,17 +47,17 @@ void MacroAssembler::Load(Register dst,
Representation r) {
DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
- lb(dst, src);
+ Lb(dst, src);
} else if (r.IsUInteger8()) {
- lbu(dst, src);
+ Lbu(dst, src);
} else if (r.IsInteger16()) {
- lh(dst, src);
+ Lh(dst, src);
} else if (r.IsUInteger16()) {
- lhu(dst, src);
+ Lhu(dst, src);
} else if (r.IsInteger32()) {
- lw(dst, src);
+ Lw(dst, src);
} else {
- ld(dst, src);
+ Ld(dst, src);
}
}
@@ -67,25 +67,25 @@ void MacroAssembler::Store(Register src,
Representation r) {
DCHECK(!r.IsDouble());
if (r.IsInteger8() || r.IsUInteger8()) {
- sb(src, dst);
+ Sb(src, dst);
} else if (r.IsInteger16() || r.IsUInteger16()) {
- sh(src, dst);
+ Sh(src, dst);
} else if (r.IsInteger32()) {
- sw(src, dst);
+ Sw(src, dst);
} else {
if (r.IsHeapObject()) {
AssertNotSmi(src);
} else if (r.IsSmi()) {
AssertSmi(src);
}
- sd(src, dst);
+ Sd(src, dst);
}
}
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index) {
- ld(destination, MemOperand(s6, index << kPointerSizeLog2));
+ Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}
@@ -94,14 +94,14 @@ void MacroAssembler::LoadRoot(Register destination,
Condition cond,
Register src1, const Operand& src2) {
Branch(2, NegateCondition(cond), src1, src2);
- ld(destination, MemOperand(s6, index << kPointerSizeLog2));
+ Ld(destination, MemOperand(s6, index << kPointerSizeLog2));
}
void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index) {
DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
- sd(source, MemOperand(s6, index << kPointerSizeLog2));
+ Sd(source, MemOperand(s6, index << kPointerSizeLog2));
}
@@ -111,7 +111,7 @@ void MacroAssembler::StoreRoot(Register source,
Register src1, const Operand& src2) {
DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
Branch(2, NegateCondition(cond), src1, src2);
- sd(source, MemOperand(s6, index << kPointerSizeLog2));
+ Sd(source, MemOperand(s6, index << kPointerSizeLog2));
}
void MacroAssembler::PushCommonFrame(Register marker_reg) {
@@ -166,12 +166,12 @@ void MacroAssembler::PopSafepointRegisters() {
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
- sd(src, SafepointRegisterSlot(dst));
+ Sd(src, SafepointRegisterSlot(dst));
}
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
- ld(dst, SafepointRegisterSlot(src));
+ Ld(dst, SafepointRegisterSlot(src));
}
@@ -195,6 +195,61 @@ MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
return MemOperand(sp, doubles_size + register_offset);
}
+// Helper for base-reg + offset, when offset is larger than int16.
+void MacroAssembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
+ DCHECK(!src.rm().is(at));
+ DCHECK(is_int32(src.offset()));
+
+ if (kArchVariant == kMips64r6) {
+ int32_t hi = (src.offset() >> kLuiShift) & kImm16Mask;
+ if (src.offset() & kNegOffset) {
+ if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
+ lui(at, (src.offset() >> kLuiShift) & kImm16Mask);
+ ori(at, at, src.offset() & kImm16Mask); // Load 32-bit offset.
+ daddu(at, at, src.rm()); // Add base register.
+ return;
+ }
+
+ hi += 1;
+ }
+
+ daui(at, src.rm(), hi);
+ daddiu(at, at, src.offset() & kImm16Mask);
+ } else {
+ lui(at, (src.offset() >> kLuiShift) & kImm16Mask);
+ ori(at, at, src.offset() & kImm16Mask); // Load 32-bit offset.
+ daddu(at, at, src.rm()); // Add base register.
+ }
+}
+
+// Helper for base-reg + upper part of offset, when offset is larger than int16.
+// Loads the higher part of the offset into the AT register.
+// Returns the lower part of the offset to be used as the offset
+// in load/store instructions.
+int32_t MacroAssembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) {
+ DCHECK(!src.rm().is(at));
+ DCHECK(is_int32(src.offset()));
+ int32_t hi = (src.offset() >> kLuiShift) & kImm16Mask;
+ // If the highest bit of the lower part of the offset is 1, this would make
+ // the offset in the load/store instruction negative. We need to compensate
+ // for this by adding 1 to the upper part of the offset.
+ if (src.offset() & kNegOffset) {
+ if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
+ LoadRegPlusOffsetToAt(src);
+ return 0;
+ }
+
+ hi += 1;
+ }
+
+ if (kArchVariant == kMips64r6) {
+ daui(at, src.rm(), hi);
+ } else {
+ lui(at, hi);
+ daddu(at, at, src.rm());
+ }
+ return (src.offset() & kImm16Mask);
+}
void MacroAssembler::InNewSpace(Register object,
Register scratch,
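
A runnable C++ sketch of the split LoadRegPlusUpperOffsetPartToAt performs,
including the sign-extension carry described in the comment above (the corner
case where bumping hi would flip its own sign bit is the one that falls back
to LoadRegPlusOffsetToAt; the helper name here is illustrative):

    #include <cassert>
    #include <cstdint>

    // Split a 32-bit offset into a 16-bit upper part (for lui/daui) and a
    // sign-extended 16-bit lower part (for the load/store immediate).
    void SplitOffset(int32_t offset, int32_t* hi, int16_t* lo) {
      *hi = (offset >> 16) & 0xFFFF;
      *lo = static_cast<int16_t>(offset & 0xFFFF);  // sign-extends
      if (offset & 0x8000) {  // kNegOffset: the low half is negative...
        *hi += 1;             // ...so compensate in the upper half.
      }
    }

    int main() {
      int32_t hi;
      int16_t lo;
      SplitOffset(0x12348000, &hi, &lo);
      assert(hi == 0x1235 && lo == -0x8000);
      assert((hi << 16) + lo == 0x12348000);
    }
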
@@ -235,7 +290,7 @@ void MacroAssembler::RecordWriteField(
Daddu(dst, object, Operand(offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
+ And(t8, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, t8, Operand(zero_reg));
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -269,7 +324,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
SaveFPRegsMode fp_mode) {
if (emit_debug_code()) {
DCHECK(!dst.is(at));
- ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+ Ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
Check(eq,
kWrongAddressOrValuePassedToRecordWrite,
dst,
@@ -281,7 +336,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
}
if (emit_debug_code()) {
- ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
Check(eq,
kWrongAddressOrValuePassedToRecordWrite,
map,
@@ -303,7 +358,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
+ And(at, dst, Operand(kPointerSize - 1));
Branch(&ok, eq, at, Operand(zero_reg));
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -351,7 +406,7 @@ void MacroAssembler::RecordWrite(
DCHECK(!AreAliased(object, address, value, t9));
if (emit_debug_code()) {
- ld(at, MemOperand(address));
+ Ld(at, MemOperand(address));
Assert(
eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
}
@@ -426,7 +481,7 @@ void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
if (emit_debug_code()) {
Daddu(scratch, js_function, Operand(offset - kHeapObjectTag));
- ld(at, MemOperand(scratch));
+ Ld(at, MemOperand(scratch));
Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at,
Operand(code_entry));
}
@@ -487,12 +542,12 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
ExternalReference store_buffer =
ExternalReference::store_buffer_top(isolate());
li(t8, Operand(store_buffer));
- ld(scratch, MemOperand(t8));
+ Ld(scratch, MemOperand(t8));
// Store pointer to buffer and increment buffer top.
- sd(address, MemOperand(scratch));
+ Sd(address, MemOperand(scratch));
Daddu(scratch, scratch, kPointerSize);
// Write back new top of buffer.
- sd(scratch, MemOperand(t8));
+ Sd(scratch, MemOperand(t8));
// Call stub on end of buffer.
// Check for end of buffer.
And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask));
@@ -1249,7 +1304,7 @@ void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
- lw(rd, rs);
+ Lw(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsLwrOffset) &&
@@ -1272,7 +1327,7 @@ void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) {
if (kArchVariant == kMips64r6) {
- lwu(rd, rs);
+ Lwu(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
Ulw(rd, rs);
@@ -1285,7 +1340,7 @@ void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
- sw(rd, rs);
+ Sw(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsSwrOffset) &&
@@ -1304,25 +1359,25 @@ void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
- lh(rd, rs);
+ Lh(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- lbu(at, rs);
- lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+ Lbu(at, rs);
+ Lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
- lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
- lb(rd, rs);
+ Lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+ Lb(rd, rs);
#endif
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
- lb(rd, MemOperand(at, 1));
- lbu(at, MemOperand(at, 0));
+ Lb(rd, MemOperand(at, 1));
+ Lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
- lb(rd, MemOperand(at, 0));
- lbu(at, MemOperand(at, 1));
+ Lb(rd, MemOperand(at, 0));
+ Lbu(at, MemOperand(at, 1));
#endif
}
dsll(rd, rd, 8);
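
What the little-endian byte-wise path above computes, as a C++ sketch (Lb
sign-extends the high byte, Lbu zero-extends the low byte; the final merge of
at into rd happens just past this hunk; the function name is illustrative):

    #include <cstdint>

    int32_t UlhSketch(const uint8_t* p) {
      uint32_t lo = p[0];                      // Lbu(at, rs)
      int32_t hi = static_cast<int8_t>(p[1]);  // Lb(rd, rs + 1), sign-extends
      return static_cast<int32_t>((static_cast<uint32_t>(hi) << 8) | lo);
    }
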
@@ -1334,25 +1389,25 @@ void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
- lhu(rd, rs);
+ Lhu(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
- lbu(at, rs);
- lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+ Lbu(at, rs);
+ Lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
- lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
- lbu(rd, rs);
+ Lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+ Lbu(rd, rs);
#endif
} else { // Offset > 16 bits, use multiple instructions to load.
LoadRegPlusOffsetToAt(rs);
#if defined(V8_TARGET_LITTLE_ENDIAN)
- lbu(rd, MemOperand(at, 1));
- lbu(at, MemOperand(at, 0));
+ Lbu(rd, MemOperand(at, 1));
+ Lbu(at, MemOperand(at, 0));
#elif defined(V8_TARGET_BIG_ENDIAN)
- lbu(rd, MemOperand(at, 0));
- lbu(at, MemOperand(at, 1));
+ Lbu(rd, MemOperand(at, 0));
+ Lbu(at, MemOperand(at, 1));
#endif
}
dsll(rd, rd, 8);
@@ -1366,7 +1421,7 @@ void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
DCHECK(!rs.rm().is(scratch));
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
- sh(rd, rs);
+ Sh(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
MemOperand source = rs;
@@ -1381,13 +1436,13 @@ void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
}
#if defined(V8_TARGET_LITTLE_ENDIAN)
- sb(scratch, source);
+ Sb(scratch, source);
srl(scratch, scratch, 8);
- sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+ Sb(scratch, MemOperand(source.rm(), source.offset() + 1));
#elif defined(V8_TARGET_BIG_ENDIAN)
- sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+ Sb(scratch, MemOperand(source.rm(), source.offset() + 1));
srl(scratch, scratch, 8);
- sb(scratch, source);
+ Sb(scratch, source);
#endif
}
}
@@ -1396,7 +1451,7 @@ void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
- ld(rd, rs);
+ Ld(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsLdrOffset) &&
@@ -1423,8 +1478,8 @@ void MacroAssembler::Uld(Register rd, const MemOperand& rs) {
// second word in high bits.
void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
Register scratch) {
- lwu(rd, rs);
- lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ Lwu(rd, rs);
+ Lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
dsll32(scratch, scratch, 0);
Daddu(rd, rd, scratch);
}
@@ -1433,7 +1488,7 @@ void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
DCHECK(!rd.is(at));
DCHECK(!rs.rm().is(at));
if (kArchVariant == kMips64r6) {
- sd(rd, rs);
+ Sd(rd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
if (is_int16(rs.offset() + kMipsSdrOffset) &&
@@ -1452,15 +1507,15 @@ void MacroAssembler::Usd(Register rd, const MemOperand& rs) {
// Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
Register scratch) {
- sw(rd, rs);
+ Sw(rd, rs);
dsrl32(scratch, rd, 0);
- sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
}
void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
- lwc1(fd, rs);
+ Lwc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
Ulw(scratch, rs);
@@ -1471,7 +1526,7 @@ void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
if (kArchVariant == kMips64r6) {
- swc1(fd, rs);
+ Swc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
mfc1(scratch, fd);
@@ -1483,7 +1538,7 @@ void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
- ldc1(fd, rs);
+ Ldc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
Uld(scratch, rs);
@@ -1495,7 +1550,7 @@ void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
Register scratch) {
DCHECK(!scratch.is(at));
if (kArchVariant == kMips64r6) {
- sdc1(fd, rs);
+ Sdc1(fd, rs);
} else {
DCHECK(kArchVariant == kMips64r2);
dmfc1(scratch, fd);
@@ -1503,6 +1558,142 @@ void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
}
}
+void MacroAssembler::Lb(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ lb(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ lb(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Lbu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ lbu(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ lbu(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Sb(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ sb(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ sb(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Lh(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ lh(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ lh(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Lhu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ lhu(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ lhu(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Sh(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ sh(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ sh(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Lw(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ lw(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ lw(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Lwu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ lwu(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ lwu(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Sw(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ sw(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ sw(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Ld(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ ld(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ ld(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Sd(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset())) {
+ sd(rd, rs);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs);
+ sd(rd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Lwc1(FPURegister fd, const MemOperand& src) {
+ if (is_int16(src.offset())) {
+ lwc1(fd, src);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ lwc1(fd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Swc1(FPURegister fs, const MemOperand& src) {
+ if (is_int16(src.offset())) {
+ swc1(fs, src);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ swc1(fs, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) {
+ if (is_int16(src.offset())) {
+ ldc1(fd, src);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ ldc1(fd, MemOperand(at, off16));
+ }
+}
+
+void MacroAssembler::Sdc1(FPURegister fs, const MemOperand& src) {
+ DCHECK(!src.rm().is(at));
+ if (is_int16(src.offset())) {
+ sdc1(fs, src);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src);
+ sdc1(fs, MemOperand(at, off16));
+ }
+}
+
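All of the new wrappers above share one shape: if the offset fits a signed 16-bit immediate, emit the bare instruction; otherwise fold the upper offset bits into at via LoadRegPlusUpperOffsetPartToAt and address through the 16-bit remainder. That helper's body is outside this hunk; the split it presumably performs looks like this (illustrative sketch only):

#include <cstdint>

// Assumed split: round the upper half up when bit 15 is set so the returned
// remainder always fits a signed 16-bit immediate. E.g. offset 0x12348000
// splits into upper 0x1235 and remainder -0x8000.
int32_t SplitLargeOffset(int32_t offset, int32_t* upper) {
  *upper = (offset >> 16) + ((offset & 0x8000) ? 1 : 0);
  return static_cast<int32_t>(offset -
                              (static_cast<int64_t>(*upper) << 16));
}
// The macro then emits roughly: lui(at, upper); daddu(at, at, base);
// and addresses memory as MemOperand(at, remainder).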
void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
li(dst, Operand(value), mode);
}
@@ -1650,7 +1841,7 @@ void MacroAssembler::MultiPush(RegList regs) {
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
- sd(ToRegister(i), MemOperand(sp, stack_offset));
+ Sd(ToRegister(i), MemOperand(sp, stack_offset));
}
}
}
@@ -1664,7 +1855,7 @@ void MacroAssembler::MultiPushReversed(RegList regs) {
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kPointerSize;
- sd(ToRegister(i), MemOperand(sp, stack_offset));
+ Sd(ToRegister(i), MemOperand(sp, stack_offset));
}
}
}
@@ -1675,7 +1866,7 @@ void MacroAssembler::MultiPop(RegList regs) {
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
- ld(ToRegister(i), MemOperand(sp, stack_offset));
+ Ld(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
}
@@ -1688,7 +1879,7 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
- ld(ToRegister(i), MemOperand(sp, stack_offset));
+ Ld(ToRegister(i), MemOperand(sp, stack_offset));
stack_offset += kPointerSize;
}
}
@@ -1704,7 +1895,7 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
- sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
}
}
}
@@ -1718,7 +1909,7 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) {
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
stack_offset -= kDoubleSize;
- sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
}
}
}
@@ -1729,7 +1920,7 @@ void MacroAssembler::MultiPopFPU(RegList regs) {
for (int16_t i = 0; i < kNumRegisters; i++) {
if ((regs & (1 << i)) != 0) {
- ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
}
}
@@ -1742,7 +1933,7 @@ void MacroAssembler::MultiPopReversedFPU(RegList regs) {
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
if ((regs & (1 << i)) != 0) {
- ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
stack_offset += kDoubleSize;
}
}
@@ -1759,55 +1950,18 @@ void MacroAssembler::Ext(Register rt,
ext_(rt, rs, pos, size);
}
-void MacroAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
- uint16_t size) {
- DCHECK(pos < 64);
- DCHECK(size > 0 && size <= 64);
- DCHECK(pos + size <= 64);
- if (pos < 32) {
- if (size <= 32) {
- Dext(rt, rs, pos, size);
- } else {
- Dextm(rt, rs, pos, size);
- }
- } else if (pos < 64) {
- DCHECK(size <= 32);
- Dextu(rt, rs, pos, size);
- }
-}
void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
uint16_t size) {
- DCHECK(pos < 32);
- DCHECK(size > 0 && size <= 32);
- dext_(rt, rs, pos, size);
-}
-
-
-void MacroAssembler::Dextm(Register rt, Register rs, uint16_t pos,
- uint16_t size) {
- DCHECK(pos < 32);
- DCHECK(size > 32 && size <= 64);
- DCHECK((pos + size) > 32 && (pos + size) <= 64);
- dextm(rt, rs, pos, size);
-}
-
-
-void MacroAssembler::Dextu(Register rt, Register rs, uint16_t pos,
- uint16_t size) {
- DCHECK(pos >= 32 && pos < 64);
- DCHECK(size > 0 && size <= 32);
- DCHECK((pos + size) > 32 && (pos + size) <= 64);
- dextu(rt, rs, pos, size);
-}
-
-
-void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
- uint16_t size) {
- DCHECK(pos < 32);
- DCHECK(pos + size <= 32);
- DCHECK(size != 0);
- dins_(rt, rs, pos, size);
+ DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
+ pos + size <= 64);
+ if (size > 32) {
+ dextm_(rt, rs, pos, size);
+ } else if (pos >= 32) {
+ dextu_(rt, rs, pos, size);
+ } else {
+ dext_(rt, rs, pos, size);
+ }
}
@@ -1821,6 +1975,19 @@ void MacroAssembler::Ins(Register rt,
ins_(rt, rs, pos, size);
}
+void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size &&
+ pos + size <= 64);
+ if (pos + size <= 32) {
+ dins_(rt, rs, pos, size);
+ } else if (pos < 32) {
+ dinsm_(rt, rs, pos, size);
+ } else {
+ dinsu_(rt, rs, pos, size);
+ }
+}
+
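Dext and Dins now choose the r2 sub-opcode (dext/dextm/dextu, dins/dinsm/dinsu) purely from pos and size. All six variants share the same bit-field semantics, which the simulator cases further down spell out; a compact host-side model (sketch):

#include <cstdint>

uint64_t ExtractBits(uint64_t rs, unsigned pos, unsigned size) {
  uint64_t mask = (size == 64) ? ~uint64_t{0} : ((uint64_t{1} << size) - 1);
  return (rs >> pos) & mask;  // What dext/dextm/dextu compute.
}

uint64_t InsertBits(uint64_t rt, uint64_t rs, unsigned pos, unsigned size) {
  uint64_t mask = (size == 64) ? ~uint64_t{0} : ((uint64_t{1} << size) - 1);
  return (rt & ~(mask << pos)) |
         ((rs & mask) << pos);  // What dins/dinsm/dinsu compute.
}
// E.g. ExtractBits(0xFF00, 8, 8) == 0xFF; InsertBits(0, 0xAB, 8, 8) == 0xAB00.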
void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
if (kArchVariant == kMips64r6) {
// r6 neg_s changes the sign for NaN-like operands as well.
@@ -2659,7 +2826,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
// If we fell through then inline version didn't succeed - call stub instead.
push(ra);
Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
- sdc1(double_input, MemOperand(sp, 0));
+ Sdc1(double_input, MemOperand(sp, 0));
DoubleToIStub stub(isolate(), sp, result, 0, true, true);
CallStub(&stub);
@@ -2676,7 +2843,7 @@ void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
DoubleRegister double_scratch = f12;
DCHECK(!result.is(object));
- ldc1(double_scratch,
+ Ldc1(double_scratch,
MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
TryInlineTruncateDoubleToI(result, double_scratch, &done);
@@ -4046,7 +4213,7 @@ void MacroAssembler::MaybeDropFrames() {
ExternalReference restart_fp =
ExternalReference::debug_restart_fp_address(isolate());
li(a1, Operand(restart_fp));
- ld(a1, MemOperand(a1));
+ Ld(a1, MemOperand(a1));
Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
ne, a1, Operand(zero_reg));
}
@@ -4061,11 +4228,11 @@ void MacroAssembler::PushStackHandler() {
// Link the current handler as the next handler.
li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- ld(a5, MemOperand(a6));
+ Ld(a5, MemOperand(a6));
push(a5);
// Set this new handler as the current one.
- sd(sp, MemOperand(a6));
+ Sd(sp, MemOperand(a6));
}
@@ -4075,7 +4242,7 @@ void MacroAssembler::PopStackHandler() {
Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize -
kPointerSize)));
li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
- sd(a1, MemOperand(at));
+ Sd(a1, MemOperand(at));
}
@@ -4126,16 +4293,16 @@ void MacroAssembler::Allocate(int object_size,
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into alloc_limit.
- ld(result, MemOperand(top_address));
- ld(alloc_limit, MemOperand(top_address, kPointerSize));
+ Ld(result, MemOperand(top_address));
+ Ld(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
- ld(alloc_limit, MemOperand(top_address));
+ Ld(alloc_limit, MemOperand(top_address));
Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
// Load allocation limit. Result already contains allocation top.
- ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
+ Ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
@@ -4154,7 +4321,7 @@ void MacroAssembler::Allocate(int object_size,
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
- sd(result_end, MemOperand(top_address));
+ Sd(result_end, MemOperand(top_address));
}
// Tag object.
@@ -4199,16 +4366,16 @@ void MacroAssembler::Allocate(Register object_size, Register result,
if ((flags & RESULT_CONTAINS_TOP) == 0) {
// Load allocation top into result and allocation limit into alloc_limit.
- ld(result, MemOperand(top_address));
- ld(alloc_limit, MemOperand(top_address, kPointerSize));
+ Ld(result, MemOperand(top_address));
+ Ld(alloc_limit, MemOperand(top_address, kPointerSize));
} else {
if (emit_debug_code()) {
// Assert that result actually contains top on entry.
- ld(alloc_limit, MemOperand(top_address));
+ Ld(alloc_limit, MemOperand(top_address));
Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit));
}
// Load allocation limit. Result already contains allocation top.
- ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
+ Ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top)));
}
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
@@ -4239,7 +4406,7 @@ void MacroAssembler::Allocate(Register object_size, Register result,
if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
// The top pointer is not updated for allocation folding dominators.
- sd(result_end, MemOperand(top_address));
+ Sd(result_end, MemOperand(top_address));
}
// Tag object.
@@ -4264,7 +4431,7 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
Register top_address = scratch1;
Register result_end = scratch2;
li(top_address, Operand(allocation_top));
- ld(result, MemOperand(top_address));
+ Ld(result, MemOperand(top_address));
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
// the same alignment on MIPS64.
@@ -4277,7 +4444,7 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
// Calculate new top and write it back.
Daddu(result_end, result, Operand(object_size));
- sd(result_end, MemOperand(top_address));
+ Sd(result_end, MemOperand(top_address));
Daddu(result, result, Operand(kHeapObjectTag));
}
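FastAllocate is a pure bump-pointer path: load the current top, advance it by the object size, write it back, tag the result. The equivalent in host C++ (a sketch; the "fast" variant intentionally has no limit check, since callers have already reserved the space):

#include <cstddef>
#include <cstdint>

// Bump-pointer allocation as emitted above (kHeapObjectTag is 1 in V8).
uintptr_t FastAllocateModel(uintptr_t* top_address, size_t object_size) {
  uintptr_t result = *top_address;      // Ld(result, MemOperand(top_address))
  *top_address = result + object_size;  // Daddu(result_end, ...) + Sd(...)
  return result + 1;                    // Daddu(result, result, kHeapObjectTag)
}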
@@ -4295,7 +4462,7 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
// Set up allocation top address and object size registers.
Register top_address = scratch;
li(top_address, Operand(allocation_top));
- ld(result, MemOperand(top_address));
+ Ld(result, MemOperand(top_address));
// We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
// the same alignment on MIPS64.
@@ -4353,7 +4520,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
AssertIsRoot(heap_number_map, map_index);
// Store heap number map in the allocated object.
- sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+ Sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
@@ -4364,7 +4531,7 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
Label* gc_required) {
LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
- sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+ Sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
}
@@ -4382,11 +4549,11 @@ void MacroAssembler::AllocateJSValue(Register result, Register constructor,
// Initialize the JSValue.
LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
- sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
+ Sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
- sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
- sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
- sd(value, FieldMemOperand(result, JSValue::kValueOffset));
+ Sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
+ Sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
+ Sd(value, FieldMemOperand(result, JSValue::kValueOffset));
STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
}
@@ -4396,7 +4563,7 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Label loop, entry;
Branch(&entry);
bind(&loop);
- sd(filler, MemOperand(current_address));
+ Sd(filler, MemOperand(current_address));
Daddu(current_address, current_address, kPointerSize);
bind(&entry);
Branch(&loop, ult, current_address, Operand(end_address));
@@ -4475,7 +4642,7 @@ void MacroAssembler::CompareMapAndBranch(Register obj,
Label* early_success,
Condition cond,
Label* branch_to) {
- ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
}
@@ -4511,7 +4678,7 @@ void MacroAssembler::CheckMap(Register obj,
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(obj, fail);
}
- ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
LoadRoot(at, index);
Branch(fail, ne, scratch, Operand(at));
}
@@ -4519,7 +4686,7 @@ void MacroAssembler::CheckMap(Register obj,
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
li(value, Operand(cell));
- ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
+ Ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
}
void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
@@ -4649,8 +4816,8 @@ void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// Restore caller's frame pointer and return address now as they will be
// overwritten by the copying loop.
- ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
- ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Now copy callee arguments to the caller frame going backwards to avoid
// callee arguments corruption (source and destination areas could overlap).
@@ -4663,8 +4830,8 @@ void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
bind(&loop);
Dsubu(src_reg, src_reg, Operand(kPointerSize));
Dsubu(dst_reg, dst_reg, Operand(kPointerSize));
- ld(tmp_reg, MemOperand(src_reg));
- sd(tmp_reg, MemOperand(dst_reg));
+ Ld(tmp_reg, MemOperand(src_reg));
+ Sd(tmp_reg, MemOperand(dst_reg));
bind(&entry);
Branch(&loop, ne, sp, Operand(src_reg));
@@ -4743,7 +4910,7 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
li(t0, Operand(debug_hook_active));
- lb(t0, MemOperand(t0));
+ Lb(t0, MemOperand(t0));
Branch(&skip_hook, eq, t0, Operand(zero_reg));
{
FrameScope frame(this,
@@ -4807,7 +4974,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// allow recompilation to take effect without changing any of the
// call sites.
Register code = t0;
- ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ Ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
if (flag == CALL_FUNCTION) {
call_wrapper.BeforeCall(CallSize(code));
Call(code);
@@ -4835,11 +5002,11 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(function.is(a1));
Register expected_reg = a2;
Register temp_reg = t0;
- ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ Ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
// The argument count is stored as int32_t on 64-bit platforms.
// TODO(plind): Smi on 32-bit platforms.
- lw(expected_reg,
+ Lw(expected_reg,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
@@ -4859,7 +5026,7 @@ void MacroAssembler::InvokeFunction(Register function,
DCHECK(function.is(a1));
// Get the function and setup the context.
- ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper);
}
@@ -4880,8 +5047,8 @@ void MacroAssembler::IsObjectJSStringType(Register object,
Label* fail) {
DCHECK(kNotStringTag != 0);
- ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ Ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ Lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
And(scratch, scratch, Operand(kIsNotStringMask));
Branch(fail, ne, scratch, Operand(zero_reg));
}
@@ -4906,8 +5073,8 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
void MacroAssembler::GetObjectType(Register object,
Register map,
Register type_reg) {
- ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
- lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ Lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
}
@@ -4959,20 +5126,20 @@ void MacroAssembler::ObjectToDoubleFPURegister(Register object,
bind(&not_smi);
}
// Check for heap number and load double value from it.
- ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
Branch(not_number, ne, scratch1, Operand(heap_number_map));
if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
// If exponent is all ones the number is either a NaN or +/-Infinity.
Register exponent = scratch1;
Register mask_reg = scratch2;
- lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ Lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
li(mask_reg, HeapNumber::kExponentMask);
And(exponent, exponent, mask_reg);
Branch(not_number, eq, exponent, Operand(mask_reg));
}
- ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
+ Ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
bind(&done);
}
@@ -5450,7 +5617,7 @@ void MacroAssembler::SetCounter(StatsCounter* counter, int value,
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch1, Operand(value));
li(scratch2, Operand(ExternalReference(counter)));
- sw(scratch1, MemOperand(scratch2));
+ Sw(scratch1, MemOperand(scratch2));
}
}
@@ -5460,9 +5627,9 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
- lw(scratch1, MemOperand(scratch2));
+ Lw(scratch1, MemOperand(scratch2));
Addu(scratch1, scratch1, Operand(value));
- sw(scratch1, MemOperand(scratch2));
+ Sw(scratch1, MemOperand(scratch2));
}
}
@@ -5472,9 +5639,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
li(scratch2, Operand(ExternalReference(counter)));
- lw(scratch1, MemOperand(scratch2));
+ Lw(scratch1, MemOperand(scratch2));
Subu(scratch1, scratch1, Operand(value));
- sw(scratch1, MemOperand(scratch2));
+ Sw(scratch1, MemOperand(scratch2));
}
}
@@ -5550,9 +5717,9 @@ void MacroAssembler::Abort(BailoutReason reason) {
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ Ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ Ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
@@ -5563,8 +5730,8 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
- ld(dst, NativeContextMemOperand());
- ld(dst, ContextMemOperand(dst, index));
+ Ld(dst, NativeContextMemOperand());
+ Ld(dst, ContextMemOperand(dst, index));
}
@@ -5572,7 +5739,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
Register map,
Register scratch) {
// Load the initial map. The global functions all have initial maps.
- ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ Ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
if (emit_debug_code()) {
Label ok, fail;
CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
@@ -5616,9 +5783,9 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
}
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
- ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- ld(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
- ld(vector, FieldMemOperand(vector, Cell::kValueOffset));
+ Ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ Ld(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
+ Ld(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
@@ -5640,16 +5807,16 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
}
daddiu(sp, sp, stack_offset);
stack_offset = -stack_offset - kPointerSize;
- sd(ra, MemOperand(sp, stack_offset));
+ Sd(ra, MemOperand(sp, stack_offset));
stack_offset -= kPointerSize;
- sd(fp, MemOperand(sp, stack_offset));
+ Sd(fp, MemOperand(sp, stack_offset));
stack_offset -= kPointerSize;
li(t9, Operand(StackFrame::TypeToMarker(type)));
- sd(t9, MemOperand(sp, stack_offset));
+ Sd(t9, MemOperand(sp, stack_offset));
if (type == StackFrame::INTERNAL) {
DCHECK_EQ(stack_offset, kPointerSize);
li(t9, Operand(CodeObject()));
- sd(t9, MemOperand(sp, 0));
+ Sd(t9, MemOperand(sp, 0));
} else {
DCHECK_EQ(stack_offset, 0);
}
@@ -5660,8 +5827,8 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
daddiu(sp, fp, 2 * kPointerSize);
- ld(ra, MemOperand(fp, 1 * kPointerSize));
- ld(fp, MemOperand(fp, 0 * kPointerSize));
+ Ld(ra, MemOperand(fp, 1 * kPointerSize));
+ Ld(fp, MemOperand(fp, 0 * kPointerSize));
}
void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
@@ -5699,26 +5866,26 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Save registers and reserve room for saved entry sp and code object.
daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
- sd(ra, MemOperand(sp, 4 * kPointerSize));
- sd(fp, MemOperand(sp, 3 * kPointerSize));
+ Sd(ra, MemOperand(sp, 4 * kPointerSize));
+ Sd(fp, MemOperand(sp, 3 * kPointerSize));
li(at, Operand(StackFrame::TypeToMarker(frame_type)));
- sd(at, MemOperand(sp, 2 * kPointerSize));
+ Sd(at, MemOperand(sp, 2 * kPointerSize));
// Set up new frame pointer.
daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
if (emit_debug_code()) {
- sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
// Accessed from ExitFrame::code_slot.
li(t8, Operand(CodeObject()), CONSTANT_SIZE);
- sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+ Sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- sd(fp, MemOperand(t8));
+ Sd(fp, MemOperand(t8));
li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- sd(cp, MemOperand(t8));
+ Sd(cp, MemOperand(t8));
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
@@ -5729,7 +5896,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Remember: we only need to save every 2nd double FPU value.
for (int i = 0; i < kNumOfSavedRegisters; i++) {
FPURegister reg = FPURegister::from_code(2 * i);
- sdc1(reg, MemOperand(sp, i * kDoubleSize));
+ Sdc1(reg, MemOperand(sp, i * kDoubleSize));
}
}
@@ -5746,7 +5913,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
// Set the exit frame sp value to point just before the return address
// location.
daddiu(at, sp, kPointerSize);
- sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ Sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -5761,28 +5928,28 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
kNumOfSavedRegisters * kDoubleSize));
for (int i = 0; i < kNumOfSavedRegisters; i++) {
FPURegister reg = FPURegister::from_code(2 * i);
- ldc1(reg, MemOperand(t8, i * kDoubleSize));
+ Ldc1(reg, MemOperand(t8, i * kDoubleSize));
}
}
// Clear top frame.
li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
- sd(zero_reg, MemOperand(t8));
+ Sd(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
if (restore_context) {
li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- ld(cp, MemOperand(t8));
+ Ld(cp, MemOperand(t8));
}
#ifdef DEBUG
li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
- sd(a3, MemOperand(t8));
+ Sd(a3, MemOperand(t8));
#endif
// Pop the arguments, restore registers, and return.
mov(sp, fp); // Respect ABI stack constraint.
- ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
- ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+ Ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+ Ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
if (argument_count.is_valid()) {
if (argument_count_is_length) {
@@ -5872,9 +6039,9 @@ void MacroAssembler::SmiTagCheckOverflow(Register dst,
void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
if (SmiValuesAre32Bits()) {
- lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
+ Lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
} else {
- lw(dst, src);
+ Lw(dst, src);
SmiUntag(dst);
}
}
@@ -5883,10 +6050,10 @@ void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
if (SmiValuesAre32Bits()) {
// TODO(plind): not clear if lw or ld faster here, need micro-benchmark.
- lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
+ Lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
dsll(dst, dst, scale);
} else {
- lw(dst, src);
+ Lw(dst, src);
DCHECK(scale >= kSmiTagSize);
sll(dst, dst, scale - kSmiTagSize);
}
@@ -5899,10 +6066,10 @@ void MacroAssembler::SmiLoadWithScale(Register d_smi,
MemOperand src,
int scale) {
if (SmiValuesAre32Bits()) {
- ld(d_smi, src);
+ Ld(d_smi, src);
dsra(d_scaled, d_smi, kSmiShift - scale);
} else {
- lw(d_smi, src);
+ Lw(d_smi, src);
DCHECK(scale >= kSmiTagSize);
sll(d_scaled, d_smi, scale - kSmiTagSize);
}
@@ -5915,10 +6082,10 @@ void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
MemOperand src,
int scale) {
if (SmiValuesAre32Bits()) {
- lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
+ Lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
dsll(d_scaled, d_int, scale);
} else {
- lw(d_int, src);
+ Lw(d_int, src);
// Need both the int and the scaled in, so use two instructions.
SmiUntag(d_int);
sll(d_scaled, d_int, scale);
@@ -6057,7 +6224,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
AssertNotSmi(object);
LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
Branch(&done_checking, eq, object, Operand(scratch));
- ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ld(t8, FieldMemOperand(object, HeapObject::kMapOffset));
LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch));
bind(&done_checking);
@@ -6078,7 +6245,7 @@ void MacroAssembler::JumpIfNotHeapNumber(Register object,
Register heap_number_map,
Register scratch,
Label* on_not_heap_number) {
- ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ Ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
}
@@ -6089,10 +6256,10 @@ void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
Label* failure) {
// Test that both first and second are sequential one-byte strings.
// Assume that they are non-smis.
- ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
- ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
- lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
- lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+ Ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ Ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ Lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ Lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
scratch2, failure);
@@ -6326,8 +6493,8 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
SmiTst(string, at);
Check(ne, kNonObject, at, Operand(zero_reg));
- ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
- lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+ Ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
+ Lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
andi(at, at, kStringRepresentationMask | kStringEncodingMask);
li(scratch, Operand(encoding_mask));
@@ -6335,7 +6502,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
// TODO(plind): requires Smi size check code for mips32.
- ld(at, FieldMemOperand(string, String::kLengthOffset));
+ Ld(at, FieldMemOperand(string, String::kLengthOffset));
Check(lt, kIndexIsTooLarge, index, Operand(at));
DCHECK(Smi::kZero == 0);
@@ -6364,7 +6531,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
And(sp, sp, Operand(-frame_alignment));
- sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ Sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
@@ -6407,6 +6574,7 @@ void MacroAssembler::CallCFunction(Register function,
void MacroAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
+ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
// Make sure that the stack is aligned before calling a C function unless
// running in the simulator. The simulator has its own alignment check which
@@ -6446,7 +6614,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
num_reg_arguments, num_double_arguments);
if (base::OS::ActivationFrameAlignment() > kPointerSize) {
- ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ Ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else {
Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
}
@@ -6463,7 +6631,7 @@ void MacroAssembler::CheckPageFlag(
Condition cc,
Label* condition_met) {
And(scratch, object, Operand(~Page::kPageAlignmentMask));
- ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+ Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
And(scratch, scratch, Operand(mask));
Branch(condition_met, cc, scratch, Operand(zero_reg));
}
@@ -6540,7 +6708,7 @@ void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
LoadWordPair(load_scratch,
MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
} else {
- lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ Lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
}
And(t8, mask_scratch, load_scratch);
Branch(value_is_white, eq, t8, Operand(zero_reg));
@@ -6549,19 +6717,19 @@ void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
- ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+ Ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ Lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ Lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
And(dst, dst, Operand(Map::EnumLengthBits::kMask));
SmiTag(dst);
}
@@ -6570,13 +6738,13 @@ void MacroAssembler::EnumLength(Register dst, Register map) {
void MacroAssembler::LoadAccessor(Register dst, Register holder,
int accessor_index,
AccessorComponent accessor) {
- ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
+ Ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
LoadInstanceDescriptors(dst, dst);
- ld(dst,
+ Ld(dst,
FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
: AccessorPair::kSetterOffset;
- ld(dst, FieldMemOperand(dst, offset));
+ Ld(dst, FieldMemOperand(dst, offset));
}
@@ -6589,7 +6757,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// Check if the enum length field is properly initialized, indicating that
// there is an enum cache.
- ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+ Ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
EnumLength(a3, a1);
Branch(
@@ -6599,7 +6767,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
jmp(&start);
bind(&next);
- ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+ Ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
// For all objects but the receiver, check that the cache is empty.
EnumLength(a3, a1);
@@ -6610,7 +6778,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
// Check that there are no elements. Register a2 contains the current JS
// object we've reached through the prototype chain.
Label no_elements;
- ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
+ Ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
// Second chance, the object may be using the empty slow element dictionary.
@@ -6618,7 +6786,7 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) {
Branch(call_runtime, ne, a2, Operand(at));
bind(&no_elements);
- ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+ Ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
Branch(&next, ne, a2, Operand(null_value));
}
@@ -6682,7 +6850,7 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
// page as the current top.
Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
li(at, Operand(new_space_allocation_top_adr));
- ld(at, MemOperand(at));
+ Ld(at, MemOperand(at));
Xor(scratch_reg, scratch_reg, Operand(at));
And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
@@ -6700,11 +6868,11 @@ void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
bind(&top_check);
Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset));
li(at, Operand(new_space_allocation_top_adr));
- ld(at, MemOperand(at));
+ Ld(at, MemOperand(at));
Branch(no_memento_found, ge, scratch_reg, Operand(at));
// Memento map check.
bind(&map_check);
- ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
+ Ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset));
Branch(no_memento_found, ne, scratch_reg,
Operand(isolate()->factory()->allocation_memento_map()));
}
diff --git a/deps/v8/src/mips64/macro-assembler-mips64.h b/deps/v8/src/mips64/macro-assembler-mips64.h
index 4d54ec5d73..ef13a2f57f 100644
--- a/deps/v8/src/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/mips64/macro-assembler-mips64.h
@@ -716,6 +716,27 @@ class MacroAssembler: public Assembler {
void LoadWordPair(Register rd, const MemOperand& rs, Register scratch = at);
void StoreWordPair(Register rd, const MemOperand& rs, Register scratch = at);
+ void Lb(Register rd, const MemOperand& rs);
+ void Lbu(Register rd, const MemOperand& rs);
+ void Sb(Register rd, const MemOperand& rs);
+
+ void Lh(Register rd, const MemOperand& rs);
+ void Lhu(Register rd, const MemOperand& rs);
+ void Sh(Register rd, const MemOperand& rs);
+
+ void Lw(Register rd, const MemOperand& rs);
+ void Lwu(Register rd, const MemOperand& rs);
+ void Sw(Register rd, const MemOperand& rs);
+
+ void Ld(Register rd, const MemOperand& rs);
+ void Sd(Register rd, const MemOperand& rs);
+
+ void Lwc1(FPURegister fd, const MemOperand& src);
+ void Swc1(FPURegister fs, const MemOperand& dst);
+
+ void Ldc1(FPURegister fd, const MemOperand& src);
+ void Sdc1(FPURegister fs, const MemOperand& dst);
+
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
inline bool LiLower32BitHelper(Register rd, Operand j);
@@ -735,7 +756,7 @@ class MacroAssembler: public Assembler {
void push(Register src) {
Daddu(sp, sp, Operand(-kPointerSize));
- sd(src, MemOperand(sp, 0));
+ Sd(src, MemOperand(sp, 0));
}
void Push(Register src) { push(src); }
@@ -746,43 +767,43 @@ class MacroAssembler: public Assembler {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
Dsubu(sp, sp, Operand(2 * kPointerSize));
- sd(src1, MemOperand(sp, 1 * kPointerSize));
- sd(src2, MemOperand(sp, 0 * kPointerSize));
+ Sd(src1, MemOperand(sp, 1 * kPointerSize));
+ Sd(src2, MemOperand(sp, 0 * kPointerSize));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
Dsubu(sp, sp, Operand(3 * kPointerSize));
- sd(src1, MemOperand(sp, 2 * kPointerSize));
- sd(src2, MemOperand(sp, 1 * kPointerSize));
- sd(src3, MemOperand(sp, 0 * kPointerSize));
+ Sd(src1, MemOperand(sp, 2 * kPointerSize));
+ Sd(src2, MemOperand(sp, 1 * kPointerSize));
+ Sd(src3, MemOperand(sp, 0 * kPointerSize));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
Dsubu(sp, sp, Operand(4 * kPointerSize));
- sd(src1, MemOperand(sp, 3 * kPointerSize));
- sd(src2, MemOperand(sp, 2 * kPointerSize));
- sd(src3, MemOperand(sp, 1 * kPointerSize));
- sd(src4, MemOperand(sp, 0 * kPointerSize));
+ Sd(src1, MemOperand(sp, 3 * kPointerSize));
+ Sd(src2, MemOperand(sp, 2 * kPointerSize));
+ Sd(src3, MemOperand(sp, 1 * kPointerSize));
+ Sd(src4, MemOperand(sp, 0 * kPointerSize));
}
// Push five registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4,
Register src5) {
Dsubu(sp, sp, Operand(5 * kPointerSize));
- sd(src1, MemOperand(sp, 4 * kPointerSize));
- sd(src2, MemOperand(sp, 3 * kPointerSize));
- sd(src3, MemOperand(sp, 2 * kPointerSize));
- sd(src4, MemOperand(sp, 1 * kPointerSize));
- sd(src5, MemOperand(sp, 0 * kPointerSize));
+ Sd(src1, MemOperand(sp, 4 * kPointerSize));
+ Sd(src2, MemOperand(sp, 3 * kPointerSize));
+ Sd(src3, MemOperand(sp, 2 * kPointerSize));
+ Sd(src4, MemOperand(sp, 1 * kPointerSize));
+ Sd(src5, MemOperand(sp, 0 * kPointerSize));
}
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
Branch(3, cond, tst1, Operand(tst2));
Dsubu(sp, sp, Operand(kPointerSize));
- sd(src, MemOperand(sp, 0));
+ Sd(src, MemOperand(sp, 0));
}
void PushRegisterAsTwoSmis(Register src, Register scratch = at);
@@ -797,7 +818,7 @@ class MacroAssembler: public Assembler {
void MultiPopReversedFPU(RegList regs);
void pop(Register dst) {
- ld(dst, MemOperand(sp, 0));
+ Ld(dst, MemOperand(sp, 0));
Daddu(sp, sp, Operand(kPointerSize));
}
void Pop(Register dst) { pop(dst); }
@@ -805,16 +826,16 @@ class MacroAssembler: public Assembler {
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
DCHECK(!src1.is(src2));
- ld(src2, MemOperand(sp, 0 * kPointerSize));
- ld(src1, MemOperand(sp, 1 * kPointerSize));
+ Ld(src2, MemOperand(sp, 0 * kPointerSize));
+ Ld(src1, MemOperand(sp, 1 * kPointerSize));
Daddu(sp, sp, 2 * kPointerSize);
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
- ld(src3, MemOperand(sp, 0 * kPointerSize));
- ld(src2, MemOperand(sp, 1 * kPointerSize));
- ld(src1, MemOperand(sp, 2 * kPointerSize));
+ Ld(src3, MemOperand(sp, 0 * kPointerSize));
+ Ld(src2, MemOperand(sp, 1 * kPointerSize));
+ Ld(src1, MemOperand(sp, 2 * kPointerSize));
Daddu(sp, sp, 3 * kPointerSize);
}
@@ -842,15 +863,10 @@ class MacroAssembler: public Assembler {
void LoadFromSafepointRegisterSlot(Register dst, Register src);
// MIPS64 R2 instruction macro.
- void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
-
- void ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size);
-
void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
- void Dextu(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Dins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Neg_s(FPURegister fd, FPURegister fs);
void Neg_d(FPURegister fd, FPURegister fs);
@@ -1163,7 +1179,7 @@ class MacroAssembler: public Assembler {
Register type_reg);
void GetInstanceType(Register object_map, Register object_instance_type) {
- lbu(object_instance_type,
+ Lbu(object_instance_type,
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
}
@@ -1220,8 +1236,8 @@ class MacroAssembler: public Assembler {
Condition IsObjectStringType(Register obj,
Register type,
Register result) {
- ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
- lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+ Ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+ Lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
And(type, type, Operand(kIsNotStringMask));
DCHECK_EQ(0u, kStringTag);
return eq;
@@ -1465,7 +1481,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Arguments 1-4 are placed in registers a0 thru a3 respectively.
// Arguments 5..n are stored to stack using following:
- // sw(a4, CFunctionArgumentOperand(5));
+ // Sw(a4, CFunctionArgumentOperand(5));
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -1861,6 +1877,10 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+ // Helpers.
+ void LoadRegPlusOffsetToAt(const MemOperand& src);
+ int32_t LoadRegPlusUpperOffsetPartToAt(const MemOperand& src);
+
bool generating_stub_;
bool has_frame_;
bool has_double_zero_reg_set_;
@@ -1924,7 +1944,7 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
}
addiupc(at, 5);
Dlsa(at, at, index, kPointerSizeLog2);
- ld(at, MemOperand(at));
+ Ld(at, MemOperand(at));
} else {
Label here;
BlockTrampolinePoolFor(static_cast<int>(case_count) * 2 +
@@ -1936,7 +1956,7 @@ void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
bind(&here);
daddu(at, at, ra);
pop(ra);
- ld(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
+ Ld(at, MemOperand(at, 6 * v8::internal::Assembler::kInstrSize));
}
jr(at);
nop(); // Branch delay slot nop.
diff --git a/deps/v8/src/mips64/simulator-mips64.cc b/deps/v8/src/mips64/simulator-mips64.cc
index 7ec51b1cfe..320b97296a 100644
--- a/deps/v8/src/mips64/simulator-mips64.cc
+++ b/deps/v8/src/mips64/simulator-mips64.cc
@@ -949,6 +949,8 @@ void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
+ base::LockGuard<base::Mutex> lock_guard(
+ isolate->simulator_redirection_mutex());
Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}
@@ -1984,12 +1986,11 @@ void Simulator::Format(Instruction* instr, const char* format) {
// 64 bits of result. If they don't, the v1 result register contains a bogus
// value, which is fine because it is caller-saved.
-typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
- int64_t arg1,
- int64_t arg2,
- int64_t arg3,
- int64_t arg4,
- int64_t arg5);
+typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7,
+ int64_t arg8);
typedef ObjectTriple (*SimulatorRuntimeTripleCall)(int64_t arg0, int64_t arg1,
int64_t arg2, int64_t arg3,
@@ -2022,14 +2023,19 @@ void Simulator::SoftwareInterrupt() {
// We first check if we met a call_rt_redirected.
if (instr_.InstructionBits() == rtCallRedirInstr) {
Redirection* redirection = Redirection::FromSwiInstruction(instr_.instr());
+
+ int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
+
int64_t arg0 = get_register(a0);
int64_t arg1 = get_register(a1);
int64_t arg2 = get_register(a2);
int64_t arg3 = get_register(a3);
- int64_t arg4, arg5;
-
- arg4 = get_register(a4); // Abi n64 register a4.
- arg5 = get_register(a5); // Abi n64 register a5.
+ int64_t arg4 = get_register(a4);
+ int64_t arg5 = get_register(a5);
+ int64_t arg6 = get_register(a6);
+ int64_t arg7 = get_register(a7);
+ int64_t arg8 = stack_pointer[0];
+ STATIC_ASSERT(kMaxCParameters == 9);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
@@ -2224,14 +2230,13 @@ void Simulator::SoftwareInterrupt() {
PrintF(
"Call to host function at %p "
"args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
- " , %08" PRIx64 " , %08" PRIx64 " \n",
+ " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
+ " , %08" PRIx64 " \n",
static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
- arg4, arg5);
+ arg4, arg5, arg6, arg7, arg8);
}
- // int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- // set_register(v0, static_cast<int32_t>(result));
- // set_register(v1, static_cast<int32_t>(result >> 32));
- ObjectPair result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ ObjectPair result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8);
set_register(v0, (int64_t)(result.x));
set_register(v1, (int64_t)(result.y));
}
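The widened SimulatorRuntimeCall matches the new kMaxCParameters of 9: under the MIPS n64 ABI the first eight integer arguments travel in a0..a7 and the ninth spills to the first stack slot, which is why arg8 is read through the simulated sp. A host-side model of that gathering step (names are illustrative, not the simulator's API):

#include <cstdint>

struct SimState {
  int64_t a[8];  // Simulated a0..a7.
  int64_t* sp;   // Simulated stack pointer.
};

// Mirrors how SoftwareInterrupt collects the nine runtime-call arguments.
void GatherCallArgs(const SimState& s, int64_t (&args)[9]) {
  for (int i = 0; i < 8; ++i) args[i] = s.a[i];  // Register arguments.
  args[8] = s.sp[0];  // Ninth argument: first stack slot per the n64 ABI.
}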
@@ -4032,73 +4037,101 @@ void Simulator::DecodeTypeRegisterSPECIAL2() {
void Simulator::DecodeTypeRegisterSPECIAL3() {
int64_t alu_out;
switch (instr_.FunctionFieldRaw()) {
- case INS: { // Mips64r2 instruction.
- // Interpret rd field as 5-bit msb of insert.
- uint16_t msb = rd_reg();
- // Interpret sa field as 5-bit lsb of insert.
- uint16_t lsb = sa();
- uint16_t size = msb - lsb + 1;
- uint64_t mask = (1ULL << size) - 1;
- alu_out = static_cast<int32_t>((rt_u() & ~(mask << lsb)) |
- ((rs_u() & mask) << lsb));
- SetResult(rt_reg(), alu_out);
- break;
- }
- case DINS: { // Mips64r2 instruction.
- // Interpret rd field as 5-bit msb of insert.
- uint16_t msb = rd_reg();
- // Interpret sa field as 5-bit lsb of insert.
- uint16_t lsb = sa();
- uint16_t size = msb - lsb + 1;
- uint64_t mask = (1ULL << size) - 1;
- alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb);
- SetResult(rt_reg(), alu_out);
- break;
- }
- case EXT: { // Mips64r2 instruction.
- // Interpret rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg();
+ case EXT: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msbd of extract.
+ uint16_t msbd = rd_reg();
// Interpret sa field as 5-bit lsb of extract.
uint16_t lsb = sa();
- uint16_t size = msb + 1;
+ uint16_t size = msbd + 1;
uint64_t mask = (1ULL << size) - 1;
alu_out = static_cast<int32_t>((rs_u() & (mask << lsb)) >> lsb);
SetResult(rt_reg(), alu_out);
break;
}
case DEXT: { // Mips64r2 instruction.
- // Interpret rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg();
+ // Interpret rd field as 5-bit msbd of extract.
+ uint16_t msbd = rd_reg();
// Interpret sa field as 5-bit lsb of extract.
uint16_t lsb = sa();
- uint16_t size = msb + 1;
+ uint16_t size = msbd + 1;
uint64_t mask = (size == 64) ? UINT64_MAX : (1ULL << size) - 1;
alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
SetResult(rt_reg(), alu_out);
break;
}
case DEXTM: {
- // Interpret rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg();
+ // Interpret rd field as 5-bit msbdminus32 of extract.
+ uint16_t msbdminus32 = rd_reg();
// Interpret sa field as 5-bit lsb of extract.
uint16_t lsb = sa();
- uint16_t size = msb + 33;
+ uint16_t size = msbdminus32 + 1 + 32;
uint64_t mask = (size == 64) ? UINT64_MAX : (1ULL << size) - 1;
alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
SetResult(rt_reg(), alu_out);
break;
}
case DEXTU: {
- // Interpret rd field as 5-bit msb of extract.
- uint16_t msb = rd_reg();
- // Interpret sa field as 5-bit lsb of extract.
+ // Interpret rd field as 5-bit msbd of extract.
+ uint16_t msbd = rd_reg();
+ // Interpret sa field as 5-bit lsbminus32 of extract and add 32 to get
+ // lsb.
uint16_t lsb = sa() + 32;
- uint16_t size = msb + 1;
+ uint16_t size = msbd + 1;
uint64_t mask = (size == 64) ? UINT64_MAX : (1ULL << size) - 1;
alu_out = static_cast<int64_t>((rs_u() & (mask << lsb)) >> lsb);
SetResult(rt_reg(), alu_out);
break;
}
+ case INS: { // Mips32r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa();
+ uint16_t size = msb - lsb + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = static_cast<int32_t>((rt_u() & ~(mask << lsb)) |
+ ((rs_u() & mask) << lsb));
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
+ case DINS: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg();
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa();
+ uint16_t size = msb - lsb + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb);
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
+ case DINSM: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msbminus32 of insert.
+ uint16_t msbminus32 = rd_reg();
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa();
+ uint16_t size = msbminus32 + 32 - lsb + 1;
+ uint64_t mask;
+ if (size < 64)
+ mask = (1ULL << size) - 1;
+ else
+ mask = std::numeric_limits<uint64_t>::max();
+ alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb);
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
+ case DINSU: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msbminus32 of insert.
+ uint16_t msbminus32 = rd_reg();
+ // Interpret sa field as 5-bit lsbminus32 of insert.
+ uint16_t lsbminus32 = sa();
+ uint16_t lsb = lsbminus32 + 32;
+ uint16_t size = msbminus32 + 32 - lsb + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb);
+ SetResult(rt_reg(), alu_out);
+ break;
+ }
case BSHFL: {
int32_t sa = instr_.SaFieldRaw() >> kSaShift;
switch (sa) {
diff --git a/deps/v8/src/mips64/simulator-mips64.h b/deps/v8/src/mips64/simulator-mips64.h
index 6c41ae111a..a9e0d3d118 100644
--- a/deps/v8/src/mips64/simulator-mips64.h
+++ b/deps/v8/src/mips64/simulator-mips64.h
@@ -29,8 +29,6 @@ namespace internal {
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
-// The fifth (or ninth) argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
typedef int (*mips_regexp_matcher)(String* input,
int64_t start_offset,
const byte* input_start,
@@ -39,14 +37,12 @@ typedef int (*mips_regexp_matcher)(String* input,
int64_t output_size,
Address stack_base,
int64_t direct_call,
- void* return_address,
Isolate* isolate);
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
(FUNCTION_CAST<mips_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- NULL, p8))
-
+ p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
@@ -497,7 +493,7 @@ class Simulator {
// Exceptions.
void SignalException(Exception e);
- // Runtime call support.
+ // Runtime call support. Uses the isolate in a thread-safe way.
static void* RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type);
@@ -560,13 +556,11 @@ class Simulator {
reinterpret_cast<int64_t*>(p1), reinterpret_cast<int64_t*>(p2), \
reinterpret_cast<int64_t*>(p3), reinterpret_cast<int64_t*>(p4)))
-
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
static_cast<int>(Simulator::current(isolate)->Call( \
- entry, 10, p0, p1, p2, p3, p4, reinterpret_cast<int64_t*>(p5), p6, p7, \
- NULL, p8))
-
+ entry, 9, p0, p1, p2, p3, p4, reinterpret_cast<int64_t*>(p5), p6, p7, \
+ p8))
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. The JS-based limit normally points near the end of
diff --git a/deps/v8/src/objects-body-descriptors-inl.h b/deps/v8/src/objects-body-descriptors-inl.h
index 3f54f6f690..7d83d51a17 100644
--- a/deps/v8/src/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects-body-descriptors-inl.h
@@ -79,7 +79,7 @@ DISABLE_CFI_PERF void BodyDescriptorBase::IteratePointers(HeapObject* obj,
int start_offset,
int end_offset,
ObjectVisitor* v) {
- v->VisitPointers(HeapObject::RawField(obj, start_offset),
+ v->VisitPointers(obj, HeapObject::RawField(obj, start_offset),
HeapObject::RawField(obj, end_offset));
}
@@ -96,7 +96,7 @@ DISABLE_CFI_PERF void BodyDescriptorBase::IteratePointers(Heap* heap,
template <typename ObjectVisitor>
void BodyDescriptorBase::IteratePointer(HeapObject* obj, int offset,
ObjectVisitor* v) {
- v->VisitPointer(HeapObject::RawField(obj, offset));
+ v->VisitPointer(obj, HeapObject::RawField(obj, offset));
}
template <typename StaticVisitor>
@@ -175,7 +175,8 @@ class JSFunction::BodyDescriptorImpl final : public BodyDescriptorBase {
IteratePointers(obj, kPropertiesOffset, kNonWeakFieldsEndOffset, v);
if (body_visiting_policy & kVisitCodeEntry) {
- v->VisitCodeEntry(obj->address() + kCodeEntryOffset);
+ v->VisitCodeEntry(JSFunction::cast(obj),
+ obj->address() + kCodeEntryOffset);
}
if (body_visiting_policy & kVisitNextFunction) {
@@ -209,11 +210,11 @@ class JSFunction::BodyDescriptorImpl final : public BodyDescriptorBase {
class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
public:
STATIC_ASSERT(kByteLengthOffset + kPointerSize == kBackingStoreOffset);
- STATIC_ASSERT(kBackingStoreOffset + kPointerSize == kBitFieldSlot);
+ STATIC_ASSERT(kAllocationLengthOffset + kPointerSize == kBitFieldSlot);
STATIC_ASSERT(kBitFieldSlot + kPointerSize == kSize);
static bool IsValidSlot(HeapObject* obj, int offset) {
- if (offset < kBackingStoreOffset) return true;
+ if (offset < kAllocationLengthOffset) return true;
if (offset < kSize) return false;
return IsValidSlotImpl(obj, offset);
}
@@ -221,6 +222,9 @@ class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(HeapObject* obj, int object_size,
ObjectVisitor* v) {
+ // Array buffers contain raw pointers that the GC does not know about. These
+    // are stored at kBackingStoreOffset and later, so we do not iterate over
+ // those.
IteratePointers(obj, kPropertiesOffset, kBackingStoreOffset, v);
IterateBodyImpl(obj, kSize, object_size, v);
}
@@ -228,6 +232,9 @@ class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
template <typename StaticVisitor>
static inline void IterateBody(HeapObject* obj, int object_size) {
Heap* heap = obj->GetHeap();
+ // Array buffers contain raw pointers that the GC does not know about. These
+    // are stored at kBackingStoreOffset and later, so we do not iterate over
+ // those.
IteratePointers<StaticVisitor>(heap, obj, kPropertiesOffset,
kBackingStoreOffset);
IterateBodyImpl<StaticVisitor>(heap, obj, kSize, object_size);
@@ -238,6 +245,22 @@ class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
}
};
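// A hedged sketch of the layout rule in the comments above: a JSArrayBuffer
// is visited only up to kBackingStoreOffset, because everything from there
// on (backing store pointer, allocation base/length, bit field) is raw data
// the GC must not treat as tagged pointers. Offsets and names below are
// illustrative assumptions, not V8's real constants.
struct ArrayBufferLayoutSketch {
  static const int kTaggedStart = 8;   // stand-in for kPropertiesOffset
  static const int kTaggedEnd = 24;    // stand-in for kBackingStoreOffset

  template <typename Visitor>
  static void VisitTaggedPrefix(char* base, Visitor* v) {
    for (int off = kTaggedStart; off < kTaggedEnd;
         off += static_cast<int>(sizeof(void*))) {
      v->VisitPointer(reinterpret_cast<void**>(base + off));
    }
    // Bytes at kTaggedEnd and beyond are deliberately skipped.
  }
};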
+class ByteArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {}
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {}
+
+ static inline int SizeOf(Map* map, HeapObject* obj) {
+ return reinterpret_cast<ByteArray*>(obj)->ByteArraySize();
+ }
+};
+
class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(HeapObject* obj, int offset) {
@@ -266,6 +289,23 @@ class BytecodeArray::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class FixedDoubleArray::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {}
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {}
+
+ static inline int SizeOf(Map* map, HeapObject* obj) {
+ return FixedDoubleArray::SizeFor(
+ reinterpret_cast<FixedDoubleArray*>(obj)->length());
+ }
+};
+
class FixedTypedArrayBase::BodyDescriptor final : public BodyDescriptorBase {
public:
static bool IsValidSlot(HeapObject* obj, int offset) {
@@ -334,8 +374,9 @@ class Foreign::BodyDescriptor final : public BodyDescriptorBase {
template <typename ObjectVisitor>
static inline void IterateBody(HeapObject* obj, int object_size,
ObjectVisitor* v) {
- v->VisitExternalReference(reinterpret_cast<Address*>(
- HeapObject::RawField(obj, kForeignAddressOffset)));
+ v->VisitExternalReference(Foreign::cast(obj),
+ reinterpret_cast<Address*>(HeapObject::RawField(
+ obj, kForeignAddressOffset)));
}
template <typename StaticVisitor>
@@ -408,9 +449,10 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
RelocInfo::kDebugBreakSlotMask;
IteratePointers(obj, kRelocationInfoOffset, kNextCodeLinkOffset, v);
- v->VisitNextCodeLink(HeapObject::RawField(obj, kNextCodeLinkOffset));
+ v->VisitNextCodeLink(Code::cast(obj),
+ HeapObject::RawField(obj, kNextCodeLinkOffset));
- RelocIterator it(reinterpret_cast<Code*>(obj), mode_mask);
+ RelocIterator it(Code::cast(obj), mode_mask);
Isolate* isolate = obj->GetIsolate();
for (; !it.done(); it.next()) {
it.rinfo()->Visit(isolate, v);
@@ -440,7 +482,7 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
StaticVisitor::VisitNextCodeLink(
heap, HeapObject::RawField(obj, kNextCodeLinkOffset));
- RelocIterator it(reinterpret_cast<Code*>(obj), mode_mask);
+ RelocIterator it(Code::cast(obj), mode_mask);
for (; !it.done(); it.next()) {
it.rinfo()->template Visit<StaticVisitor>(heap);
}
@@ -456,6 +498,39 @@ class Code::BodyDescriptor final : public BodyDescriptorBase {
}
};
+class SeqOneByteString::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {}
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {}
+
+ static inline int SizeOf(Map* map, HeapObject* obj) {
+ SeqOneByteString* string = SeqOneByteString::cast(obj);
+ return string->SizeFor(string->length());
+ }
+};
+
+class SeqTwoByteString::BodyDescriptor final : public BodyDescriptorBase {
+ public:
+ static bool IsValidSlot(HeapObject* obj, int offset) { return false; }
+
+ template <typename ObjectVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size,
+ ObjectVisitor* v) {}
+
+ template <typename StaticVisitor>
+ static inline void IterateBody(HeapObject* obj, int object_size) {}
+
+ static inline int SizeOf(Map* map, HeapObject* obj) {
+ SeqTwoByteString* string = SeqTwoByteString::cast(obj);
+ return string->SizeFor(string->length());
+ }
+};
template <typename Op, typename ReturnType, typename T1, typename T2,
typename T3>
diff --git a/deps/v8/src/objects-body-descriptors.h b/deps/v8/src/objects-body-descriptors.h
index b201c20fbb..499c48a930 100644
--- a/deps/v8/src/objects-body-descriptors.h
+++ b/deps/v8/src/objects-body-descriptors.h
@@ -99,8 +99,10 @@ class FixedBodyDescriptor final : public BodyDescriptorBase {
template <typename StaticVisitor>
static inline void IterateBody(HeapObject* obj, int object_size) {
- IterateBody<StaticVisitor>(obj);
+ IterateBody(obj);
}
+
+ static inline int SizeOf(Map* map, HeapObject* object) { return kSize; }
};
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index ce075bf237..273bfa22e4 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -8,6 +8,7 @@
#include "src/bootstrapper.h"
#include "src/disasm.h"
#include "src/disassembler.h"
+#include "src/elements.h"
#include "src/field-type.h"
#include "src/layout-descriptor.h"
#include "src/macro-assembler.h"
@@ -105,12 +106,14 @@ void HeapObject::HeapObjectVerify() {
break;
case JS_OBJECT_TYPE:
case JS_ERROR_TYPE:
- case JS_ARGUMENTS_TYPE:
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
JSObject::cast(this)->JSObjectVerify();
break;
+ case JS_ARGUMENTS_TYPE:
+ JSArgumentsObject::cast(this)->JSArgumentsObjectVerify();
+ break;
case JS_GENERATOR_OBJECT_TYPE:
JSGeneratorObject::cast(this)->JSGeneratorObjectVerify();
break;
@@ -162,6 +165,7 @@ void HeapObject::HeapObjectVerify() {
case JS_MAP_ITERATOR_TYPE:
JSMapIterator::cast(this)->JSMapIteratorVerify();
break;
+
case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
case JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE:
@@ -326,30 +330,33 @@ void JSObject::JSObjectVerify() {
VerifyHeapPointer(properties());
VerifyHeapPointer(elements());
- if (HasSloppyArgumentsElements()) {
- CHECK(this->elements()->IsFixedArray());
- CHECK_GE(this->elements()->length(), 2);
- }
-
+ CHECK_IMPLIES(HasSloppyArgumentsElements(), IsJSArgumentsObject());
if (HasFastProperties()) {
int actual_unused_property_fields = map()->GetInObjectProperties() +
properties()->length() -
map()->NextFreePropertyIndex();
if (map()->unused_property_fields() != actual_unused_property_fields) {
- // This could actually happen in the middle of StoreTransitionStub
- // when the new extended backing store is already set into the object and
- // the allocation of the MutableHeapNumber triggers GC (in this case map
- // is not updated yet).
- CHECK_EQ(map()->unused_property_fields(),
- actual_unused_property_fields - JSObject::kFieldsAdded);
+ // There are two reasons why this can happen:
+ // - in the middle of StoreTransitionStub when the new extended backing
+ // store is already set into the object and the allocation of the
+ // MutableHeapNumber triggers GC while the map isn't updated yet.
+ // - deletion of the last property can leave additional backing store
+ // capacity behind.
+ CHECK_GT(actual_unused_property_fields, map()->unused_property_fields());
+ int delta =
+ actual_unused_property_fields - map()->unused_property_fields();
+ CHECK_EQ(0, delta % JSObject::kFieldsAdded);
}
DescriptorArray* descriptors = map()->instance_descriptors();
Isolate* isolate = GetIsolate();
+ bool is_transitionable_fast_elements_kind =
+ IsTransitionableFastElementsKind(map()->elements_kind());
+
for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.location() == kField) {
DCHECK_EQ(kData, details.kind());
- Representation r = descriptors->GetDetails(i).representation();
+ Representation r = details.representation();
FieldIndex index = FieldIndex::ForDescriptor(map(), i);
if (IsUnboxedDoubleField(index)) {
DCHECK(r.IsDouble());
@@ -372,6 +379,9 @@ void JSObject::JSObjectVerify() {
CHECK(!field_type->NowStable() || field_type->NowContains(value) ||
(!FLAG_use_allocation_folding && value->IsUndefined(isolate)));
}
+ CHECK_IMPLIES(is_transitionable_fast_elements_kind,
+ !Map::IsInplaceGeneralizableField(details.constness(), r,
+ field_type));
}
}
}
@@ -428,13 +438,6 @@ void Map::VerifyOmittedMapChecks() {
}
-void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
- VerifyObjectField(kStorage1Offset);
- VerifyObjectField(kStorage2Offset);
- VerifyObjectField(kStorage3Offset);
-}
-
-
void AliasedArgumentsEntry::AliasedArgumentsEntryVerify() {
VerifySmiField(kAliasedContextSlot);
}
@@ -474,6 +477,78 @@ void TransitionArray::TransitionArrayVerify() {
next_link()->IsTransitionArray());
}
+void JSArgumentsObject::JSArgumentsObjectVerify() {
+ if (IsSloppyArgumentsElementsKind(GetElementsKind())) {
+ JSSloppyArgumentsObject::cast(this)->JSSloppyArgumentsObjectVerify();
+ }
+ JSObjectVerify();
+}
+
+void JSSloppyArgumentsObject::JSSloppyArgumentsObjectVerify() {
+ Isolate* isolate = GetIsolate();
+ if (!map()->is_dictionary_map()) VerifyObjectField(kCalleeOffset);
+ if (isolate->IsInAnyContext(map(), Context::SLOPPY_ARGUMENTS_MAP_INDEX) ||
+ isolate->IsInAnyContext(map(),
+ Context::SLOW_ALIASED_ARGUMENTS_MAP_INDEX) ||
+ isolate->IsInAnyContext(map(),
+ Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX)) {
+ VerifyObjectField(kLengthOffset);
+ VerifyObjectField(kCalleeOffset);
+ }
+ ElementsKind kind = GetElementsKind();
+ CHECK(IsSloppyArgumentsElementsKind(kind));
+ SloppyArgumentsElements::cast(elements())
+ ->SloppyArgumentsElementsVerify(this);
+}
+
+void SloppyArgumentsElements::SloppyArgumentsElementsVerify(
+ JSSloppyArgumentsObject* holder) {
+ Isolate* isolate = GetIsolate();
+ FixedArrayVerify();
+ // Abort verification if only partially initialized (can't use arguments()
+ // getter because it does FixedArray::cast()).
+ if (get(kArgumentsIndex)->IsUndefined(isolate)) return;
+
+ ElementsKind kind = holder->GetElementsKind();
+ bool is_fast = kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS;
+ CHECK(IsFixedArray());
+ CHECK_GE(length(), 2);
+ CHECK_EQ(map(), isolate->heap()->sloppy_arguments_elements_map());
+ Context* context_object = Context::cast(context());
+ FixedArray* arg_elements = FixedArray::cast(arguments());
+ if (arg_elements->length() == 0) {
+ CHECK(arg_elements == isolate->heap()->empty_fixed_array());
+ return;
+ }
+ int nofMappedParameters =
+ length() - SloppyArgumentsElements::kParameterMapStart;
+ CHECK_LE(nofMappedParameters, context_object->length());
+ CHECK_LE(nofMappedParameters, arg_elements->length());
+ ElementsAccessor* accessor;
+ if (is_fast) {
+ accessor = ElementsAccessor::ForKind(FAST_HOLEY_ELEMENTS);
+ } else {
+ accessor = ElementsAccessor::ForKind(DICTIONARY_ELEMENTS);
+ }
+ for (int i = 0; i < nofMappedParameters; i++) {
+ // Verify that each context-mapped argument is either the hole or a valid
+ // Smi within context length range.
+ Object* mapped = get_mapped_entry(i);
+ if (mapped->IsTheHole(isolate)) {
+ // Slow sloppy arguments can be holey.
+ if (!is_fast) continue;
+ // Fast sloppy arguments elements are never holey. Either the element is
+ // context-mapped or present in the arguments elements.
+ CHECK(accessor->HasElement(holder, i, arg_elements));
+ continue;
+ }
+ Object* value = context_object->get(Smi::cast(mapped)->value());
+ CHECK(value->IsObject());
+ // None of the context-mapped entries should exist in the arguments
+ // elements.
+ CHECK(!accessor->HasElement(holder, i, arg_elements));
+ }
+}
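// A compact restatement of the fast-mode invariant verified above: each
// parameter slot is either context-mapped or present in the arguments
// elements, exactly one of the two. The types here are illustrative only.
#include <cstddef>
#include <optional>
#include <vector>

struct SloppyArgsInvariantSketch {
  std::vector<std::optional<int>> mapped;  // context slot index, or nullopt
  std::vector<bool> in_arguments;          // present in arguments elements?

  bool Holds() const {
    for (std::size_t i = 0; i < mapped.size(); ++i) {
      // Context-mapped and present-in-arguments must be mutually exclusive
      // and jointly exhaustive for fast sloppy arguments.
      if (mapped[i].has_value() == in_arguments[i]) return false;
    }
    return true;
  }
};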
void JSGeneratorObject::JSGeneratorObjectVerify() {
// In an expression like "new g()", there can be a point where a generator
@@ -637,7 +712,6 @@ void SharedFunctionInfo::SharedFunctionInfoVerify() {
VerifyObjectField(kFunctionIdentifierOffset);
VerifyObjectField(kInstanceClassNameOffset);
VerifyObjectField(kNameOffset);
- VerifyObjectField(kOptimizedCodeMapOffset);
VerifyObjectField(kOuterScopeInfoOffset);
VerifyObjectField(kScopeInfoOffset);
VerifyObjectField(kScriptOffset);
@@ -1170,12 +1244,6 @@ void ContextExtension::ContextExtensionVerify() {
VerifyObjectField(kExtensionOffset);
}
-void ConstantElementsPair::ConstantElementsPairVerify() {
- CHECK(IsConstantElementsPair());
- VerifySmiField(kElementsKindOffset);
- VerifyObjectField(kConstantValuesOffset);
-}
-
void AccessorInfo::AccessorInfoVerify() {
CHECK(IsAccessorInfo());
VerifyPointer(name());
@@ -1215,13 +1283,6 @@ void InterceptorInfo::InterceptorInfoVerify() {
}
-void CallHandlerInfo::CallHandlerInfoVerify() {
- CHECK(IsCallHandlerInfo());
- VerifyPointer(callback());
- VerifyPointer(data());
-}
-
-
void TemplateInfo::TemplateInfoVerify() {
VerifyPointer(tag());
VerifyPointer(property_list());
@@ -1298,11 +1359,6 @@ void DebugInfo::DebugInfoVerify() {
}
-void BreakPointInfo::BreakPointInfoVerify() {
- CHECK(IsBreakPointInfo());
- VerifyPointer(break_point_objects());
-}
-
void StackFrameInfo::StackFrameInfoVerify() {
CHECK(IsStackFrameInfo());
VerifyPointer(script_name());
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 66d258f128..406f49d996 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -31,13 +31,13 @@
#include "src/lookup-cache-inl.h"
#include "src/lookup.h"
#include "src/objects.h"
-#include "src/objects/hash-table-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/module-info.h"
#include "src/objects/regexp-match-info.h"
#include "src/objects/scope-info.h"
#include "src/property.h"
#include "src/prototype.h"
+#include "src/string-hasher-inl.h"
#include "src/transitions-inl.h"
#include "src/v8memory.h"
@@ -140,10 +140,13 @@ int PropertyDetails::field_width_in_words() const {
return map()->instance_type() == instancetype; \
}
+TYPE_CHECKER(BreakPointInfo, TUPLE2_TYPE)
TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
+TYPE_CHECKER(CallHandlerInfo, TUPLE2_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(Code, CODE_TYPE)
+TYPE_CHECKER(ConstantElementsPair, TUPLE2_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
@@ -179,8 +182,10 @@ TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
+TYPE_CHECKER(SourcePositionTableWithFrameCache, TUPLE2_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
+TYPE_CHECKER(TypeFeedbackInfo, TUPLE3_TYPE)
TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
@@ -203,6 +208,10 @@ bool HeapObject::IsFixedArray() const {
bool HeapObject::IsSloppyArgumentsElements() const { return IsFixedArray(); }
+bool HeapObject::IsJSSloppyArgumentsObject() const {
+ return IsJSArgumentsObject();
+}
+
bool HeapObject::IsJSGeneratorObject() const {
return map()->instance_type() == JS_GENERATOR_OBJECT_TYPE ||
IsJSAsyncGeneratorObject();
@@ -514,24 +523,6 @@ bool HeapObject::IsNormalizedMapCache() const {
return NormalizedMapCache::IsNormalizedMapCache(this);
}
-int NormalizedMapCache::GetIndex(Handle<Map> map) {
- return map->Hash() % NormalizedMapCache::kEntries;
-}
-
-bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject* obj) {
- if (!obj->IsFixedArray()) return false;
- if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
- return false;
- }
-#ifdef VERIFY_HEAP
- if (FLAG_verify_heap) {
- reinterpret_cast<NormalizedMapCache*>(const_cast<HeapObject*>(obj))
- ->NormalizedMapCacheVerify();
- }
-#endif
- return true;
-}
-
bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
bool HeapObject::IsCodeCacheHashTable() const { return IsHashTable(); }
@@ -613,11 +604,14 @@ bool Object::IsMinusZero() const {
CAST_ACCESSOR(AbstractCode)
CAST_ACCESSOR(ArrayList)
CAST_ACCESSOR(BoilerplateDescription)
+CAST_ACCESSOR(BreakPointInfo)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(BytecodeArray)
+CAST_ACCESSOR(CallHandlerInfo)
CAST_ACCESSOR(Cell)
CAST_ACCESSOR(Code)
CAST_ACCESSOR(ConsString)
+CAST_ACCESSOR(ConstantElementsPair)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
@@ -633,6 +627,7 @@ CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(GlobalDictionary)
CAST_ACCESSOR(HandlerTable)
CAST_ACCESSOR(HeapObject)
+CAST_ACCESSOR(JSArgumentsObject);
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
@@ -656,6 +651,7 @@ CAST_ACCESSOR(JSPromiseCapability)
CAST_ACCESSOR(JSPromise)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSSetIterator)
+CAST_ACCESSOR(JSSloppyArgumentsObject)
CAST_ACCESSOR(JSAsyncFromSyncIterator)
CAST_ACCESSOR(JSStringIterator)
CAST_ACCESSOR(JSArrayIterator)
@@ -665,7 +661,6 @@ CAST_ACCESSOR(JSWeakCollection)
CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(LayoutDescriptor)
-CAST_ACCESSOR(Map)
CAST_ACCESSOR(ModuleInfo)
CAST_ACCESSOR(Name)
CAST_ACCESSOR(NameDictionary)
@@ -685,7 +680,9 @@ CAST_ACCESSOR(SeqOneByteString)
CAST_ACCESSOR(SeqString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(SharedFunctionInfo)
+CAST_ACCESSOR(SourcePositionTableWithFrameCache)
CAST_ACCESSOR(SlicedString)
+CAST_ACCESSOR(SloppyArgumentsElements)
CAST_ACCESSOR(Smi)
CAST_ACCESSOR(String)
CAST_ACCESSOR(StringSet)
@@ -694,11 +691,11 @@ CAST_ACCESSOR(Struct)
CAST_ACCESSOR(Symbol)
CAST_ACCESSOR(TemplateInfo)
CAST_ACCESSOR(ThinString)
+CAST_ACCESSOR(TypeFeedbackInfo)
CAST_ACCESSOR(UnseededNumberDictionary)
CAST_ACCESSOR(WeakCell)
CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakHashTable)
-CAST_ACCESSOR(SloppyArgumentsElements)
#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
STRUCT_LIST(MAKE_STRUCT_CAST)
@@ -832,6 +829,11 @@ bool String::HasOnlyOneByteChars() {
IsOneByteRepresentation();
}
+bool StringShape::HasOnlyOneByteChars() {
+ return (type_ & kStringEncodingMask) == kOneByteStringTag ||
+ (type_ & kOneByteDataHintMask) == kOneByteDataHintTag;
+}
+
bool StringShape::IsCons() {
return (type_ & kStringRepresentationMask) == kConsStringTag;
}
@@ -965,6 +967,14 @@ class SeqOneByteSubStringKey : public HashTableKey {
DCHECK(string_->IsSeqOneByteString());
}
+// VS 2017 on official builds gives this spurious warning:
+// warning C4789: buffer 'key' of size 16 bytes will be overrun; 4 bytes will
+// be written starting at offset 16
+// https://bugs.chromium.org/p/v8/issues/detail?id=6068
+#if defined(V8_CC_MSVC)
+#pragma warning(push)
+#pragma warning(disable : 4789)
+#endif
uint32_t Hash() override {
DCHECK(length_ >= 0);
DCHECK(from_ + length_ <= string_->length());
@@ -975,6 +985,9 @@ class SeqOneByteSubStringKey : public HashTableKey {
DCHECK(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
+#if defined(V8_CC_MSVC)
+#pragma warning(pop)
+#endif
uint32_t HashForObject(Object* other) override {
return String::cast(other)->Hash();
@@ -1090,9 +1103,10 @@ bool Object::ToUint32(uint32_t* value) {
// static
MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
- Handle<Object> object) {
+ Handle<Object> object,
+ const char* method_name) {
if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
- return ToObject(isolate, object, isolate->native_context());
+ return ToObject(isolate, object, isolate->native_context(), method_name);
}
@@ -1270,8 +1284,15 @@ bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
reinterpret_cast<Object*>(base::NoBarrier_Load( \
reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
+#ifdef V8_CONCURRENT_MARKING
+#define WRITE_FIELD(p, offset, value) \
+ base::NoBarrier_Store( \
+ reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
+ reinterpret_cast<base::AtomicWord>(value));
+#else
#define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
+#endif
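// What the V8_CONCURRENT_MARKING variant above buys, sketched with
// std::atomic standing in for V8's base:: wrappers: a concurrent marker may
// read the field while the mutator writes it, so the store must be a relaxed
// atomic rather than a plain assignment.
#include <atomic>

static inline void WriteFieldRelaxed(std::atomic<void*>& slot, void* value) {
  // Relaxed ordering suffices here: the goal is race-freedom, not ordering.
  slot.store(value, std::memory_order_relaxed);
}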
#define RELEASE_WRITE_FIELD(p, offset, value) \
base::Release_Store( \
@@ -1445,21 +1466,22 @@ Isolate* HeapObject::GetIsolate() const {
return GetHeap()->isolate();
}
-
Map* HeapObject::map() const {
return map_word().ToMap();
}
void HeapObject::set_map(Map* value) {
+ if (value != nullptr) {
+#ifdef VERIFY_HEAP
+ value->GetHeap()->VerifyObjectLayoutChange(this, value);
+#endif
+ }
set_map_word(MapWord::FromMap(value));
if (value != nullptr) {
// TODO(1600) We are passing NULL as a slot because maps can never be on
// evacuation candidate.
value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
-#ifdef VERIFY_HEAP
- value->GetHeap()->VerifyObjectLayoutChange(this, value);
-#endif
}
}
@@ -1470,28 +1492,43 @@ Map* HeapObject::synchronized_map() {
void HeapObject::synchronized_set_map(Map* value) {
- synchronized_set_map_word(MapWord::FromMap(value));
if (value != nullptr) {
- // TODO(1600) We are passing NULL as a slot because maps can never be on
- // evacuation candidate.
- value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
#ifdef VERIFY_HEAP
value->GetHeap()->VerifyObjectLayoutChange(this, value);
#endif
}
-}
-
-
-void HeapObject::synchronized_set_map_no_write_barrier(Map* value) {
synchronized_set_map_word(MapWord::FromMap(value));
+ if (value != nullptr) {
+ // TODO(1600) We are passing NULL as a slot because maps can never be on
+ // evacuation candidate.
+ value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
+ }
}
// Unsafe accessor omitting write barrier.
void HeapObject::set_map_no_write_barrier(Map* value) {
+ if (value != nullptr) {
+#ifdef VERIFY_HEAP
+ value->GetHeap()->VerifyObjectLayoutChange(this, value);
+#endif
+ }
set_map_word(MapWord::FromMap(value));
}
+void HeapObject::set_map_after_allocation(Map* value, WriteBarrierMode mode) {
+ set_map_word(MapWord::FromMap(value));
+ if (mode != SKIP_WRITE_BARRIER) {
+ DCHECK(value != nullptr);
+ // TODO(1600) We are passing NULL as a slot because maps can never be on
+ // evacuation candidate.
+ value->GetHeap()->incremental_marking()->RecordWrite(this, nullptr, value);
+ }
+}
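// Sketch of the contract of set_map_after_allocation above: immediately
// after allocation the object cannot yet be referenced by marked objects, so
// the caller may pass SKIP_WRITE_BARRIER; otherwise the incremental-marking
// barrier must run. The enum and helper below are assumptions for
// illustration, not V8's API.
#include <cassert>

enum BarrierModeSketch { kSkipBarrier, kUpdateBarrier };

inline void SetMapAfterAllocationSketch(void** map_slot, void* map,
                                        BarrierModeSketch mode) {
  *map_slot = map;  // plain store of the new map word
  if (mode != kSkipBarrier) {
    assert(map != nullptr);
    // An incremental-marking RecordWrite(...) call would go here.
  }
}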
+
+HeapObject** HeapObject::map_slot() {
+ return reinterpret_cast<HeapObject**>(FIELD_ADDR(this, kMapOffset));
+}
MapWord HeapObject::map_word() const {
return MapWord(
@@ -1952,18 +1989,6 @@ InterceptorInfo* JSObject::GetNamedInterceptor() {
return map()->GetNamedInterceptor();
}
-InterceptorInfo* Map::GetNamedInterceptor() {
- DCHECK(has_named_interceptor());
- FunctionTemplateInfo* info = GetFunctionTemplateInfo();
- return InterceptorInfo::cast(info->named_property_handler());
-}
-
-InterceptorInfo* Map::GetIndexedInterceptor() {
- DCHECK(has_indexed_interceptor());
- FunctionTemplateInfo* info = GetFunctionTemplateInfo();
- return InterceptorInfo::cast(info->indexed_property_handler());
-}
-
double Oddball::to_number_raw() const {
return READ_DOUBLE_FIELD(this, kToNumberRawOffset);
}
@@ -2718,25 +2743,25 @@ inline int DescriptorArray::number_of_entries() {
bool DescriptorArray::HasEnumCache() {
- return !IsEmpty() && !get(kEnumCacheIndex)->IsSmi();
+ return !IsEmpty() && !get(kEnumCacheBridgeIndex)->IsSmi();
}
void DescriptorArray::CopyEnumCacheFrom(DescriptorArray* array) {
- set(kEnumCacheIndex, array->get(kEnumCacheIndex));
+ set(kEnumCacheBridgeIndex, array->get(kEnumCacheBridgeIndex));
}
FixedArray* DescriptorArray::GetEnumCache() {
DCHECK(HasEnumCache());
- FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
+ FixedArray* bridge = FixedArray::cast(get(kEnumCacheBridgeIndex));
return FixedArray::cast(bridge->get(kEnumCacheBridgeCacheIndex));
}
bool DescriptorArray::HasEnumIndicesCache() {
if (IsEmpty()) return false;
- Object* object = get(kEnumCacheIndex);
+ Object* object = get(kEnumCacheBridgeIndex);
if (object->IsSmi()) return false;
FixedArray* bridge = FixedArray::cast(object);
return !bridge->get(kEnumCacheBridgeIndicesCacheIndex)->IsSmi();
@@ -2745,17 +2770,11 @@ bool DescriptorArray::HasEnumIndicesCache() {
FixedArray* DescriptorArray::GetEnumIndicesCache() {
DCHECK(HasEnumIndicesCache());
- FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
+ FixedArray* bridge = FixedArray::cast(get(kEnumCacheBridgeIndex));
return FixedArray::cast(bridge->get(kEnumCacheBridgeIndicesCacheIndex));
}
-Object** DescriptorArray::GetEnumCacheSlot() {
- DCHECK(HasEnumCache());
- return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
- kEnumCacheOffset);
-}
-
// Perform a binary search in a fixed array.
template <SearchMode search_mode, typename T>
int BinarySearch(T* array, Name* name, int valid_entries,
@@ -3092,7 +3111,11 @@ void HashTableBase::ElementsRemoved(int n) {
// static
int HashTableBase::ComputeCapacity(int at_least_space_for) {
- int capacity = base::bits::RoundUpToPowerOfTwo32(at_least_space_for * 2);
+ // Add 50% slack to make slot collisions sufficiently unlikely.
+ // See matching computation in HashTable::HasSufficientCapacityToAdd().
+ // Must be kept in sync with CodeStubAssembler::HashTableComputeCapacity().
+ int raw_cap = at_least_space_for + (at_least_space_for >> 1);
+ int capacity = base::bits::RoundUpToPowerOfTwo32(raw_cap);
return Max(capacity, kMinCapacity);
}
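// A worked sketch of the new sizing rule: add 50% slack, round up to a power
// of two, then clamp to a minimum capacity (kMinCapacity = 4 is an assumed
// value here).
#include <algorithm>
#include <cstdint>

static uint32_t RoundUpToPowerOfTwo32Sketch(uint32_t v) {
  v--;
  v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16;
  return v + 1;
}

int ComputeCapacitySketch(int at_least_space_for) {
  const int kMinCapacity = 4;
  int raw_cap = at_least_space_for + (at_least_space_for >> 1);
  int capacity = static_cast<int>(
      RoundUpToPowerOfTwo32Sketch(static_cast<uint32_t>(raw_cap)));
  return std::max(capacity, kMinCapacity);
}
// ComputeCapacitySketch(8) -> 12 -> 16, so the load factor stays <= 2/3.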
@@ -4026,13 +4049,20 @@ int BytecodeArray::parameter_count() const {
ACCESSORS(BytecodeArray, constant_pool, FixedArray, kConstantPoolOffset)
ACCESSORS(BytecodeArray, handler_table, FixedArray, kHandlerTableOffset)
-ACCESSORS(BytecodeArray, source_position_table, ByteArray,
+ACCESSORS(BytecodeArray, source_position_table, Object,
kSourcePositionTableOffset)
Address BytecodeArray::GetFirstBytecodeAddress() {
return reinterpret_cast<Address>(this) - kHeapObjectTag + kHeaderSize;
}
+ByteArray* BytecodeArray::SourcePositionTable() {
+ Object* maybe_table = source_position_table();
+ if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
+ DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ return SourcePositionTableWithFrameCache::cast(maybe_table)
+ ->source_position_table();
+}
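// The accessor above resolves a field that holds either the table directly
// or a (table, frame cache) wrapper. The same shape sketched with
// std::variant, purely as an analogy (V8 uses tagged heap objects, not
// variants; `int` stands in for the ByteArray):
#include <variant>

struct FrameCacheSketch { int table; /* plus cached stack frames */ };
using PositionsSketch = std::variant<int, FrameCacheSketch>;

int TableOf(const PositionsSketch& p) {
  if (const int* direct = std::get_if<int>(&p)) return *direct;
  return std::get<FrameCacheSketch>(p).table;  // unwrap the wrapper case
}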
int BytecodeArray::BytecodeArraySize() { return SizeFor(this->length()); }
@@ -4040,7 +4070,7 @@ int BytecodeArray::SizeIncludingMetadata() {
int size = BytecodeArraySize();
size += constant_pool()->Size();
size += handler_table()->Size();
- size += source_position_table()->Size();
+ size += SourcePositionTable()->Size();
return size;
}
@@ -4152,50 +4182,56 @@ typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
template <class Traits>
void FixedTypedArray<Traits>::set(int index, ElementType value) {
- DCHECK((index >= 0) && (index < this->length()));
+ CHECK((index >= 0) && (index < this->length()));
ElementType* ptr = reinterpret_cast<ElementType*>(DataPtr());
ptr[index] = value;
}
-
template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from_int(int value) {
+typename Traits::ElementType FixedTypedArray<Traits>::from(int value) {
return static_cast<ElementType>(value);
}
-
-template <> inline
-uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from_int(int value) {
+template <>
+inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(int value) {
if (value < 0) return 0;
if (value > 0xFF) return 0xFF;
return static_cast<uint8_t>(value);
}
+template <class Traits>
+typename Traits::ElementType FixedTypedArray<Traits>::from(uint32_t value) {
+ return static_cast<ElementType>(value);
+}
+
+template <>
+inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(uint32_t value) {
+ // We need this special case for Uint32 -> Uint8Clamped, because the highest
+ // Uint32 values will be negative as an int, clamping to 0, rather than 255.
+ if (value > 0xFF) return 0xFF;
+ return static_cast<uint8_t>(value);
+}
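// Why the dedicated uint32_t overload above is needed, as a worked example:
// funneling a large uint32 through the int path reinterprets it as negative
// on typical two's-complement targets and clamps to 0 instead of 255.
#include <cstdint>

static uint8_t ClampedFromIntSketch(int v) {
  if (v < 0) return 0;
  return v > 0xFF ? 0xFF : static_cast<uint8_t>(v);
}
static uint8_t ClampedFromUintSketch(uint32_t v) {
  return v > 0xFF ? 0xFF : static_cast<uint8_t>(v);
}
// ClampedFromIntSketch(static_cast<int>(4000000000u)) == 0   (wrong)
// ClampedFromUintSketch(4000000000u)                  == 255 (intended)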
template <class Traits>
-typename Traits::ElementType FixedTypedArray<Traits>::from_double(
- double value) {
+typename Traits::ElementType FixedTypedArray<Traits>::from(double value) {
return static_cast<ElementType>(DoubleToInt32(value));
}
-
-template<> inline
-uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from_double(double value) {
+template <>
+inline uint8_t FixedTypedArray<Uint8ClampedArrayTraits>::from(double value) {
  // Handle NaNs and values less than zero, which clamp to zero.
if (!(value > 0)) return 0;
if (value > 0xFF) return 0xFF;
return static_cast<uint8_t>(lrint(value));
}
-
-template<> inline
-float FixedTypedArray<Float32ArrayTraits>::from_double(double value) {
+template <>
+inline float FixedTypedArray<Float32ArrayTraits>::from(double value) {
return static_cast<float>(value);
}
-
-template<> inline
-double FixedTypedArray<Float64ArrayTraits>::from_double(double value) {
+template <>
+inline double FixedTypedArray<Float64ArrayTraits>::from(double value) {
return value;
}
@@ -4211,10 +4247,10 @@ void FixedTypedArray<Traits>::SetValue(uint32_t index, Object* value) {
ElementType cast_value = Traits::defaultValue();
if (value->IsSmi()) {
int int_value = Smi::cast(value)->value();
- cast_value = from_int(int_value);
+ cast_value = from(int_value);
} else if (value->IsHeapNumber()) {
double double_value = HeapNumber::cast(value)->value();
- cast_value = from_double(double_value);
+ cast_value = from(double_value);
} else {
// Clamp undefined to the default value. All other types have been
// converted to a number type further up in the call chain.
@@ -5253,9 +5289,9 @@ int AbstractCode::instruction_size() {
ByteArray* AbstractCode::source_position_table() {
if (IsCode()) {
- return GetCode()->source_position_table();
+ return GetCode()->SourcePositionTable();
} else {
- return GetBytecodeArray()->source_position_table();
+ return GetBytecodeArray()->SourcePositionTable();
}
}
@@ -5267,6 +5303,20 @@ void AbstractCode::set_source_position_table(ByteArray* source_position_table) {
}
}
+Object* AbstractCode::stack_frame_cache() {
+ Object* maybe_table;
+ if (IsCode()) {
+ maybe_table = GetCode()->source_position_table();
+ } else {
+ maybe_table = GetBytecodeArray()->source_position_table();
+ }
+ if (maybe_table->IsSourcePositionTableWithFrameCache()) {
+ return SourcePositionTableWithFrameCache::cast(maybe_table)
+ ->stack_frame_cache();
+ }
+ return Smi::kZero;
+}
+
int AbstractCode::SizeIncludingMetadata() {
if (IsCode()) {
return GetCode()->SizeIncludingMetadata();
@@ -5464,6 +5514,9 @@ void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
set_constructor_or_backpointer(value, mode);
}
+ACCESSORS(JSArgumentsObject, length, Object, kLengthOffset);
+ACCESSORS(JSSloppyArgumentsObject, callee, Object, kCalleeOffset);
+
ACCESSORS(Map, code_cache, FixedArray, kCodeCacheOffset)
ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(Map, weak_cell_cache, Object, kWeakCellCacheOffset)
@@ -5763,7 +5816,7 @@ ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
ACCESSORS_CHECKED(Script, wasm_compiled_module, Object, kEvalFromSharedOffset,
this->type() == TYPE_WASM)
-ACCESSORS(Script, preparsed_scope_data, FixedTypedArrayBase,
+ACCESSORS(Script, preparsed_scope_data, PodArray<uint32_t>,
kPreParsedScopeDataOffset)
Script::CompilationType Script::compilation_type() {
@@ -5838,15 +5891,19 @@ SMI_ACCESSORS(StackFrameInfo, flag, kFlagIndex)
BOOL_ACCESSORS(StackFrameInfo, flag, is_eval, kIsEvalBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_constructor, kIsConstructorBit)
BOOL_ACCESSORS(StackFrameInfo, flag, is_wasm, kIsWasmBit)
+SMI_ACCESSORS(StackFrameInfo, id, kIdIndex)
+
+ACCESSORS(SourcePositionTableWithFrameCache, source_position_table, ByteArray,
+ kSourcePositionTableIndex)
+ACCESSORS(SourcePositionTableWithFrameCache, stack_frame_cache,
+ UnseededNumberDictionary, kStackFrameCacheIndex)
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
-ACCESSORS(SharedFunctionInfo, optimized_code_map, FixedArray,
- kOptimizedCodeMapOffset)
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, feedback_metadata, FeedbackMetadata,
kFeedbackMetadataOffset)
SMI_ACCESSORS(SharedFunctionInfo, function_literal_id, kFunctionLiteralIdOffset)
-#if TRACE_MAPS
+#if V8_SFI_HAS_UNIQUE_ID
SMI_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
#endif
ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
@@ -6138,6 +6195,16 @@ bool SharedFunctionInfo::is_compiled() const {
return code() != builtins->builtin(Builtins::kCompileLazy);
}
+int SharedFunctionInfo::GetLength() const {
+ DCHECK(is_compiled());
+ DCHECK(HasLength());
+ return length();
+}
+
+bool SharedFunctionInfo::HasLength() const {
+ DCHECK_IMPLIES(length() < 0, length() == kInvalidLength);
+ return length() != kInvalidLength;
+  return length() != kInvalidLength;
+}
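// Sketch of the sentinel pattern introduced above: length() holds an invalid
// marker until compilation computes the real value, and GetLength() is only
// legal once HasLength() reports true. The sentinel value is an assumption.
#include <cassert>

class LengthSlotSketch {
 public:
  static const int kInvalidLength = -1;  // assumed sentinel
  bool HasLength() const { return length_ != kInvalidLength; }
  int GetLength() const {
    assert(HasLength());  // mirrors the DCHECKs in the accessors above
    return length_;
  }
  void SetLength(int n) { length_ = n; }

 private:
  int length_ = kInvalidLength;
};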
bool SharedFunctionInfo::has_simple_parameters() {
return scope_info()->HasSimpleParameters();
@@ -6349,10 +6416,6 @@ bool SharedFunctionInfo::IsSubjectToDebugging() {
return IsUserJavaScript() && !HasAsmWasmData();
}
-bool SharedFunctionInfo::OptimizedCodeMapIsCleared() const {
- return optimized_code_map() == GetHeap()->empty_fixed_array();
-}
-
FeedbackVector* JSFunction::feedback_vector() const {
DCHECK(feedback_vector_cell()->value()->IsFeedbackVector());
return FeedbackVector::cast(feedback_vector_cell()->value());
@@ -6436,14 +6499,24 @@ void JSFunction::set_code_no_write_barrier(Code* value) {
WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
}
+void JSFunction::ClearOptimizedCodeSlot(const char* reason) {
+ if (has_feedback_vector() && feedback_vector()->has_optimized_code()) {
+ if (FLAG_trace_opt) {
+ PrintF("[evicting entry from optimizing code feedback slot (%s) for ",
+ reason);
+ shared()->ShortPrint();
+ PrintF("]\n");
+ }
+ feedback_vector()->ClearOptimizedCode();
+ }
+}
void JSFunction::ReplaceCode(Code* code) {
bool was_optimized = IsOptimized();
bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION;
if (was_optimized && is_optimized) {
- shared()->EvictFromOptimizedCodeMap(this->code(),
- "Replacing with another optimized code");
+ ClearOptimizedCodeSlot("Replacing with another optimized code");
}
set_code(code);
@@ -6666,7 +6739,7 @@ INT_ACCESSORS(Code, constant_pool_offset, kConstantPoolOffset)
CODE_ACCESSORS(relocation_info, ByteArray, kRelocationInfoOffset)
CODE_ACCESSORS(handler_table, FixedArray, kHandlerTableOffset)
CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-CODE_ACCESSORS(source_position_table, ByteArray, kSourcePositionTableOffset)
+CODE_ACCESSORS(source_position_table, Object, kSourcePositionTableOffset)
CODE_ACCESSORS(trap_handler_index, Smi, kTrapHandlerIndex)
CODE_ACCESSORS(raw_type_feedback_info, Object, kTypeFeedbackInfoOffset)
CODE_ACCESSORS(next_code_link, Object, kNextCodeLinkOffset)
@@ -6699,6 +6772,13 @@ void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
value, mode);
}
+ByteArray* Code::SourcePositionTable() {
+ Object* maybe_table = source_position_table();
+ if (maybe_table->IsByteArray()) return ByteArray::cast(maybe_table);
+ DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ return SourcePositionTableWithFrameCache::cast(maybe_table)
+ ->source_position_table();
+}
uint32_t Code::stub_key() {
DCHECK(IsCodeStubOrIC());
@@ -6765,7 +6845,9 @@ int Code::SizeIncludingMetadata() {
size += relocation_info()->Size();
size += deoptimization_data()->Size();
size += handler_table()->Size();
- if (kind() == FUNCTION) size += source_position_table()->Size();
+ if (kind() == FUNCTION) {
+ size += SourcePositionTable()->Size();
+ }
return size;
}
@@ -6822,6 +6904,31 @@ void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
ACCESSORS(JSArrayBuffer, byte_length, Object, kByteLengthOffset)
+void* JSArrayBuffer::allocation_base() const {
+ intptr_t ptr = READ_INTPTR_FIELD(this, kAllocationBaseOffset);
+ return reinterpret_cast<void*>(ptr);
+}
+
+void JSArrayBuffer::set_allocation_base(void* value, WriteBarrierMode mode) {
+ intptr_t ptr = reinterpret_cast<intptr_t>(value);
+ WRITE_INTPTR_FIELD(this, kAllocationBaseOffset, ptr);
+}
+
+size_t JSArrayBuffer::allocation_length() const {
+ return *reinterpret_cast<const size_t*>(
+ FIELD_ADDR_CONST(this, kAllocationLengthOffset));
+}
+
+void JSArrayBuffer::set_allocation_length(size_t value) {
+ (*reinterpret_cast<size_t*>(FIELD_ADDR(this, kAllocationLengthOffset))) =
+ value;
+}
+
+ArrayBuffer::Allocator::AllocationMode JSArrayBuffer::allocation_mode() const {
+ using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
+ return has_guard_region() ? AllocationMode::kReservation
+ : AllocationMode::kNormal;
+}
void JSArrayBuffer::set_bit_field(uint32_t bits) {
if (kInt32Size != kPointerSize) {
@@ -6844,7 +6951,6 @@ bool JSArrayBuffer::is_external() { return IsExternal::decode(bit_field()); }
void JSArrayBuffer::set_is_external(bool value) {
- DCHECK(!value || !has_guard_region());
set_bit_field(IsExternal::update(bit_field(), value));
}
@@ -6874,7 +6980,7 @@ void JSArrayBuffer::set_is_shared(bool value) {
set_bit_field(IsShared::update(bit_field(), value));
}
-bool JSArrayBuffer::has_guard_region() {
+bool JSArrayBuffer::has_guard_region() const {
return HasGuardRegion::decode(bit_field());
}
@@ -6954,8 +7060,18 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
THROW_NEW_ERROR(isolate, NewTypeError(message), JSTypedArray);
}
- // TODO(caitp): throw if array.[[ViewedArrayBuffer]] is neutered (per v8:4648)
- return Handle<JSTypedArray>::cast(receiver);
+ Handle<JSTypedArray> array = Handle<JSTypedArray>::cast(receiver);
+ if (V8_UNLIKELY(array->WasNeutered())) {
+ const MessageTemplate::Template message =
+ MessageTemplate::kDetachedOperation;
+ Handle<String> operation =
+ isolate->factory()->NewStringFromAsciiChecked(method_name);
+ THROW_NEW_ERROR(isolate, NewTypeError(message, operation), JSTypedArray);
+ }
+
+  // The spec says to return `buffer`, but doing so may disrupt current
+  // implementations, so it is more useful to return the array for now.
+ return array;
}
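// A hedged sketch of the guard added above: typed-array operations now fail
// early when the underlying buffer was detached (neutered), naming the
// calling method in the error. The exception type and struct below are
// stand-ins, not V8's API (V8 raises a JS TypeError, not a C++ exception).
#include <stdexcept>
#include <string>

struct TypedArraySketch { bool detached = false; };

TypedArraySketch& ValidateSketch(TypedArraySketch& array, const char* method) {
  if (array.detached) {
    throw std::runtime_error(std::string(method) +
                             " called on a detached ArrayBuffer");
  }
  return array;  // returning the array mirrors the pragmatic choice above
}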
#ifdef VERIFY_HEAP
@@ -7206,152 +7322,6 @@ bool Name::IsPrivate() {
return this->IsSymbol() && Symbol::cast(this)->is_private();
}
-
-StringHasher::StringHasher(int length, uint32_t seed)
- : length_(length),
- raw_running_hash_(seed),
- array_index_(0),
- is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
- is_first_char_(true) {
- DCHECK(FLAG_randomize_hashes || raw_running_hash_ == 0);
-}
-
-
-bool StringHasher::has_trivial_hash() {
- return length_ > String::kMaxHashCalcLength;
-}
-
-
-uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint16_t c) {
- running_hash += c;
- running_hash += (running_hash << 10);
- running_hash ^= (running_hash >> 6);
- return running_hash;
-}
-
-
-uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
- running_hash += (running_hash << 3);
- running_hash ^= (running_hash >> 11);
- running_hash += (running_hash << 15);
- if ((running_hash & String::kHashBitMask) == 0) {
- return kZeroHash;
- }
- return running_hash;
-}
-
-
-uint32_t StringHasher::ComputeRunningHash(uint32_t running_hash,
- const uc16* chars, int length) {
- DCHECK_NOT_NULL(chars);
- DCHECK(length >= 0);
- for (int i = 0; i < length; ++i) {
- running_hash = AddCharacterCore(running_hash, *chars++);
- }
- return running_hash;
-}
-
-
-uint32_t StringHasher::ComputeRunningHashOneByte(uint32_t running_hash,
- const char* chars,
- int length) {
- DCHECK_NOT_NULL(chars);
- DCHECK(length >= 0);
- for (int i = 0; i < length; ++i) {
- uint16_t c = static_cast<uint16_t>(*chars++);
- running_hash = AddCharacterCore(running_hash, c);
- }
- return running_hash;
-}
-
-
-void StringHasher::AddCharacter(uint16_t c) {
- // Use the Jenkins one-at-a-time hash function to update the hash
- // for the given character.
- raw_running_hash_ = AddCharacterCore(raw_running_hash_, c);
-}
-
-
-bool StringHasher::UpdateIndex(uint16_t c) {
- DCHECK(is_array_index_);
- if (c < '0' || c > '9') {
- is_array_index_ = false;
- return false;
- }
- int d = c - '0';
- if (is_first_char_) {
- is_first_char_ = false;
- if (c == '0' && length_ > 1) {
- is_array_index_ = false;
- return false;
- }
- }
- if (array_index_ > 429496729U - ((d + 3) >> 3)) {
- is_array_index_ = false;
- return false;
- }
- array_index_ = array_index_ * 10 + d;
- return true;
-}
-
-
-template<typename Char>
-inline void StringHasher::AddCharacters(const Char* chars, int length) {
- DCHECK(sizeof(Char) == 1 || sizeof(Char) == 2);
- int i = 0;
- if (is_array_index_) {
- for (; i < length; i++) {
- AddCharacter(chars[i]);
- if (!UpdateIndex(chars[i])) {
- i++;
- break;
- }
- }
- }
- for (; i < length; i++) {
- DCHECK(!is_array_index_);
- AddCharacter(chars[i]);
- }
-}
-
-
-template <typename schar>
-uint32_t StringHasher::HashSequentialString(const schar* chars,
- int length,
- uint32_t seed) {
- StringHasher hasher(length, seed);
- if (!hasher.has_trivial_hash()) hasher.AddCharacters(chars, length);
- return hasher.GetHashField();
-}
-
-
-IteratingStringHasher::IteratingStringHasher(int len, uint32_t seed)
- : StringHasher(len, seed) {}
-
-
-uint32_t IteratingStringHasher::Hash(String* string, uint32_t seed) {
- IteratingStringHasher hasher(string->length(), seed);
- // Nothing to do.
- if (hasher.has_trivial_hash()) return hasher.GetHashField();
- ConsString* cons_string = String::VisitFlat(&hasher, string);
- if (cons_string == nullptr) return hasher.GetHashField();
- hasher.VisitConsString(cons_string);
- return hasher.GetHashField();
-}
-
-
-void IteratingStringHasher::VisitOneByteString(const uint8_t* chars,
- int length) {
- AddCharacters(chars, length);
-}
-
-
-void IteratingStringHasher::VisitTwoByteString(const uint16_t* chars,
- int length) {
- AddCharacters(chars, length);
-}
-
-
bool Name::AsArrayIndex(uint32_t* index) {
return IsString() && String::cast(this)->AsArrayIndex(index);
}
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 8974f2815c..2ea68863cf 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -599,14 +599,6 @@ void Map::MapPrint(std::ostream& os) { // NOLINT
}
-void TypeFeedbackInfo::TypeFeedbackInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "TypeFeedbackInfo");
- os << "\n - ic_total_count: " << ic_total_count()
- << ", ic_with_type_info_count: " << ic_with_type_info_count()
- << ", ic_generic_count: " << ic_generic_count() << "\n";
-}
-
-
void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(
std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AliasedArgumentsEntry");
@@ -712,6 +704,8 @@ void FeedbackVector::FeedbackVectorPrint(std::ostream& os) { // NOLINT
return;
}
+ os << "\n Optimized Code: " << Brief(optimized_code());
+
FeedbackMetadataIterator iter(metadata());
while (iter.HasNext()) {
FeedbackSlot slot = iter.Next();
@@ -1100,7 +1094,6 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) { // NOLINT
os << "\n - no debug info";
}
os << "\n - length = " << length();
- os << "\n - optimized_code_map = " << Brief(optimized_code_map());
os << "\n - feedback_metadata = ";
feedback_metadata()->FeedbackMetadataPrint(os);
if (HasBytecodeArray()) {
@@ -1338,14 +1331,6 @@ void ContextExtension::ContextExtensionPrint(std::ostream& os) { // NOLINT
os << "\n";
}
-void ConstantElementsPair::ConstantElementsPairPrint(
- std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "ConstantElementsPair");
- os << "\n - elements_kind: " << static_cast<ElementsKind>(elements_kind());
- os << "\n - constant_values: " << Brief(constant_values());
- os << "\n";
-}
-
void AccessorPair::AccessorPairPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "AccessorPair");
os << "\n - getter: " << Brief(getter());
@@ -1376,14 +1361,6 @@ void InterceptorInfo::InterceptorInfoPrint(std::ostream& os) { // NOLINT
}
-void CallHandlerInfo::CallHandlerInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "CallHandlerInfo");
- os << "\n - callback: " << Brief(callback());
- os << "\n - data: " << Brief(data());
- os << "\n";
-}
-
-
void FunctionTemplateInfo::FunctionTemplateInfoPrint(
std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "FunctionTemplateInfo");
@@ -1485,13 +1462,6 @@ void DebugInfo::DebugInfoPrint(std::ostream& os) { // NOLINT
}
-void BreakPointInfo::BreakPointInfoPrint(std::ostream& os) { // NOLINT
- HeapObject::PrintHeader(os, "BreakPointInfo");
- os << "\n - source_position: " << source_position();
- os << "\n - break_point_objects: " << Brief(break_point_objects());
- os << "\n";
-}
-
void StackFrameInfo::StackFrameInfoPrint(std::ostream& os) { // NOLINT
HeapObject::PrintHeader(os, "StackFrame");
os << "\n - line_number: " << line_number();
@@ -1553,9 +1523,7 @@ void LayoutDescriptor::Print(std::ostream& os) { // NOLINT
#endif // OBJECT_PRINT
-
-#if TRACE_MAPS
-
+#if V8_TRACE_MAPS
void Name::NameShortPrint() {
if (this->IsString()) {
@@ -1586,9 +1554,7 @@ int Name::NameShortPrint(Vector<char> str) {
}
}
-
-#endif // TRACE_MAPS
-
+#endif // V8_TRACE_MAPS
#if defined(DEBUG) || defined(OBJECT_PRINT)
// This method is only meant to be called from gdb for debugging purposes.
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index fe7f0ccdfa..e2748a5216 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -58,6 +58,7 @@
#include "src/objects/code-cache-inl.h"
#include "src/objects/compilation-cache-inl.h"
#include "src/objects/frame-array-inl.h"
+#include "src/objects/map.h"
#include "src/property-descriptor.h"
#include "src/prototype.h"
#include "src/regexp/jsregexp.h"
@@ -108,10 +109,10 @@ Handle<FieldType> Object::OptimalType(Isolate* isolate,
return FieldType::Any(isolate);
}
-
MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
Handle<Object> object,
- Handle<Context> native_context) {
+ Handle<Context> native_context,
+ const char* method_name) {
if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
Handle<JSFunction> constructor;
if (object->IsSmi()) {
@@ -120,6 +121,14 @@ MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
int constructor_function_index =
Handle<HeapObject>::cast(object)->map()->GetConstructorFunctionIndex();
if (constructor_function_index == Map::kNoConstructorFunctionIndex) {
+ if (method_name != nullptr) {
+ THROW_NEW_ERROR(
+ isolate,
+ NewTypeError(
+ MessageTemplate::kCalledOnNullOrUndefined,
+ isolate->factory()->NewStringFromAsciiChecked(method_name)),
+ JSReceiver);
+ }
THROW_NEW_ERROR(isolate,
NewTypeError(MessageTemplate::kUndefinedOrNullToObject),
JSReceiver);
@@ -2054,11 +2063,10 @@ MUST_USE_RESULT Maybe<bool> FastAssign(
if (use_set) {
LookupIterator it(target, next_key, target);
- bool call_to_js = it.IsFound() && it.state() != LookupIterator::DATA;
Maybe<bool> result = Object::SetProperty(
&it, prop_value, STRICT, Object::CERTAINLY_NOT_STORE_FROM_KEYED);
if (result.IsNothing()) return result;
- if (stable && call_to_js) stable = from->map() == *map;
+ if (stable) stable = from->map() == *map;
} else {
if (excluded_properties != nullptr &&
HasExcludedProperty(excluded_properties, next_key)) {
@@ -2468,6 +2476,7 @@ Handle<String> String::SlowFlatten(Handle<ConsString> cons,
bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
+ DisallowHeapAllocation no_allocation;
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
DCHECK(!this->IsExternalString());
@@ -2490,7 +2499,9 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool is_one_byte = this->IsOneByteRepresentation();
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(this).IsIndirect();
-
+ if (has_pointers) {
+ heap->NotifyObjectLayoutChange(this, no_allocation);
+ }
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
// string occupies is too small for a regular external string.
@@ -2536,6 +2547,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
+ DisallowHeapAllocation no_allocation;
// Externalizing twice leaks the external resource, so it's
// prohibited by the API.
DCHECK(!this->IsExternalString());
@@ -2563,6 +2575,10 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(this).IsIndirect();
+ if (has_pointers) {
+ heap->NotifyObjectLayoutChange(this, no_allocation);
+ }
+
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
// string occupies is too small for a regular external string.
@@ -3273,6 +3289,16 @@ MaybeHandle<Map> Map::CopyWithField(Handle<Map> map, Handle<Name> name,
if (map->instance_type() == JS_CONTEXT_EXTENSION_OBJECT_TYPE) {
representation = Representation::Tagged();
type = FieldType::Any(isolate);
+ } else if (IsTransitionableFastElementsKind(map->elements_kind()) &&
+ IsInplaceGeneralizableField(constness, representation, *type)) {
+ // We don't support propagation of field generalization through elements
+ // kind transitions because they are inserted into the transition tree
+ // before field transitions. In order to avoid complexity of handling
+ // such a case we ensure that all maps with transitionable elements kinds
+ // do not have fields that can be generalized in-place (without creation
+ // of a new map).
+ DCHECK(representation.IsHeapObject());
+ type = FieldType::Any(isolate);
}
Handle<Object> wrapped_type(WrapFieldType(type));
@@ -3330,8 +3356,8 @@ const char* Representation::Mnemonic() const {
}
bool Map::TransitionRemovesTaggedField(Map* target) {
- int inobject = GetInObjectProperties();
- int target_inobject = target->GetInObjectProperties();
+ int inobject = NumberOfFields();
+ int target_inobject = target->NumberOfFields();
for (int i = target_inobject; i < inobject; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(this, i);
if (!IsUnboxedDoubleField(index)) return true;
@@ -3340,8 +3366,8 @@ bool Map::TransitionRemovesTaggedField(Map* target) {
}
bool Map::TransitionChangesTaggedFieldToUntaggedField(Map* target) {
- int inobject = GetInObjectProperties();
- int target_inobject = target->GetInObjectProperties();
+ int inobject = NumberOfFields();
+ int target_inobject = target->NumberOfFields();
int limit = Min(inobject, target_inobject);
for (int i = 0; i < limit; i++) {
FieldIndex index = FieldIndex::ForPropertyIndex(target, i);
@@ -3458,20 +3484,23 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
PropertyDetails details = new_map->GetLastDescriptorDetails();
+ int target_index = details.field_index() - new_map->GetInObjectProperties();
+ bool have_space = old_map->unused_property_fields() > 0 ||
+ (details.location() == kField && target_index >= 0 &&
+ object->properties()->length() > target_index);
// Either new_map adds an kDescriptor property, or a kField property for
// which there is still space, and which does not require a mutable double
// box (an out-of-object double).
if (details.location() == kDescriptor ||
- (old_map->unused_property_fields() > 0 &&
- ((FLAG_unbox_double_fields && object->properties()->length() == 0) ||
- !details.representation().IsDouble()))) {
+ (have_space && ((FLAG_unbox_double_fields && target_index < 0) ||
+ !details.representation().IsDouble()))) {
object->synchronized_set_map(*new_map);
return;
}
// If there is still space in the object, we need to allocate a mutable
// double box.
- if (old_map->unused_property_fields() > 0) {
+ if (have_space) {
FieldIndex index =
FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
DCHECK(details.representation().IsDouble());
@@ -3498,7 +3527,6 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
}
DCHECK_EQ(kField, details.location());
DCHECK_EQ(kData, details.kind());
- int target_index = details.field_index() - new_map->GetInObjectProperties();
DCHECK(target_index >= 0); // Must be a backing store index.
new_storage->set(target_index, *value);
@@ -3917,7 +3945,6 @@ Handle<Map> Map::CopyGeneralizeAllFields(Handle<Map> map,
return new_map;
}
-
void Map::DeprecateTransitionTree() {
if (is_deprecated()) return;
Object* transitions = raw_transitions();
@@ -4060,7 +4087,6 @@ Handle<FieldType> Map::GeneralizeFieldType(Representation rep1,
return FieldType::Any(isolate);
}
-
// static
void Map::GeneralizeField(Handle<Map> map, int modify_index,
PropertyConstness new_constness,
@@ -4094,8 +4120,8 @@ void Map::GeneralizeField(Handle<Map> map, int modify_index,
// Determine the field owner.
Handle<Map> field_owner(map->FindFieldOwner(modify_index), isolate);
- Handle<DescriptorArray> descriptors(
- field_owner->instance_descriptors(), isolate);
+ Handle<DescriptorArray> descriptors(field_owner->instance_descriptors(),
+ isolate);
DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
new_field_type =
@@ -4823,16 +4849,15 @@ int AccessorInfo::AppendUnique(Handle<Object> descriptors,
valid_descriptors);
}
-
-static bool ContainsMap(MapHandleList* maps, Map* map) {
+static bool ContainsMap(MapHandles const& maps, Map* map) {
DCHECK_NOT_NULL(map);
- for (int i = 0; i < maps->length(); ++i) {
- if (!maps->at(i).is_null() && *maps->at(i) == map) return true;
+ for (Handle<Map> current : maps) {
+ if (!current.is_null() && *current == map) return true;
}
return false;
}
-Map* Map::FindElementsKindTransitionedMap(MapHandleList* candidates) {
+Map* Map::FindElementsKindTransitionedMap(MapHandles const& candidates) {
DisallowHeapAllocation no_allocation;
DisallowDeoptimization no_deoptimization(GetIsolate());
@@ -4843,7 +4868,7 @@ Map* Map::FindElementsKindTransitionedMap(MapHandleList* candidates) {
if (IsTransitionableFastElementsKind(kind)) {
// Check the state of the root map.
Map* root_map = FindRootMap();
- if (!EquivalentToForTransition(root_map)) return nullptr;
+ if (!EquivalentToForElementsKindTransition(root_map)) return nullptr;
root_map = root_map->LookupElementsTransitionMap(kind);
DCHECK_NOT_NULL(root_map);
// Starting from the next existing elements kind transition try to
@@ -5290,15 +5315,16 @@ MaybeHandle<Smi> JSFunction::GetLength(Isolate* isolate,
Handle<JSFunction> function) {
int length = 0;
if (function->shared()->is_compiled()) {
- length = function->shared()->length();
+ length = function->shared()->GetLength();
} else {
// If the function isn't compiled yet, the length is not computed
// correctly yet. Compile it now and return the right length.
if (Compiler::Compile(function, Compiler::KEEP_EXCEPTION)) {
- length = function->shared()->length();
+ length = function->shared()->GetLength();
}
if (isolate->has_pending_exception()) return MaybeHandle<Smi>();
}
+ DCHECK_GE(length, 0);
return handle(Smi::FromInt(length), isolate);
}
@@ -5690,7 +5716,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
NotifyMapChange(old_map, new_map, isolate);
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
if (FLAG_trace_maps) {
PrintF("[TraceMaps: SlowToFast from= %p to= %p reason= %s ]\n",
reinterpret_cast<void*>(*old_map), reinterpret_cast<void*>(*new_map),
@@ -5822,14 +5848,13 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
Handle<JSObject> object) {
DCHECK(!object->HasFixedTypedArrayElements());
Isolate* isolate = object->GetIsolate();
- bool is_arguments = object->HasSloppyArgumentsElements();
+ bool is_sloppy_arguments = object->HasSloppyArgumentsElements();
{
DisallowHeapAllocation no_gc;
FixedArrayBase* elements = object->elements();
- if (is_arguments) {
- FixedArray* parameter_map = FixedArray::cast(elements);
- elements = FixedArrayBase::cast(parameter_map->get(1));
+ if (is_sloppy_arguments) {
+ elements = SloppyArgumentsElements::cast(elements)->arguments();
}
if (elements->IsDictionary()) {
@@ -5846,7 +5871,7 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
object->GetElementsAccessor()->Normalize(object);
// Switch to using the dictionary as the backing storage for elements.
- ElementsKind target_kind = is_arguments
+ ElementsKind target_kind = is_sloppy_arguments
? SLOW_SLOPPY_ARGUMENTS_ELEMENTS
: object->HasFastStringWrapperElements()
? SLOW_STRING_WRAPPER_ELEMENTS
@@ -5855,8 +5880,9 @@ Handle<SeededNumberDictionary> JSObject::NormalizeElements(
// Set the new map first to satisfy the elements type assert in set_elements().
JSObject::MigrateToMap(object, new_map);
- if (is_arguments) {
- FixedArray::cast(object->elements())->set(1, *dictionary);
+ if (is_sloppy_arguments) {
+ SloppyArgumentsElements::cast(object->elements())
+ ->set_arguments(*dictionary);
} else {
object->set_elements(*dictionary);
}
@@ -8646,7 +8672,7 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
cache->Set(fast_map, new_map);
isolate->counters()->maps_normalized()->Increment();
}
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
if (FLAG_trace_maps) {
PrintF("[TraceMaps: Normalize from= %p to= %p reason= %s ]\n",
reinterpret_cast<void*>(*fast_map),
@@ -8695,9 +8721,8 @@ Handle<Map> Map::TransitionToImmutableProto(Handle<Map> map) {
return new_map;
}
-Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
- int in_object_properties,
- int unused_property_fields) {
+namespace {
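+// Debug-only sanity checks shared by the CopyInitialMap* variants below.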
+void EnsureInitialMap(Handle<Map> map) {
#ifdef DEBUG
Isolate* isolate = map->GetIsolate();
// Strict function maps have Function as a constructor but the
@@ -8715,7 +8740,21 @@ Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
DCHECK(map->owns_descriptors());
DCHECK_EQ(map->NumberOfOwnDescriptors(),
map->instance_descriptors()->number_of_descriptors());
+}
+} // namespace
+// static
+Handle<Map> Map::CopyInitialMapNormalized(Handle<Map> map,
+ PropertyNormalizationMode mode) {
+ EnsureInitialMap(map);
+ return CopyNormalized(map, mode);
+}
+
+// static
+Handle<Map> Map::CopyInitialMap(Handle<Map> map, int instance_size,
+ int in_object_properties,
+ int unused_property_fields) {
+ EnsureInitialMap(map);
Handle<Map> result = RawCopy(map, instance_size);
// Please note instance_type and instance_size are set when allocated.
@@ -8792,8 +8831,7 @@ Handle<Map> Map::ShareDescriptor(Handle<Map> map,
return result;
}
-
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
// static
void Map::TraceTransition(const char* what, Map* from, Map* to, Name* name) {
@@ -8818,18 +8856,11 @@ void Map::TraceAllTransitions(Map* map) {
}
}
-#endif // TRACE_MAPS
-
+#endif // V8_TRACE_MAPS
void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
Handle<Name> name, SimpleTransitionFlag flag) {
- Isolate* isolate = parent->GetIsolate();
- // Do not track transitions during bootstrap except for element transitions.
- if (isolate->bootstrapper()->IsActive() &&
- !name.is_identical_to(isolate->factory()->elements_transition_symbol())) {
- return;
- }
- if (!parent->GetBackPointer()->IsUndefined(isolate)) {
+ if (!parent->GetBackPointer()->IsUndefined(parent->GetIsolate())) {
parent->set_owns_descriptors(false);
} else {
// |parent| is the initial map and it must keep the ownership; there must be no
@@ -8840,12 +8871,12 @@ void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
}
if (parent->is_prototype_map()) {
DCHECK(child->is_prototype_map());
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
Map::TraceTransition("NoTransition", *parent, *child, *name);
#endif
} else {
TransitionArray::Insert(parent, name, child, flag);
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
Map::TraceTransition("Transition", *parent, *child, *name);
#endif
}
@@ -8877,7 +8908,7 @@ Handle<Map> Map::CopyReplaceDescriptors(
} else {
result->InitializeDescriptors(*descriptors, *layout_descriptor);
}
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
if (FLAG_trace_maps &&
// Mirror conditions above that did not call ConnectTransition().
(map->is_prototype_map() ||
@@ -9077,7 +9108,7 @@ Handle<Map> Map::CopyForTransition(Handle<Map> map, const char* reason) {
new_map->InitializeDescriptors(*new_descriptors, *new_layout_descriptor);
}
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
if (FLAG_trace_maps) {
PrintF("[TraceMaps: CopyForTransition from= %p to= %p reason= %s ]\n",
reinterpret_cast<void*>(*map), reinterpret_cast<void*>(*new_map),
@@ -9260,7 +9291,7 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
if (!maybe_map.ToHandle(&result)) {
Isolate* isolate = name->GetIsolate();
const char* reason = "TooManyFastProperties";
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
std::unique_ptr<ScopedVector<char>> buffer;
if (FLAG_trace_maps) {
ScopedVector<char> name_buffer(100);
@@ -10156,11 +10187,13 @@ Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
factory->NewFixedArray(LengthFor(size), pretenure);
result->set(kDescriptorLengthIndex, Smi::FromInt(number_of_descriptors));
- result->set(kEnumCacheIndex, Smi::kZero);
+ result->set(kEnumCacheBridgeIndex, Smi::kZero);
return Handle<DescriptorArray>::cast(result);
}
-void DescriptorArray::ClearEnumCache() { set(kEnumCacheIndex, Smi::kZero); }
+void DescriptorArray::ClearEnumCache() {
+ set(kEnumCacheBridgeIndex, Smi::kZero);
+}
void DescriptorArray::Replace(int index, Descriptor* descriptor) {
descriptor->SetSortedKeyIndex(GetSortedKeyIndex(index));
@@ -10180,14 +10213,14 @@ void DescriptorArray::SetEnumCache(Handle<DescriptorArray> descriptors,
bridge_storage = *isolate->factory()->NewFixedArray(
DescriptorArray::kEnumCacheBridgeLength);
} else {
- bridge_storage = FixedArray::cast(descriptors->get(kEnumCacheIndex));
+ bridge_storage = FixedArray::cast(descriptors->get(kEnumCacheBridgeIndex));
}
bridge_storage->set(kEnumCacheBridgeCacheIndex, *new_cache);
bridge_storage->set(
kEnumCacheBridgeIndicesCacheIndex,
new_index_cache.is_null() ? Object::cast(Smi::kZero) : *new_index_cache);
if (needs_new_enum_cache) {
- descriptors->set(kEnumCacheIndex, bridge_storage);
+ descriptors->set(kEnumCacheBridgeIndex, bridge_storage);
}
}
@@ -10667,20 +10700,17 @@ char* Relocatable::RestoreState(Isolate* isolate, char* from) {
return from + ArchiveSpacePerThread();
}
-
-char* Relocatable::Iterate(ObjectVisitor* v, char* thread_storage) {
+char* Relocatable::Iterate(RootVisitor* v, char* thread_storage) {
Relocatable* top = *reinterpret_cast<Relocatable**>(thread_storage);
Iterate(v, top);
return thread_storage + ArchiveSpacePerThread();
}
-
-void Relocatable::Iterate(Isolate* isolate, ObjectVisitor* v) {
+void Relocatable::Iterate(Isolate* isolate, RootVisitor* v) {
Iterate(v, isolate->relocatable_top());
}
-
-void Relocatable::Iterate(ObjectVisitor* v, Relocatable* top) {
+void Relocatable::Iterate(RootVisitor* v, Relocatable* top) {
Relocatable* current = top;
while (current != NULL) {
current->IterateInstance(v);
@@ -11446,8 +11476,10 @@ int String::IndexOf(Isolate* isolate, Handle<String> receiver,
}
MaybeHandle<String> String::GetSubstitution(Isolate* isolate, Match* match,
- Handle<String> replacement) {
+ Handle<String> replacement,
+ int start_index) {
DCHECK_IMPLIES(match->HasNamedCaptures(), FLAG_harmony_regexp_named_captures);
+ DCHECK_GE(start_index, 0);
Factory* factory = isolate->factory();
@@ -11458,7 +11490,8 @@ MaybeHandle<String> String::GetSubstitution(Isolate* isolate, Match* match,
Handle<String> dollar_string =
factory->LookupSingleCharacterStringFromCode('$');
- int next_dollar_ix = String::IndexOf(isolate, replacement, dollar_string, 0);
+ int next_dollar_ix =
+ String::IndexOf(isolate, replacement, dollar_string, start_index);
if (next_dollar_ix < 0) {
return replacement;
}
@@ -12023,7 +12056,7 @@ bool Map::EquivalentToForTransition(Map* other) {
if (!CheckEquivalent(this, other)) return false;
if (instance_type() == JS_FUNCTION_TYPE) {
// JSFunctions require more checks to ensure that sloppy function is
- // not equvalent to strict function.
+ // not equivalent to strict function.
int nof = Min(NumberOfOwnDescriptors(), other->NumberOfOwnDescriptors());
return instance_descriptors()->IsEqualUpTo(other->instance_descriptors(),
nof);
@@ -12031,6 +12064,25 @@ bool Map::EquivalentToForTransition(Map* other) {
return true;
}
+bool Map::EquivalentToForElementsKindTransition(Map* other) {
+ if (!EquivalentToForTransition(other)) return false;
+#ifdef DEBUG
+ // Ensure that we don't try to generate elements kind transitions from maps
+ // with fields that may be generalized in-place. This must already be handled
+ // during addition of a new field.
+ DescriptorArray* descriptors = instance_descriptors();
+ int nof = NumberOfOwnDescriptors();
+ for (int i = 0; i < nof; i++) {
+ PropertyDetails details = descriptors->GetDetails(i);
+ if (details.location() == kField) {
+ DCHECK(!IsInplaceGeneralizableField(details.constness(),
+ details.representation(),
+ descriptors->GetFieldType(i)));
+ }
+ }
+#endif
+ return true;
+}
bool Map::EquivalentToForNormalization(Map* other,
PropertyNormalizationMode mode) {
@@ -12103,125 +12155,6 @@ void JSFunction::AttemptConcurrentOptimization() {
}
// static
-void SharedFunctionInfo::AddToOptimizedCodeMap(
- Handle<SharedFunctionInfo> shared, Handle<Context> native_context,
- Handle<Code> code, BailoutId osr_ast_id) {
- Isolate* isolate = shared->GetIsolate();
- if (isolate->serializer_enabled()) return;
- DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
- DCHECK(native_context->IsNativeContext());
- STATIC_ASSERT(kEntryLength == 2);
- Handle<FixedArray> new_code_map;
- int entry;
-
- if (!osr_ast_id.IsNone()) {
- Context::AddToOptimizedCodeMap(native_context, shared, code, osr_ast_id);
- return;
- }
-
- DCHECK(osr_ast_id.IsNone());
- if (shared->OptimizedCodeMapIsCleared()) {
- new_code_map = isolate->factory()->NewFixedArray(kInitialLength, TENURED);
- entry = kEntriesStart;
- } else {
- Handle<FixedArray> old_code_map(shared->optimized_code_map(), isolate);
- entry = shared->SearchOptimizedCodeMapEntry(*native_context);
- if (entry >= kEntriesStart) {
- // Just set the code of the entry.
- Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
- old_code_map->set(entry + kCachedCodeOffset, *code_cell);
- return;
- }
-
- // Can we reuse an entry?
- DCHECK(entry < kEntriesStart);
- int length = old_code_map->length();
- for (int i = kEntriesStart; i < length; i += kEntryLength) {
- if (WeakCell::cast(old_code_map->get(i + kContextOffset))->cleared()) {
- new_code_map = old_code_map;
- entry = i;
- break;
- }
- }
-
- if (entry < kEntriesStart) {
- // Copy old optimized code map and append one new entry.
- new_code_map = isolate->factory()->CopyFixedArrayAndGrow(
- old_code_map, kEntryLength, TENURED);
- // TODO(mstarzinger): Temporary workaround. The allocation above might
- // have flushed the optimized code map and the copy we created is full of
- // holes. For now we just give up on adding the entry and pretend it got
- // flushed.
- if (shared->OptimizedCodeMapIsCleared()) return;
- entry = old_code_map->length();
- }
- }
-
- Handle<WeakCell> code_cell = isolate->factory()->NewWeakCell(code);
- WeakCell* context_cell = native_context->self_weak_cell();
-
- new_code_map->set(entry + kContextOffset, context_cell);
- new_code_map->set(entry + kCachedCodeOffset, *code_cell);
-
-#ifdef DEBUG
- for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
- WeakCell* cell = WeakCell::cast(new_code_map->get(i + kContextOffset));
- DCHECK(cell->cleared() || cell->value()->IsNativeContext());
- cell = WeakCell::cast(new_code_map->get(i + kCachedCodeOffset));
- DCHECK(cell->cleared() ||
- (cell->value()->IsCode() &&
- Code::cast(cell->value())->kind() == Code::OPTIMIZED_FUNCTION));
- }
-#endif
-
- FixedArray* old_code_map = shared->optimized_code_map();
- if (old_code_map != *new_code_map) {
- shared->set_optimized_code_map(*new_code_map);
- }
-}
-
-
-void SharedFunctionInfo::ClearOptimizedCodeMap() {
- FixedArray* empty_fixed_array = GetHeap()->empty_fixed_array();
- set_optimized_code_map(empty_fixed_array, SKIP_WRITE_BARRIER);
-}
-
-
-void SharedFunctionInfo::EvictFromOptimizedCodeMap(Code* optimized_code,
- const char* reason) {
- DisallowHeapAllocation no_gc;
- Isolate* isolate = GetIsolate();
- bool found = false;
-
- if (!OptimizedCodeMapIsCleared()) {
- Heap* heap = isolate->heap();
- FixedArray* code_map = optimized_code_map();
- int length = code_map->length();
- for (int src = kEntriesStart; src < length; src += kEntryLength) {
- DCHECK(WeakCell::cast(code_map->get(src))->cleared() ||
- WeakCell::cast(code_map->get(src))->value()->IsNativeContext());
- found = WeakCell::cast(code_map->get(src + kCachedCodeOffset))->value() ==
- optimized_code;
- if (found) {
- if (FLAG_trace_opt) {
- PrintF("[evicting entry from optimizing code map (%s) for ", reason);
- ShortPrint();
- PrintF("]\n");
- }
- // Just clear the code.
- code_map->set(src + kCachedCodeOffset, heap->empty_weak_cell(),
- SKIP_WRITE_BARRIER);
- }
- }
- }
-
- if (!found) {
- // We didn't find the code in here. It must be osr'd code.
- isolate->EvictOSROptimizedCode(optimized_code, reason);
- }
-}
-
-// static
void JSFunction::EnsureLiterals(Handle<JSFunction> function) {
Handle<SharedFunctionInfo> shared(function->shared());
Isolate* isolate = shared->GetIsolate();
@@ -12626,13 +12559,10 @@ Handle<Object> CacheInitialJSArrayMaps(
return initial_map;
}
+namespace {
-void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
- Handle<Object> value) {
- Isolate* isolate = function->GetIsolate();
-
- DCHECK(value->IsJSReceiver());
-
+void SetInstancePrototype(Isolate* isolate, Handle<JSFunction> function,
+ Handle<JSReceiver> value) {
// Now some logic for the maps of the objects that are created by using this
// function as a constructor.
if (function->has_initial_map()) {
@@ -12683,12 +12613,14 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
isolate->heap()->ClearInstanceofCache();
}
+} // anonymous namespace
void JSFunction::SetPrototype(Handle<JSFunction> function,
Handle<Object> value) {
DCHECK(function->IsConstructor() ||
IsGeneratorFunction(function->shared()->kind()));
- Handle<Object> construct_prototype = value;
+ Isolate* isolate = function->GetIsolate();
+ Handle<JSReceiver> construct_prototype;
// If the value is not a JSReceiver, store the value in the map's
// constructor field so it can be accessed. Also, set the prototype
@@ -12703,23 +12635,23 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
JSObject::MigrateToMap(function, new_map);
new_map->SetConstructor(*value);
new_map->set_non_instance_prototype(true);
- Isolate* isolate = new_map->GetIsolate();
FunctionKind kind = function->shared()->kind();
Handle<Context> native_context(function->context()->native_context());
- construct_prototype =
- handle(IsGeneratorFunction(kind)
- ? IsAsyncFunction(kind)
- ? native_context->initial_async_generator_prototype()
- : native_context->initial_generator_prototype()
- : native_context->initial_object_prototype(),
- isolate);
+ construct_prototype = Handle<JSReceiver>(
+ IsGeneratorFunction(kind)
+ ? IsAsyncFunction(kind)
+ ? native_context->initial_async_generator_prototype()
+ : native_context->initial_generator_prototype()
+ : native_context->initial_object_prototype(),
+ isolate);
} else {
+ construct_prototype = Handle<JSReceiver>::cast(value);
function->map()->set_non_instance_prototype(false);
}
- return SetInstancePrototype(function, construct_prototype);
+ SetInstancePrototype(isolate, function, construct_prototype);
}
@@ -12753,7 +12685,7 @@ void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
}
function->set_prototype_or_initial_map(*map);
map->SetConstructor(*function);
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
if (FLAG_trace_maps) {
PrintF("[TraceMaps: InitialMap map= %p SFI= %d_%s ]\n",
reinterpret_cast<void*>(*map), function->shared()->unique_id(),
@@ -13399,11 +13331,6 @@ bool Script::HasPreparsedScopeData() const {
return preparsed_scope_data()->length() > 0;
}
-Handle<FixedUint32Array> Script::GetPreparsedScopeData() const {
- return Handle<FixedUint32Array>::cast(
- Handle<FixedTypedArrayBase>(preparsed_scope_data()));
-}
-
SharedFunctionInfo::ScriptIterator::ScriptIterator(Handle<Script> script)
: ScriptIterator(script->GetIsolate(),
handle(script->shared_function_infos())) {}
@@ -13745,7 +13672,6 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
Handle<SharedFunctionInfo> shared_info, FunctionLiteral* lit) {
// When adding fields here, make sure DeclarationScope::AnalyzePartially is
// updated accordingly.
- shared_info->set_length(lit->function_length());
shared_info->set_internal_formal_parameter_count(lit->parameter_count());
shared_info->set_function_token_position(lit->function_token_position());
shared_info->set_start_position(lit->start_position());
@@ -13771,8 +13697,14 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
// SetSharedFunctionFlagsFromLiteral (compiler.cc), when the function is
// really parsed and compiled.
if (lit->body() != nullptr) {
+ shared_info->set_length(lit->function_length());
shared_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
shared_info->SetExpectedNofPropertiesFromEstimate(lit);
+ } else {
+ // Set an invalid length for lazy functions. This way we can set the correct
+ // value after compiling, but avoid overwriting values set manually by the
+ // bootstrapper.
+ shared_info->set_length(SharedFunctionInfo::kInvalidLength);
}
}
@@ -13835,134 +13767,58 @@ void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
}
}
-int SharedFunctionInfo::SearchOptimizedCodeMapEntry(Context* native_context) {
- DisallowHeapAllocation no_gc;
- DCHECK(native_context->IsNativeContext());
- if (!OptimizedCodeMapIsCleared()) {
- FixedArray* optimized_code_map = this->optimized_code_map();
- int length = optimized_code_map->length();
- for (int i = kEntriesStart; i < length; i += kEntryLength) {
- if (WeakCell::cast(optimized_code_map->get(i + kContextOffset))
- ->value() == native_context) {
- return i;
- }
- }
- }
- return -1;
-}
-
-void SharedFunctionInfo::ClearCodeFromOptimizedCodeMap() {
- if (!OptimizedCodeMapIsCleared()) {
- FixedArray* optimized_code_map = this->optimized_code_map();
- int length = optimized_code_map->length();
- WeakCell* empty_weak_cell = GetHeap()->empty_weak_cell();
- for (int i = kEntriesStart; i < length; i += kEntryLength) {
- optimized_code_map->set(i + kCachedCodeOffset, empty_weak_cell,
- SKIP_WRITE_BARRIER);
- }
- }
-}
-
-Code* SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context,
- BailoutId osr_ast_id) {
- Code* result = nullptr;
- if (!osr_ast_id.IsNone()) {
- return native_context->SearchOptimizedCodeMap(this, osr_ast_id);
- }
-
- DCHECK(osr_ast_id.IsNone());
- int entry = SearchOptimizedCodeMapEntry(native_context);
- if (entry != kNotFound) {
- FixedArray* code_map = optimized_code_map();
- DCHECK_LE(entry + kEntryLength, code_map->length());
- WeakCell* cell = WeakCell::cast(code_map->get(entry + kCachedCodeOffset));
- result = cell->cleared() ? nullptr : Code::cast(cell->value());
- }
- return result;
-}
-
-
-#define DECLARE_TAG(ignore1, name, ignore2) name,
-const char* const VisitorSynchronization::kTags[
- VisitorSynchronization::kNumberOfSyncTags] = {
- VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
-};
-#undef DECLARE_TAG
-
-
-#define DECLARE_TAG(ignore1, ignore2, name) name,
-const char* const VisitorSynchronization::kTagNames[
- VisitorSynchronization::kNumberOfSyncTags] = {
- VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
-};
-#undef DECLARE_TAG
-
-
-void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
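+// The default reloc visitors below decode the target into an Object*, forward
+// it to VisitPointer() together with the new |host| argument, and assert that
+// the visitor did not move the pointer.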
+void ObjectVisitor::VisitCodeTarget(Code* host, RelocInfo* rinfo) {
DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* old_pointer = Code::GetCodeFromTargetAddress(rinfo->target_address());
Object* new_pointer = old_pointer;
- VisitPointer(&new_pointer);
+ VisitPointer(host, &new_pointer);
DCHECK_EQ(old_pointer, new_pointer);
}
-
-void ObjectVisitor::VisitCodeAgeSequence(RelocInfo* rinfo) {
+void ObjectVisitor::VisitCodeAgeSequence(Code* host, RelocInfo* rinfo) {
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
Object* old_pointer = rinfo->code_age_stub();
Object* new_pointer = old_pointer;
if (old_pointer != nullptr) {
- VisitPointer(&new_pointer);
+ VisitPointer(host, &new_pointer);
DCHECK_EQ(old_pointer, new_pointer);
}
}
-
-void ObjectVisitor::VisitCodeEntry(Address entry_address) {
+void ObjectVisitor::VisitCodeEntry(JSFunction* host, Address entry_address) {
Object* old_pointer = Code::GetObjectFromEntryAddress(entry_address);
Object* new_pointer = old_pointer;
- VisitPointer(&new_pointer);
+ VisitPointer(host, &new_pointer);
DCHECK_EQ(old_pointer, new_pointer);
}
-
-void ObjectVisitor::VisitCell(RelocInfo* rinfo) {
+void ObjectVisitor::VisitCellPointer(Code* host, RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::CELL);
Object* old_pointer = rinfo->target_cell();
Object* new_pointer = old_pointer;
- VisitPointer(&new_pointer);
+ VisitPointer(host, &new_pointer);
DCHECK_EQ(old_pointer, new_pointer);
}
-
-void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
+void ObjectVisitor::VisitDebugTarget(Code* host, RelocInfo* rinfo) {
DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
rinfo->IsPatchedDebugBreakSlotSequence());
Object* old_pointer =
Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
Object* new_pointer = old_pointer;
- VisitPointer(&new_pointer);
+ VisitPointer(host, &new_pointer);
DCHECK_EQ(old_pointer, new_pointer);
}
-
-void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
+void ObjectVisitor::VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
Object* old_pointer = rinfo->target_object();
Object* new_pointer = old_pointer;
- VisitPointer(&new_pointer);
+ VisitPointer(host, &new_pointer);
DCHECK_EQ(old_pointer, new_pointer);
}
-void ObjectVisitor::VisitExternalReference(RelocInfo* rinfo) {
- Address old_reference = rinfo->target_external_reference();
- Address new_reference = old_reference;
- VisitExternalReference(&new_reference);
- DCHECK_EQ(old_reference, new_reference);
-}
-
-
void Code::InvalidateRelocation() {
InvalidateEmbeddedObjects();
set_relocation_info(GetHeap()->empty_byte_array());
@@ -14018,7 +13874,6 @@ void Code::CopyFrom(const CodeDesc& desc) {
static_cast<size_t>(desc.reloc_size));
// unbox handles and relocate
- intptr_t delta = instruction_start() - desc.buffer;
int mode_mask = RelocInfo::kCodeTargetMask |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::ModeMask(RelocInfo::CELL) |
@@ -14038,8 +13893,8 @@ void Code::CopyFrom(const CodeDesc& desc) {
it.rinfo()->set_target_cell(*cell, UPDATE_WRITE_BARRIER,
SKIP_ICACHE_FLUSH);
} else if (RelocInfo::IsCodeTarget(mode)) {
- // rewrite code handles in inline cache targets to direct
- // pointers to the first instruction in the code object
+ // rewrite code handles to direct pointers to the first instruction in the
+ // code object
Handle<Object> p = it.rinfo()->target_object_handle(origin);
Code* code = Code::cast(*p);
it.rinfo()->set_target_address(GetIsolate(), code->instruction_start(),
@@ -14053,6 +13908,7 @@ void Code::CopyFrom(const CodeDesc& desc) {
Code* code = Code::cast(*p);
it.rinfo()->set_code_age_stub(code, SKIP_ICACHE_FLUSH);
} else {
+ intptr_t delta = instruction_start() - desc.buffer;
it.rinfo()->apply(delta);
}
}
@@ -14131,6 +13987,55 @@ void Code::ClearInlineCaches() {
}
}
+namespace {
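+// Shared helper for Code and BytecodeArray, both of which expose
+// source_position_table(): installs |cache| by wrapping the raw ByteArray
+// table in a SourcePositionTableWithFrameCache, reusing an existing wrapper
+// when one is already present.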
+template <typename Code>
+void SetStackFrameCacheCommon(Handle<Code> code,
+ Handle<UnseededNumberDictionary> cache) {
+ Handle<Object> maybe_table(code->source_position_table(), code->GetIsolate());
+ if (maybe_table->IsSourcePositionTableWithFrameCache()) {
+ Handle<SourcePositionTableWithFrameCache>::cast(maybe_table)
+ ->set_stack_frame_cache(*cache);
+ return;
+ }
+ DCHECK(maybe_table->IsByteArray());
+ Handle<ByteArray> table(Handle<ByteArray>::cast(maybe_table));
+ Handle<SourcePositionTableWithFrameCache> table_with_cache =
+ code->GetIsolate()->factory()->NewSourcePositionTableWithFrameCache(
+ table, cache);
+ code->set_source_position_table(*table_with_cache);
+}
+} // namespace
+
+// static
+void AbstractCode::SetStackFrameCache(Handle<AbstractCode> abstract_code,
+ Handle<UnseededNumberDictionary> cache) {
+ if (abstract_code->IsCode()) {
+ SetStackFrameCacheCommon(handle(abstract_code->GetCode()), cache);
+ } else {
+ SetStackFrameCacheCommon(handle(abstract_code->GetBytecodeArray()), cache);
+ }
+}
+
+namespace {
+template <typename Code>
+void DropStackFrameCacheCommon(Code* code) {
+ i::Object* maybe_table = code->source_position_table();
+ if (maybe_table->IsByteArray()) return;
+ DCHECK(maybe_table->IsSourcePositionTableWithFrameCache());
+ code->set_source_position_table(
+ i::SourcePositionTableWithFrameCache::cast(maybe_table)
+ ->source_position_table());
+}
+} // namespace
+
+void AbstractCode::DropStackFrameCache() {
+ if (IsCode()) {
+ DropStackFrameCacheCommon(GetCode());
+ } else {
+ DropStackFrameCacheCommon(GetBytecodeArray());
+ }
+}
+
int AbstractCode::SourcePosition(int offset) {
int position = 0;
// Subtract one because the current PC is one instruction after the call site.
@@ -14787,7 +14692,7 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
}
os << "\n";
- SourcePositionTableIterator it(source_position_table());
+ SourcePositionTableIterator it(SourcePositionTable());
if (!it.done()) {
os << "Source positions:\n pc offset position\n";
for (; !it.done(); it.Advance()) {
@@ -14851,7 +14756,11 @@ void Code::Disassemble(const char* name, std::ostream& os) { // NOLINT
}
#ifdef OBJECT_PRINT
if (!type_feedback_info()->IsUndefined(GetIsolate())) {
- TypeFeedbackInfo::cast(type_feedback_info())->TypeFeedbackInfoPrint(os);
+ TypeFeedbackInfo* info = TypeFeedbackInfo::cast(type_feedback_info());
+ HeapObject::PrintHeader(os, "TypeFeedbackInfo");
+ os << "\n - ic_total_count: " << info->ic_total_count()
+ << ", ic_with_type_info_count: " << info->ic_with_type_info_count()
+ << ", ic_generic_count: " << info->ic_generic_count() << "\n";
os << "\n";
}
#endif
@@ -14889,7 +14798,7 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << "Frame size " << frame_size() << "\n";
const uint8_t* base_address = GetFirstBytecodeAddress();
- SourcePositionTableIterator source_positions(source_position_table());
+ SourcePositionTableIterator source_positions(SourcePositionTable());
interpreter::BytecodeArrayIterator iterator(handle(this));
while (!iterator.done()) {
@@ -14911,18 +14820,33 @@ void BytecodeArray::Disassemble(std::ostream& os) {
os << " (" << jump_target << " @ " << iterator.GetJumpTargetOffset()
<< ")";
}
+ if (interpreter::Bytecodes::IsSwitch(iterator.current_bytecode())) {
+ os << " {";
+ bool first_entry = true;
+ for (const auto& entry : iterator.GetJumpTableTargetOffsets()) {
+ if (first_entry) {
+ first_entry = false;
+ } else {
+ os << ",";
+ }
+ os << " " << entry.case_value << ": @" << entry.target_offset;
+ }
+ os << " }";
+ }
os << std::endl;
iterator.Advance();
}
+ os << "Constant pool (size = " << constant_pool()->length() << ")\n";
+#ifdef OBJECT_PRINT
if (constant_pool()->length() > 0) {
- os << "Constant pool (size = " << constant_pool()->length() << ")\n";
constant_pool()->Print();
}
+#endif
+ os << "Handler Table (size = " << handler_table()->Size() << ")\n";
#ifdef ENABLE_DISASSEMBLER
if (handler_table()->length() > 0) {
- os << "Handler Table (size = " << handler_table()->Size() << ")\n";
HandlerTable::cast(handler_table())->HandlerTableRangePrint(os);
}
#endif
@@ -15513,13 +15437,14 @@ static bool ShouldConvertToSlowElements(JSObject* object, uint32_t capacity,
object->GetHeap()->InNewSpace(object))) {
return false;
}
- // If the fast-case backing storage takes up roughly three times as
- // much space (in machine words) as a dictionary backing storage
- // would, the object should have slow elements.
+ // If the fast-case backing storage takes up much more memory than a
+ // dictionary backing storage would, the object should have slow elements.
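+  // The threshold below is kPreferFastElementsSizeFactor *
+  // ComputeCapacity(used_elements) * kEntrySize machine words; only once the
+  // requested capacity reaches it does the object switch to slow elements.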
int used_elements = object->GetFastElementsUsage();
- int dictionary_size = SeededNumberDictionary::ComputeCapacity(used_elements) *
- SeededNumberDictionary::kEntrySize;
- return 3 * static_cast<uint32_t>(dictionary_size) <= *new_capacity;
+ uint32_t size_threshold =
+ SeededNumberDictionary::kPreferFastElementsSizeFactor *
+ SeededNumberDictionary::ComputeCapacity(used_elements) *
+ SeededNumberDictionary::kEntrySize;
+ return size_threshold <= *new_capacity;
}
@@ -15574,6 +15499,8 @@ static bool ShouldConvertToFastElements(JSObject* object,
Object* length = JSArray::cast(object)->length();
if (!length->IsSmi()) return false;
*new_capacity = static_cast<uint32_t>(Smi::cast(length)->value());
+ } else if (object->IsJSSloppyArgumentsObject()) {
+ return false;
} else {
*new_capacity = dictionary->max_number_key() + 1;
}
@@ -15618,7 +15545,7 @@ Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
FixedArrayBase* elements = object->elements();
ElementsKind dictionary_kind = DICTIONARY_ELEMENTS;
if (IsSloppyArgumentsElementsKind(kind)) {
- elements = FixedArrayBase::cast(FixedArray::cast(elements)->get(1));
+ elements = SloppyArgumentsElements::cast(elements)->arguments();
dictionary_kind = SLOW_SLOPPY_ARGUMENTS_ELEMENTS;
} else if (IsStringWrapperElementsKind(kind)) {
dictionary_kind = SLOW_STRING_WRAPPER_ELEMENTS;
@@ -15631,7 +15558,7 @@ Maybe<bool> JSObject::AddDataElement(Handle<JSObject> object, uint32_t index,
SeededNumberDictionary::cast(elements),
index, &new_capacity)
? BestFittingFastElementsKind(*object)
- : dictionary_kind; // Overwrite in case of arguments.
+ : dictionary_kind;
} else if (ShouldConvertToSlowElements(
*object, static_cast<uint32_t>(elements->length()), index,
&new_capacity)) {
@@ -16171,7 +16098,7 @@ JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags, bool* success) {
JSRegExp::Flags value = JSRegExp::kNone;
int length = flags->length();
// A longer flags string cannot be valid.
- if (length > 5) return JSRegExp::Flags(0);
+ if (length > JSRegExp::FlagCount()) return JSRegExp::Flags(0);
for (int i = 0; i < length; i++) {
JSRegExp::Flag flag = JSRegExp::kNone;
switch (flags->Get(i)) {
@@ -16672,21 +16599,19 @@ Handle<Derived> HashTable<Derived, Shape, Key>::EnsureCapacity(
int n,
Key key,
PretenureFlag pretenure) {
+ if (table->HasSufficientCapacityToAdd(n)) return table;
+
Isolate* isolate = table->GetIsolate();
int capacity = table->Capacity();
- int nof = table->NumberOfElements() + n;
-
- if (table->HasSufficientCapacityToAdd(n)) return table;
+ int new_nof = table->NumberOfElements() + n;
const int kMinCapacityForPretenure = 256;
bool should_pretenure = pretenure == TENURED ||
((capacity > kMinCapacityForPretenure) &&
!isolate->heap()->InNewSpace(*table));
- Handle<Derived> new_table = HashTable::New(
- isolate,
- nof * 2,
- USE_DEFAULT_MINIMUM_CAPACITY,
- should_pretenure ? TENURED : NOT_TENURED);
+ Handle<Derived> new_table =
+ HashTable::New(isolate, new_nof, USE_DEFAULT_MINIMUM_CAPACITY,
+ should_pretenure ? TENURED : NOT_TENURED);
table->Rehash(new_table, key);
return new_table;
@@ -16777,17 +16702,16 @@ template class Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >;
template class Dictionary<GlobalDictionary, GlobalDictionaryShape,
Handle<Name> >;
-template class Dictionary<SeededNumberDictionary,
- SeededNumberDictionaryShape,
- uint32_t>;
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ HashTable<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>;
+
+template class EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
+ Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>;
template class Dictionary<UnseededNumberDictionary,
UnseededNumberDictionaryShape,
uint32_t>;
-template void
-HashTable<GlobalDictionary, GlobalDictionaryShape, Handle<Name> >::Rehash(Handle<Name> key);
-
template Handle<SeededNumberDictionary>
Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::New(
Isolate*, int at_least_space_for, PretenureFlag pretenure,
@@ -16860,10 +16784,6 @@ template Handle<NameDictionary>
HashTable<NameDictionary, NameDictionaryShape, Handle<Name> >::
Shrink(Handle<NameDictionary>, Handle<Name>);
-template Handle<SeededNumberDictionary>
-HashTable<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>::
- Shrink(Handle<SeededNumberDictionary>, uint32_t);
-
template Handle<UnseededNumberDictionary>
HashTable<UnseededNumberDictionary, UnseededNumberDictionaryShape,
uint32_t>::Shrink(Handle<UnseededNumberDictionary>, uint32_t);
@@ -16903,9 +16823,6 @@ template Handle<NameDictionary>
Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
EnsureCapacity(Handle<NameDictionary>, int, Handle<Name>);
-template int HashTable<SeededNumberDictionary, SeededNumberDictionaryShape,
- uint32_t>::FindEntry(uint32_t);
-
template int NameDictionaryBase<NameDictionary, NameDictionaryShape>::FindEntry(
Handle<Name>);
@@ -16953,6 +16870,16 @@ Dictionary<NameDictionary, NameDictionaryShape, Handle<Name>>::CollectKeysTo(
dictionary,
KeyAccumulator* keys);
+template int
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
+ uint32_t>::AddEntry(Handle<SeededNumberDictionary> dictionary,
+ uint32_t key, Handle<Object> value,
+ PropertyDetails details, uint32_t hash);
+
+template int
+Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
+ uint32_t>::NumberOfElementsFilterAttributes(PropertyFilter filter);
+
Handle<Object> JSObject::PrepareSlowElementsForSort(
Handle<JSObject> object, uint32_t limit) {
DCHECK(object->HasDictionaryElements());
@@ -17301,6 +17228,70 @@ size_t JSTypedArray::element_size() {
}
}
+// static
+MaybeHandle<JSTypedArray> JSTypedArray::Create(Isolate* isolate,
+ Handle<Object> default_ctor,
+ int argc, Handle<Object>* argv,
+ const char* method_name) {
+ // 1. Let newTypedArray be ? Construct(constructor, argumentList).
+ Handle<Object> new_obj;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, new_obj,
+ Execution::New(isolate, default_ctor, argc, argv),
+ JSTypedArray);
+
+ // 2. Perform ? ValidateTypedArray(newTypedArray).
+ Handle<JSTypedArray> new_array;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, new_array, JSTypedArray::Validate(isolate, new_obj, method_name),
+ JSTypedArray);
+
+ // 3. If argumentList is a List of a single Number, then
+ // If newTypedArray.[[ArrayLength]] < size, throw a TypeError exception.
+ DCHECK_IMPLIES(argc == 1, argv[0]->IsSmi());
+ if (argc == 1 && new_array->length_value() < argv[0]->Number()) {
+ const MessageTemplate::Template message =
+ MessageTemplate::kTypedArrayTooShort;
+ THROW_NEW_ERROR(isolate, NewTypeError(message), JSTypedArray);
+ }
+
+ // 4. Return newTypedArray.
+ return new_array;
+}
+
+// static
+MaybeHandle<JSTypedArray> JSTypedArray::SpeciesCreate(
+ Isolate* isolate, Handle<JSTypedArray> exemplar, int argc,
+ Handle<Object>* argv, const char* method_name) {
+ // 1. Assert: exemplar is an Object that has a [[TypedArrayName]] internal
+ // slot.
+ DCHECK(exemplar->IsJSTypedArray());
+
+ // 2. Let defaultConstructor be the intrinsic object listed in column one of
+ // Table 51 for exemplar.[[TypedArrayName]].
+ Handle<JSFunction> default_ctor = isolate->uint8_array_fun();
+ switch (exemplar->type()) {
+#define TYPED_ARRAY_CTOR(Type, type, TYPE, ctype, size) \
+ case kExternal##Type##Array: { \
+ default_ctor = isolate->type##_array_fun(); \
+ break; \
+ }
+
+ TYPED_ARRAYS(TYPED_ARRAY_CTOR)
+#undef TYPED_ARRAY_CTOR
+ default:
+ UNREACHABLE();
+ }
+
+ // 3. Let constructor be ? SpeciesConstructor(exemplar, defaultConstructor).
+ Handle<Object> ctor;
+ ASSIGN_RETURN_ON_EXCEPTION(
+ isolate, ctor,
+ Object::SpeciesConstructor(isolate, exemplar, default_ctor),
+ JSTypedArray);
+
+ // 4. Return ? TypedArrayCreate(constructor, argumentList).
+ return Create(isolate, ctor, argc, argv, method_name);
+}
void JSGlobalObject::InvalidatePropertyCell(Handle<JSGlobalObject> global,
Handle<Name> name) {
@@ -17467,10 +17458,9 @@ void StringTable::EnsureCapacityForDeserialization(Isolate* isolate,
namespace {
template <class StringClass>
-void MigrateExternalStringResource(Isolate* isolate, Handle<String> from,
- Handle<String> to) {
- Handle<StringClass> cast_from = Handle<StringClass>::cast(from);
- Handle<StringClass> cast_to = Handle<StringClass>::cast(to);
+void MigrateExternalStringResource(Isolate* isolate, String* from, String* to) {
+ StringClass* cast_from = StringClass::cast(from);
+ StringClass* cast_to = StringClass::cast(to);
const typename StringClass::Resource* to_resource = cast_to->resource();
if (to_resource == nullptr) {
// |to| is a just-created internalized copy of |from|. Migrate the resource.
@@ -17480,7 +17470,44 @@ void MigrateExternalStringResource(Isolate* isolate, Handle<String> from,
cast_from->set_resource(nullptr);
} else if (to_resource != cast_from->resource()) {
// |to| already existed and has its own resource. Finalize |from|.
- isolate->heap()->FinalizeExternalString(*from);
+ isolate->heap()->FinalizeExternalString(from);
+ }
+}
+
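+// Rewrites |string| in place into a ThinString that forwards to
+// |internalized|, first migrating or finalizing any external resource, and
+// fills the freed tail of the object with a filler so the heap stays
+// iterable.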
+void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
+ if (string->IsExternalString()) {
+ if (internalized->IsExternalOneByteString()) {
+ MigrateExternalStringResource<ExternalOneByteString>(isolate, string,
+ internalized);
+ } else if (internalized->IsExternalTwoByteString()) {
+ MigrateExternalStringResource<ExternalTwoByteString>(isolate, string,
+ internalized);
+ } else {
+ // If the external string is duped into an existing non-external
+ // internalized string, free its resource (it's about to be rewritten
+ // into a ThinString below).
+ isolate->heap()->FinalizeExternalString(string);
+ }
+ }
+
+ if (!string->IsInternalizedString()) {
+ DisallowHeapAllocation no_gc;
+ isolate->heap()->NotifyObjectLayoutChange(string, no_gc);
+ bool one_byte = internalized->IsOneByteRepresentation();
+ Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
+ : isolate->factory()->thin_string_map();
+ int old_size = string->Size();
+ DCHECK(old_size >= ThinString::kSize);
+ string->synchronized_set_map(*map);
+ ThinString* thin = ThinString::cast(string);
+ thin->set_actual(internalized);
+ Address thin_end = thin->address() + ThinString::kSize;
+ int size_delta = old_size - ThinString::kSize;
+ if (size_delta != 0) {
+ Heap* heap = isolate->heap();
+ heap->CreateFillerObjectAt(thin_end, size_delta, ClearRecordedSlots::kNo);
+ heap->AdjustLiveBytes(thin, -size_delta);
+ }
}
}
@@ -17501,44 +17528,7 @@ Handle<String> StringTable::LookupString(Isolate* isolate,
Handle<String> result = LookupKey(isolate, &key);
if (FLAG_thin_strings) {
- if (string->IsExternalString()) {
- if (result->IsExternalOneByteString()) {
- MigrateExternalStringResource<ExternalOneByteString>(isolate, string,
- result);
- } else if (result->IsExternalTwoByteString()) {
- MigrateExternalStringResource<ExternalTwoByteString>(isolate, string,
- result);
- } else {
- // If the external string is duped into an existing non-external
- // internalized string, free its resource (it's about to be rewritten
- // into a ThinString below).
- isolate->heap()->FinalizeExternalString(*string);
- }
- }
-
- // The LookupKey() call above tries to internalize the string in-place.
- // In cases where that wasn't possible (e.g. new-space strings), turn them
- // into ThinStrings referring to their internalized versions now.
- if (!string->IsInternalizedString()) {
- DisallowHeapAllocation no_gc;
- bool one_byte = result->IsOneByteRepresentation();
- Handle<Map> map = one_byte
- ? isolate->factory()->thin_one_byte_string_map()
- : isolate->factory()->thin_string_map();
- int old_size = string->Size();
- DCHECK(old_size >= ThinString::kSize);
- string->synchronized_set_map(*map);
- Handle<ThinString> thin = Handle<ThinString>::cast(string);
- thin->set_actual(*result);
- Address thin_end = thin->address() + ThinString::kSize;
- int size_delta = old_size - ThinString::kSize;
- if (size_delta != 0) {
- Heap* heap = isolate->heap();
- heap->CreateFillerObjectAt(thin_end, size_delta,
- ClearRecordedSlots::kNo);
- heap->AdjustLiveBytes(*thin, -size_delta);
- }
- }
+ MakeStringThin(*string, *result, isolate);
} else { // !FLAG_thin_strings
if (string->IsConsString()) {
Handle<ConsString> cons = Handle<ConsString>::cast(string);
@@ -17588,10 +17578,173 @@ Handle<String> StringTable::LookupKey(Isolate* isolate, HashTableKey* key) {
return Handle<String>::cast(string);
}
+namespace {
+
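+// A HashTableKey over an existing, unflattened string that must not allocate
+// on the GC heap: cons strings short enough to hash are copied into a raw
+// local buffer instead of being flattened as heap objects.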
+class StringTableNoAllocateKey : public HashTableKey {
+ public:
+ StringTableNoAllocateKey(String* string, uint32_t seed)
+ : string_(string), length_(string->length()) {
+ StringShape shape(string);
+ one_byte_ = shape.HasOnlyOneByteChars();
+ DCHECK(!shape.IsInternalized());
+ DCHECK(!shape.IsThin());
+ if (shape.IsCons() && length_ <= String::kMaxHashCalcLength) {
+ special_flattening_ = true;
+ uint32_t hash_field = 0;
+ if (one_byte_) {
+ one_byte_content_ = new uint8_t[length_];
+ String::WriteToFlat(string, one_byte_content_, 0, length_);
+ hash_field = StringHasher::HashSequentialString(one_byte_content_,
+ length_, seed);
+ } else {
+ two_byte_content_ = new uint16_t[length_];
+ String::WriteToFlat(string, two_byte_content_, 0, length_);
+ hash_field = StringHasher::HashSequentialString(two_byte_content_,
+ length_, seed);
+ }
+ string->set_hash_field(hash_field);
+ } else {
+ special_flattening_ = false;
+ one_byte_content_ = nullptr;
+ }
+ hash_ = string->Hash();
+ }
+
+ ~StringTableNoAllocateKey() {
+ if (one_byte_) {
+ delete[] one_byte_content_;
+ } else {
+ delete[] two_byte_content_;
+ }
+ }
+
+ bool IsMatch(Object* otherstring) override {
+ String* other = String::cast(otherstring);
+ DCHECK(other->IsInternalizedString());
+ DCHECK(other->IsFlat());
+ if (hash_ != other->Hash()) return false;
+ int len = length_;
+ if (len != other->length()) return false;
+
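+    // Two comparison paths follow: strings that were not flattened in the
+    // constructor are compared through their flat contents (or a
+    // StringComparator as a fallback), while cons strings that were flattened
+    // are compared against the copied one- or two-byte buffer.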
+ if (!special_flattening_) {
+ if (string_->Get(0) != other->Get(0)) return false;
+ if (string_->IsFlat()) {
+ StringShape shape1(string_);
+ StringShape shape2(other);
+ if (shape1.encoding_tag() == kOneByteStringTag &&
+ shape2.encoding_tag() == kOneByteStringTag) {
+ String::FlatContent flat1 = string_->GetFlatContent();
+ String::FlatContent flat2 = other->GetFlatContent();
+ return CompareRawStringContents(flat1.ToOneByteVector().start(),
+ flat2.ToOneByteVector().start(), len);
+ }
+ if (shape1.encoding_tag() == kTwoByteStringTag &&
+ shape2.encoding_tag() == kTwoByteStringTag) {
+ String::FlatContent flat1 = string_->GetFlatContent();
+ String::FlatContent flat2 = other->GetFlatContent();
+ return CompareRawStringContents(flat1.ToUC16Vector().start(),
+ flat2.ToUC16Vector().start(), len);
+ }
+ }
+ StringComparator comparator;
+ return comparator.Equals(string_, other);
+ }
+
+ String::FlatContent flat_content = other->GetFlatContent();
+ if (one_byte_) {
+ if (flat_content.IsOneByte()) {
+ return CompareRawStringContents(
+ one_byte_content_, flat_content.ToOneByteVector().start(), len);
+ } else {
+ DCHECK(flat_content.IsTwoByte());
+ for (int i = 0; i < len; i++) {
+ if (flat_content.Get(i) != one_byte_content_[i]) return false;
+ }
+ return true;
+ }
+ } else {
+ if (flat_content.IsTwoByte()) {
+ return CompareRawStringContents(
+ two_byte_content_, flat_content.ToUC16Vector().start(), len);
+ } else {
+ DCHECK(flat_content.IsOneByte());
+ for (int i = 0; i < len; i++) {
+ if (flat_content.Get(i) != two_byte_content_[i]) return false;
+ }
+ return true;
+ }
+ }
+ }
+
+ uint32_t Hash() override { return hash_; }
+
+ uint32_t HashForObject(Object* key) override {
+ return String::cast(key)->Hash();
+ }
+
+ MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) override {
+ UNREACHABLE();
+ return Handle<String>();
+ }
+
+ private:
+ String* string_;
+ int length_;
+ bool one_byte_;
+ bool special_flattening_;
+ uint32_t hash_ = 0;
+ union {
+ uint8_t* one_byte_content_;
+ uint16_t* two_byte_content_;
+ };
+};
+
+} // namespace
+
+// static
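+// Returns the internalized string if |string| is already present in the
+// string table, its value as a Smi if the string caches an array index, or a
+// negative Smi sentinel (kNotFound / kUnsupported) otherwise; runs without
+// allocating.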
+Object* StringTable::LookupStringIfExists_NoAllocate(String* string) {
+ DisallowHeapAllocation no_gc;
+ Heap* heap = string->GetHeap();
+ Isolate* isolate = heap->isolate();
+ StringTable* table = heap->string_table();
+
+ StringTableNoAllocateKey key(string, heap->HashSeed());
+
+ // String could be an array index.
+ DCHECK(string->HasHashCode());
+ uint32_t hash = string->hash_field();
+
+ // Valid array indices are >= 0, so they cannot be mixed up with any of
+ // the result sentinels, which are negative.
+ STATIC_ASSERT(
+ !String::ArrayIndexValueBits::is_valid(ResultSentinel::kUnsupported));
+ STATIC_ASSERT(
+ !String::ArrayIndexValueBits::is_valid(ResultSentinel::kNotFound));
+
+ if ((hash & Name::kContainsCachedArrayIndexMask) == 0) {
+ return Smi::FromInt(String::ArrayIndexValueBits::decode(hash));
+ }
+ if ((hash & Name::kIsNotArrayIndexMask) == 0) {
+    // It is an array index, but it's not cached.
+ return Smi::FromInt(ResultSentinel::kUnsupported);
+ }
+
+ int entry = table->FindEntry(isolate, &key, key.Hash());
+ if (entry != kNotFound) {
+ String* internalized = String::cast(table->KeyAt(entry));
+ if (FLAG_thin_strings) {
+ MakeStringThin(string, internalized, isolate);
+ }
+ return internalized;
+ }
+ // A string that's not an array index, and not in the string table,
+ // cannot have been used as a property name before.
+ return Smi::FromInt(ResultSentinel::kNotFound);
+}
String* StringTable::LookupKeyIfExists(Isolate* isolate, HashTableKey* key) {
Handle<StringTable> table = isolate->factory()->string_table();
- int entry = table->FindEntry(key);
+ int entry = table->FindEntry(isolate, key);
if (entry != kNotFound) return String::cast(table->KeyAt(entry));
return NULL;
}
@@ -19467,8 +19620,10 @@ Handle<String> JSMessageObject::GetSourceLine() const {
void JSArrayBuffer::Neuter() {
CHECK(is_neuterable());
CHECK(is_external());
- set_backing_store(NULL);
+ set_backing_store(nullptr);
set_byte_length(Smi::kZero);
+ set_allocation_base(nullptr);
+ set_allocation_length(0);
set_was_neutered(true);
// Invalidate the neutering protector.
Isolate* const isolate = GetIsolate();
@@ -19477,10 +19632,36 @@ void JSArrayBuffer::Neuter() {
}
}
+void JSArrayBuffer::FreeBackingStore() {
+ if (allocation_base() == nullptr) {
+ return;
+ }
+ using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
+ const size_t length = allocation_length();
+ const AllocationMode mode = allocation_mode();
+ GetIsolate()->array_buffer_allocator()->Free(allocation_base(), length, mode);
+
+ // Zero out the backing store and allocation base to avoid dangling
+ // pointers.
+ set_backing_store(nullptr);
+ // TODO(eholk): set_byte_length(0) once we aren't using Smis for the
+ // byte_length. We can't do it now because the GC needs to call
+ // FreeBackingStore while it is collecting.
+ set_allocation_base(nullptr);
+ set_allocation_length(0);
+}
void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
bool is_external, void* data, size_t allocated_length,
SharedFlag shared) {
+ return Setup(array_buffer, isolate, is_external, data, allocated_length, data,
+ allocated_length, shared);
+}
+
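+// This overload tracks the allocation (base and length) separately from the
+// exposed view (data and byte_length); the shorter overload above passes the
+// same pointer and length for both.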
+void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
+ bool is_external, void* allocation_base,
+ size_t allocation_length, void* data,
+ size_t byte_length, SharedFlag shared) {
DCHECK(array_buffer->GetEmbedderFieldCount() ==
v8::ArrayBuffer::kEmbedderFieldCount);
for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
@@ -19491,16 +19672,19 @@ void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
array_buffer->set_is_neuterable(shared == SharedFlag::kNotShared);
array_buffer->set_is_shared(shared == SharedFlag::kShared);
- Handle<Object> byte_length =
- isolate->factory()->NewNumberFromSize(allocated_length);
- CHECK(byte_length->IsSmi() || byte_length->IsHeapNumber());
- array_buffer->set_byte_length(*byte_length);
+ Handle<Object> heap_byte_length =
+ isolate->factory()->NewNumberFromSize(byte_length);
+ CHECK(heap_byte_length->IsSmi() || heap_byte_length->IsHeapNumber());
+ array_buffer->set_byte_length(*heap_byte_length);
  // Initialize the backing store last to avoid handling of |JSArrayBuffers| that
// are currently being constructed in the |ArrayBufferTracker|. The
// registration method below handles the case of registering a buffer that has
// already been promoted.
array_buffer->set_backing_store(data);
+ array_buffer->set_allocation_base(data);
+ array_buffer->set_allocation_length(allocation_length);
+
if (data && !is_external) {
isolate->heap()->RegisterNewArrayBuffer(*array_buffer);
}
@@ -19523,8 +19707,9 @@ bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
// Prevent creating array buffers when serializing.
DCHECK(!isolate->serializer_enabled());
if (allocated_length != 0) {
- isolate->counters()->array_buffer_big_allocations()->AddSample(
- ConvertToMb(allocated_length));
+ if (allocated_length >= MB)
+ isolate->counters()->array_buffer_big_allocations()->AddSample(
+ ConvertToMb(allocated_length));
if (initialize) {
data = isolate->array_buffer_allocator()->Allocate(allocated_length);
} else {
@@ -19540,8 +19725,9 @@ bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
data = NULL;
}
- JSArrayBuffer::Setup(array_buffer, isolate, false, data, allocated_length,
- shared);
+ const bool is_external = false;
+ JSArrayBuffer::Setup(array_buffer, isolate, is_external, data,
+ allocated_length, shared);
return true;
}
@@ -19572,6 +19758,8 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
// already been promoted.
buffer->set_backing_store(backing_store);
isolate->heap()->RegisterNewArrayBuffer(*buffer);
+ buffer->set_allocation_base(backing_store);
+ buffer->set_allocation_length(NumberToSize(buffer->byte_length()));
memcpy(buffer->backing_store(),
fixed_typed_array->DataPtr(),
fixed_typed_array->DataSize());
@@ -19937,33 +20125,34 @@ void Module::CreateExport(Handle<Module> module, int cell_index,
module->set_exports(*exports);
}
-Handle<Object> Module::LoadVariable(Handle<Module> module, int cell_index) {
- Isolate* isolate = module->GetIsolate();
- Handle<Object> object;
+Cell* Module::GetCell(int cell_index) {
+ DisallowHeapAllocation no_gc;
+ Object* cell;
switch (ModuleDescriptor::GetCellIndexKind(cell_index)) {
case ModuleDescriptor::kImport:
- object = handle(module->regular_imports()->get(ImportIndex(cell_index)),
- isolate);
+ cell = regular_imports()->get(ImportIndex(cell_index));
break;
case ModuleDescriptor::kExport:
- object = handle(module->regular_exports()->get(ExportIndex(cell_index)),
- isolate);
+ cell = regular_exports()->get(ExportIndex(cell_index));
break;
case ModuleDescriptor::kInvalid:
UNREACHABLE();
+ cell = nullptr;
break;
}
- return handle(Handle<Cell>::cast(object)->value(), isolate);
+ return Cell::cast(cell);
+}
+
+Handle<Object> Module::LoadVariable(Handle<Module> module, int cell_index) {
+ Isolate* isolate = module->GetIsolate();
+ return handle(module->GetCell(cell_index)->value(), isolate);
}
void Module::StoreVariable(Handle<Module> module, int cell_index,
Handle<Object> value) {
- Isolate* isolate = module->GetIsolate();
DCHECK_EQ(ModuleDescriptor::GetCellIndexKind(cell_index),
ModuleDescriptor::kExport);
- Handle<Object> object(module->regular_exports()->get(ExportIndex(cell_index)),
- isolate);
- Handle<Cell>::cast(object)->set_value(*value);
+ module->GetCell(cell_index)->set_value(*value);
}
MaybeHandle<Cell> Module::ResolveImport(Handle<Module> module,
@@ -20120,15 +20309,10 @@ bool Module::PrepareInstantiate(Handle<Module> module,
for (int i = 0, length = module_requests->length(); i < length; ++i) {
Handle<String> specifier(String::cast(module_requests->get(i)), isolate);
v8::Local<v8::Module> api_requested_module;
- // TODO(adamk): Revisit these failure cases once d8 knows how to
- // persist a module_map across multiple top-level module loads, as
- // the current module is left in a "half-instantiated" state.
if (!callback(context, v8::Utils::ToLocal(specifier),
v8::Utils::ToLocal(module))
.ToLocal(&api_requested_module)) {
- // TODO(adamk): Give this a better error message. But this is a
- // misuse of the API anyway.
- isolate->ThrowIllegalOperation();
+ isolate->PromoteScheduledException();
return false;
}
Handle<Module> requested_module = Utils::OpenHandle(*api_requested_module);
@@ -20469,6 +20653,5 @@ ElementsKind JSArrayIterator::ElementsKindForInstanceType(InstanceType type) {
return kind;
}
}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 46697b55cc..1cfdbe6f04 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -249,7 +249,6 @@ enum SimpleTransitionFlag {
SPECIAL_TRANSITION
};
-
// Indicates whether we are only interested in the descriptors of a particular
// map, or in all descriptors in the descriptor array.
enum DescriptorFlag {
@@ -293,159 +292,161 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
// NOTE: Everything following JS_VALUE_TYPE is considered a
// JSObject for GC purposes. The first four entries here have typeof
// 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST(V) \
- V(INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
- V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \
- V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
- V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- V(STRING_TYPE) \
- V(CONS_STRING_TYPE) \
- V(EXTERNAL_STRING_TYPE) \
- V(SLICED_STRING_TYPE) \
- V(THIN_STRING_TYPE) \
- V(ONE_BYTE_STRING_TYPE) \
- V(CONS_ONE_BYTE_STRING_TYPE) \
- V(EXTERNAL_ONE_BYTE_STRING_TYPE) \
- V(SLICED_ONE_BYTE_STRING_TYPE) \
- V(THIN_ONE_BYTE_STRING_TYPE) \
- V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- V(SHORT_EXTERNAL_STRING_TYPE) \
- V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE) \
- V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
- \
- V(SYMBOL_TYPE) \
- V(HEAP_NUMBER_TYPE) \
- V(ODDBALL_TYPE) \
- \
- V(MAP_TYPE) \
- V(CODE_TYPE) \
- V(MUTABLE_HEAP_NUMBER_TYPE) \
- V(FOREIGN_TYPE) \
- V(BYTE_ARRAY_TYPE) \
- V(BYTECODE_ARRAY_TYPE) \
- V(FREE_SPACE_TYPE) \
- \
- V(FIXED_INT8_ARRAY_TYPE) \
- V(FIXED_UINT8_ARRAY_TYPE) \
- V(FIXED_INT16_ARRAY_TYPE) \
- V(FIXED_UINT16_ARRAY_TYPE) \
- V(FIXED_INT32_ARRAY_TYPE) \
- V(FIXED_UINT32_ARRAY_TYPE) \
- V(FIXED_FLOAT32_ARRAY_TYPE) \
- V(FIXED_FLOAT64_ARRAY_TYPE) \
- V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
- \
- V(FIXED_DOUBLE_ARRAY_TYPE) \
- V(FILLER_TYPE) \
- \
- V(ACCESSOR_INFO_TYPE) \
- V(ACCESSOR_PAIR_TYPE) \
- V(ACCESS_CHECK_INFO_TYPE) \
- V(INTERCEPTOR_INFO_TYPE) \
- V(CALL_HANDLER_INFO_TYPE) \
- V(FUNCTION_TEMPLATE_INFO_TYPE) \
- V(OBJECT_TEMPLATE_INFO_TYPE) \
- V(ALLOCATION_SITE_TYPE) \
- V(ALLOCATION_MEMENTO_TYPE) \
- V(SCRIPT_TYPE) \
- V(TYPE_FEEDBACK_INFO_TYPE) \
- V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
- V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
- V(PROMISE_REACTION_JOB_INFO_TYPE) \
- V(DEBUG_INFO_TYPE) \
- V(BREAK_POINT_INFO_TYPE) \
- V(STACK_FRAME_INFO_TYPE) \
- V(PROTOTYPE_INFO_TYPE) \
- V(TUPLE2_TYPE) \
- V(TUPLE3_TYPE) \
- V(CONTEXT_EXTENSION_TYPE) \
- V(CONSTANT_ELEMENTS_PAIR_TYPE) \
- V(MODULE_TYPE) \
- V(MODULE_INFO_ENTRY_TYPE) \
- V(ASYNC_GENERATOR_REQUEST_TYPE) \
- V(FIXED_ARRAY_TYPE) \
- V(TRANSITION_ARRAY_TYPE) \
- V(SHARED_FUNCTION_INFO_TYPE) \
- V(CELL_TYPE) \
- V(WEAK_CELL_TYPE) \
- V(PROPERTY_CELL_TYPE) \
- \
- V(JS_PROXY_TYPE) \
- V(JS_GLOBAL_OBJECT_TYPE) \
- V(JS_GLOBAL_PROXY_TYPE) \
- V(JS_SPECIAL_API_OBJECT_TYPE) \
- V(JS_VALUE_TYPE) \
- V(JS_MESSAGE_OBJECT_TYPE) \
- V(JS_DATE_TYPE) \
- V(JS_API_OBJECT_TYPE) \
- V(JS_OBJECT_TYPE) \
- V(JS_ARGUMENTS_TYPE) \
- V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
- V(JS_GENERATOR_OBJECT_TYPE) \
- V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
- V(JS_MODULE_NAMESPACE_TYPE) \
- V(JS_ARRAY_TYPE) \
- V(JS_ARRAY_BUFFER_TYPE) \
- V(JS_TYPED_ARRAY_TYPE) \
- V(JS_DATA_VIEW_TYPE) \
- V(JS_SET_TYPE) \
- V(JS_MAP_TYPE) \
- V(JS_SET_ITERATOR_TYPE) \
- V(JS_MAP_ITERATOR_TYPE) \
- V(JS_WEAK_MAP_TYPE) \
- V(JS_WEAK_SET_TYPE) \
- V(JS_PROMISE_CAPABILITY_TYPE) \
- V(JS_PROMISE_TYPE) \
- V(JS_REGEXP_TYPE) \
- V(JS_ERROR_TYPE) \
- V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
- V(JS_STRING_ITERATOR_TYPE) \
- \
- V(JS_TYPED_ARRAY_KEY_ITERATOR_TYPE) \
- V(JS_FAST_ARRAY_KEY_ITERATOR_TYPE) \
- V(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE) \
- \
- V(JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- \
- V(JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- V(JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
- \
- V(JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_INT8_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_INT16_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_INT32_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE) \
- \
- V(JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE) \
- V(JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE) \
- \
- V(JS_BOUND_FUNCTION_TYPE) \
+#define INSTANCE_TYPE_LIST(V) \
+ V(INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \
+ V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE) \
+ V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(STRING_TYPE) \
+ V(CONS_STRING_TYPE) \
+ V(EXTERNAL_STRING_TYPE) \
+ V(SLICED_STRING_TYPE) \
+ V(THIN_STRING_TYPE) \
+ V(ONE_BYTE_STRING_TYPE) \
+ V(CONS_ONE_BYTE_STRING_TYPE) \
+ V(EXTERNAL_ONE_BYTE_STRING_TYPE) \
+ V(SLICED_ONE_BYTE_STRING_TYPE) \
+ V(THIN_ONE_BYTE_STRING_TYPE) \
+ V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ V(SHORT_EXTERNAL_STRING_TYPE) \
+ V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE) \
+ V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+ \
+ V(SYMBOL_TYPE) \
+ V(HEAP_NUMBER_TYPE) \
+ V(ODDBALL_TYPE) \
+ \
+ V(MAP_TYPE) \
+ V(CODE_TYPE) \
+ V(MUTABLE_HEAP_NUMBER_TYPE) \
+ V(FOREIGN_TYPE) \
+ V(BYTE_ARRAY_TYPE) \
+ V(BYTECODE_ARRAY_TYPE) \
+ V(FREE_SPACE_TYPE) \
+ \
+ V(FIXED_INT8_ARRAY_TYPE) \
+ V(FIXED_UINT8_ARRAY_TYPE) \
+ V(FIXED_INT16_ARRAY_TYPE) \
+ V(FIXED_UINT16_ARRAY_TYPE) \
+ V(FIXED_INT32_ARRAY_TYPE) \
+ V(FIXED_UINT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT32_ARRAY_TYPE) \
+ V(FIXED_FLOAT64_ARRAY_TYPE) \
+ V(FIXED_UINT8_CLAMPED_ARRAY_TYPE) \
+ \
+ V(FIXED_DOUBLE_ARRAY_TYPE) \
+ V(FILLER_TYPE) \
+ \
+ V(ACCESSOR_INFO_TYPE) \
+ V(ACCESSOR_PAIR_TYPE) \
+ V(ACCESS_CHECK_INFO_TYPE) \
+ V(INTERCEPTOR_INFO_TYPE) \
+ V(FUNCTION_TEMPLATE_INFO_TYPE) \
+ V(OBJECT_TEMPLATE_INFO_TYPE) \
+ V(ALLOCATION_SITE_TYPE) \
+ V(ALLOCATION_MEMENTO_TYPE) \
+ V(SCRIPT_TYPE) \
+ V(ALIASED_ARGUMENTS_ENTRY_TYPE) \
+ V(PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE) \
+ V(PROMISE_REACTION_JOB_INFO_TYPE) \
+ V(DEBUG_INFO_TYPE) \
+ V(STACK_FRAME_INFO_TYPE) \
+ V(PROTOTYPE_INFO_TYPE) \
+ V(TUPLE2_TYPE) \
+ V(TUPLE3_TYPE) \
+ V(CONTEXT_EXTENSION_TYPE) \
+ V(MODULE_TYPE) \
+ V(MODULE_INFO_ENTRY_TYPE) \
+ V(ASYNC_GENERATOR_REQUEST_TYPE) \
+ V(FIXED_ARRAY_TYPE) \
+ V(TRANSITION_ARRAY_TYPE) \
+ V(SHARED_FUNCTION_INFO_TYPE) \
+ V(CELL_TYPE) \
+ V(WEAK_CELL_TYPE) \
+ V(PROPERTY_CELL_TYPE) \
+ /* TODO(yangguo): these padding types are for ABI stability. Remove after*/ \
+ /* version 6.0 branch, or replace them when there is demand for new types.*/ \
+ V(PADDING_TYPE_1) \
+ V(PADDING_TYPE_2) \
+ V(PADDING_TYPE_3) \
+ V(PADDING_TYPE_4) \
+ \
+ V(JS_PROXY_TYPE) \
+ V(JS_GLOBAL_OBJECT_TYPE) \
+ V(JS_GLOBAL_PROXY_TYPE) \
+ V(JS_SPECIAL_API_OBJECT_TYPE) \
+ V(JS_VALUE_TYPE) \
+ V(JS_MESSAGE_OBJECT_TYPE) \
+ V(JS_DATE_TYPE) \
+ V(JS_API_OBJECT_TYPE) \
+ V(JS_OBJECT_TYPE) \
+ V(JS_ARGUMENTS_TYPE) \
+ V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JS_GENERATOR_OBJECT_TYPE) \
+ V(JS_ASYNC_GENERATOR_OBJECT_TYPE) \
+ V(JS_MODULE_NAMESPACE_TYPE) \
+ V(JS_ARRAY_TYPE) \
+ V(JS_ARRAY_BUFFER_TYPE) \
+ V(JS_TYPED_ARRAY_TYPE) \
+ V(JS_DATA_VIEW_TYPE) \
+ V(JS_SET_TYPE) \
+ V(JS_MAP_TYPE) \
+ V(JS_SET_ITERATOR_TYPE) \
+ V(JS_MAP_ITERATOR_TYPE) \
+ V(JS_WEAK_MAP_TYPE) \
+ V(JS_WEAK_SET_TYPE) \
+ V(JS_PROMISE_CAPABILITY_TYPE) \
+ V(JS_PROMISE_TYPE) \
+ V(JS_REGEXP_TYPE) \
+ V(JS_ERROR_TYPE) \
+ V(JS_ASYNC_FROM_SYNC_ITERATOR_TYPE) \
+ V(JS_STRING_ITERATOR_TYPE) \
+ \
+ V(JS_TYPED_ARRAY_KEY_ITERATOR_TYPE) \
+ V(JS_FAST_ARRAY_KEY_ITERATOR_TYPE) \
+ V(JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE) \
+ \
+ V(JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ \
+ V(JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ V(JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE) \
+ \
+ V(JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT8_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT16_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_INT32_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE) \
+ \
+ V(JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE) \
+ V(JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE) \
+ \
+ V(JS_BOUND_FUNCTION_TYPE) \
V(JS_FUNCTION_TYPE)
// Since string types are not consecutive, this macro is used to
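
// Editor's sketch (illustrative, not part of this patch): INSTANCE_TYPE_LIST
// is an X-macro; callers pass a macro that is expanded once per entry. A
// hypothetical consumer that prints every instance-type name:
//
//   #include <cstdio>
//   #define PRINT_TYPE_NAME(NAME) std::printf("%s\n", #NAME);
//   void PrintInstanceTypes() { INSTANCE_TYPE_LIST(PRINT_TYPE_NAME) }
//   #undef PRINT_TYPE_NAME
//
// The PADDING_TYPE_* entries added above exist only to keep the numeric
// values of later instance types stable across the 6.0 ABI, per the TODO.
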
@@ -516,26 +517,22 @@ const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \
V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \
V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \
- V(CALL_HANDLER_INFO, CallHandlerInfo, call_handler_info) \
V(FUNCTION_TEMPLATE_INFO, FunctionTemplateInfo, function_template_info) \
V(OBJECT_TEMPLATE_INFO, ObjectTemplateInfo, object_template_info) \
V(ALLOCATION_SITE, AllocationSite, allocation_site) \
V(ALLOCATION_MEMENTO, AllocationMemento, allocation_memento) \
V(SCRIPT, Script, script) \
- V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \
V(ALIASED_ARGUMENTS_ENTRY, AliasedArgumentsEntry, aliased_arguments_entry) \
V(PROMISE_RESOLVE_THENABLE_JOB_INFO, PromiseResolveThenableJobInfo, \
promise_resolve_thenable_job_info) \
V(PROMISE_REACTION_JOB_INFO, PromiseReactionJobInfo, \
promise_reaction_job_info) \
V(DEBUG_INFO, DebugInfo, debug_info) \
- V(BREAK_POINT_INFO, BreakPointInfo, break_point_info) \
V(STACK_FRAME_INFO, StackFrameInfo, stack_frame_info) \
V(PROTOTYPE_INFO, PrototypeInfo, prototype_info) \
V(TUPLE2, Tuple2, tuple2) \
V(TUPLE3, Tuple3, tuple3) \
V(CONTEXT_EXTENSION, ContextExtension, context_extension) \
- V(CONSTANT_ELEMENTS_PAIR, ConstantElementsPair, constant_elements_pair) \
V(MODULE, Module, module) \
V(MODULE_INFO_ENTRY, ModuleInfoEntry, module_info_entry) \
V(ASYNC_GENERATOR_REQUEST, AsyncGeneratorRequest, async_generator_request)
@@ -690,24 +687,20 @@ enum InstanceType {
ACCESSOR_PAIR_TYPE,
ACCESS_CHECK_INFO_TYPE,
INTERCEPTOR_INFO_TYPE,
- CALL_HANDLER_INFO_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
OBJECT_TEMPLATE_INFO_TYPE,
ALLOCATION_SITE_TYPE,
ALLOCATION_MEMENTO_TYPE,
SCRIPT_TYPE,
- TYPE_FEEDBACK_INFO_TYPE,
ALIASED_ARGUMENTS_ENTRY_TYPE,
PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE,
PROMISE_REACTION_JOB_INFO_TYPE,
DEBUG_INFO_TYPE,
- BREAK_POINT_INFO_TYPE,
STACK_FRAME_INFO_TYPE,
PROTOTYPE_INFO_TYPE,
TUPLE2_TYPE,
TUPLE3_TYPE,
CONTEXT_EXTENSION_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
MODULE_TYPE,
MODULE_INFO_ENTRY_TYPE,
ASYNC_GENERATOR_REQUEST_TYPE,
@@ -720,6 +713,13 @@ enum InstanceType {
+ // TODO(yangguo): these padding types are for ABI stability. Remove after
+ // version 6.0 branch, or replace them when there is demand for new types.
+ PADDING_TYPE_1,
+ PADDING_TYPE_2,
+ PADDING_TYPE_3,
+ PADDING_TYPE_4,
+
// All the following types are subtypes of JSReceiver, which corresponds to
// objects in the JS sense. The first and the last type in this range are
// the two forms of function. This organization enables using the same
// compares for checking the JS_RECEIVER and the NONCALLABLE_JS_OBJECT range.
JS_PROXY_TYPE, // FIRST_JS_RECEIVER_TYPE
@@ -951,6 +951,7 @@ class ObjectHashTable;
class ObjectVisitor;
class PropertyCell;
class PropertyDescriptor;
+class RootVisitor;
class SafepointEntry;
class SharedFunctionInfo;
class StringStream;
@@ -977,128 +978,133 @@ template <class C> inline bool Is(Object* obj);
V(Primitive) \
V(Number)
-#define HEAP_OBJECT_TYPE_LIST(V) \
- V(HeapNumber) \
- V(MutableHeapNumber) \
- V(Name) \
- V(UniqueName) \
- V(String) \
- V(SeqString) \
- V(ExternalString) \
- V(ConsString) \
- V(SlicedString) \
- V(ExternalTwoByteString) \
- V(ExternalOneByteString) \
- V(SeqTwoByteString) \
- V(SeqOneByteString) \
- V(InternalizedString) \
- V(ThinString) \
- V(Symbol) \
- \
- V(FixedTypedArrayBase) \
- V(FixedUint8Array) \
- V(FixedInt8Array) \
- V(FixedUint16Array) \
- V(FixedInt16Array) \
- V(FixedUint32Array) \
- V(FixedInt32Array) \
- V(FixedFloat32Array) \
- V(FixedFloat64Array) \
- V(FixedUint8ClampedArray) \
- V(ByteArray) \
- V(BytecodeArray) \
- V(FreeSpace) \
- V(JSReceiver) \
- V(JSObject) \
- V(JSArgumentsObject) \
- V(JSContextExtensionObject) \
- V(JSGeneratorObject) \
- V(JSAsyncGeneratorObject) \
- V(JSModuleNamespace) \
- V(Map) \
- V(DescriptorArray) \
- V(FrameArray) \
- V(TransitionArray) \
- V(FeedbackMetadata) \
- V(FeedbackVector) \
- V(DeoptimizationInputData) \
- V(DeoptimizationOutputData) \
- V(DependentCode) \
- V(HandlerTable) \
- V(FixedArray) \
- V(BoilerplateDescription) \
- V(FixedDoubleArray) \
- V(WeakFixedArray) \
- V(ArrayList) \
- V(RegExpMatchInfo) \
- V(Context) \
- V(ScriptContextTable) \
- V(NativeContext) \
- V(ScopeInfo) \
- V(ModuleInfo) \
- V(JSBoundFunction) \
- V(JSFunction) \
- V(Code) \
- V(AbstractCode) \
- V(Oddball) \
- V(SharedFunctionInfo) \
- V(JSValue) \
- V(JSDate) \
- V(JSMessageObject) \
- V(StringWrapper) \
- V(Foreign) \
- V(Boolean) \
- V(JSArray) \
- V(JSArrayBuffer) \
- V(JSArrayBufferView) \
- V(JSAsyncFromSyncIterator) \
- V(JSCollection) \
- V(JSTypedArray) \
- V(JSArrayIterator) \
- V(JSDataView) \
- V(JSProxy) \
- V(JSError) \
- V(JSPromiseCapability) \
- V(JSPromise) \
- V(JSStringIterator) \
- V(JSSet) \
- V(JSMap) \
- V(JSSetIterator) \
- V(JSMapIterator) \
- V(JSWeakCollection) \
- V(JSWeakMap) \
- V(JSWeakSet) \
- V(JSRegExp) \
- V(HashTable) \
- V(Dictionary) \
- V(UnseededNumberDictionary) \
- V(StringTable) \
- V(StringSet) \
- V(NormalizedMapCache) \
- V(CompilationCacheTable) \
- V(CodeCacheHashTable) \
- V(MapCache) \
- V(JSGlobalObject) \
- V(JSGlobalProxy) \
- V(Undetectable) \
- V(AccessCheckNeeded) \
- V(Callable) \
- V(Function) \
- V(Constructor) \
- V(TemplateInfo) \
- V(Filler) \
- V(FixedArrayBase) \
- V(External) \
- V(Struct) \
- V(Cell) \
- V(TemplateList) \
- V(PropertyCell) \
- V(WeakCell) \
- V(ObjectHashTable) \
- V(ObjectHashSet) \
- V(WeakHashTable) \
- V(OrderedHashTable) \
- V(SloppyArgumentsElements)
+#define HEAP_OBJECT_TYPE_LIST(V) \
+ V(AbstractCode) \
+ V(AccessCheckNeeded) \
+ V(ArrayList) \
+ V(BoilerplateDescription) \
+ V(Boolean) \
+ V(BreakPointInfo) \
+ V(ByteArray) \
+ V(BytecodeArray) \
+ V(Callable) \
+ V(CallHandlerInfo) \
+ V(Cell) \
+ V(Code) \
+ V(CodeCacheHashTable) \
+ V(CompilationCacheTable) \
+ V(ConsString) \
+ V(ConstantElementsPair) \
+ V(Constructor) \
+ V(Context) \
+ V(DeoptimizationInputData) \
+ V(DeoptimizationOutputData) \
+ V(DependentCode) \
+ V(DescriptorArray) \
+ V(Dictionary) \
+ V(External) \
+ V(ExternalOneByteString) \
+ V(ExternalString) \
+ V(ExternalTwoByteString) \
+ V(FeedbackMetadata) \
+ V(FeedbackVector) \
+ V(Filler) \
+ V(FixedArray) \
+ V(FixedArrayBase) \
+ V(FixedDoubleArray) \
+ V(FixedFloat32Array) \
+ V(FixedFloat64Array) \
+ V(FixedInt16Array) \
+ V(FixedInt32Array) \
+ V(FixedInt8Array) \
+ V(FixedTypedArrayBase) \
+ V(FixedUint16Array) \
+ V(FixedUint32Array) \
+ V(FixedUint8Array) \
+ V(FixedUint8ClampedArray) \
+ V(Foreign) \
+ V(FrameArray) \
+ V(FreeSpace) \
+ V(Function) \
+ V(HandlerTable) \
+ V(HashTable) \
+ V(HeapNumber) \
+ V(InternalizedString) \
+ V(JSArgumentsObject) \
+ V(JSArray) \
+ V(JSArrayBuffer) \
+ V(JSArrayBufferView) \
+ V(JSArrayIterator) \
+ V(JSAsyncFromSyncIterator) \
+ V(JSAsyncGeneratorObject) \
+ V(JSBoundFunction) \
+ V(JSCollection) \
+ V(JSContextExtensionObject) \
+ V(JSDataView) \
+ V(JSDate) \
+ V(JSError) \
+ V(JSFunction) \
+ V(JSGeneratorObject) \
+ V(JSGlobalObject) \
+ V(JSGlobalProxy) \
+ V(JSMap) \
+ V(JSMapIterator) \
+ V(JSMessageObject) \
+ V(JSModuleNamespace) \
+ V(JSObject) \
+ V(JSPromise) \
+ V(JSPromiseCapability) \
+ V(JSProxy) \
+ V(JSReceiver) \
+ V(JSRegExp) \
+ V(JSSet) \
+ V(JSSetIterator) \
+ V(JSSloppyArgumentsObject) \
+ V(JSStringIterator) \
+ V(JSTypedArray) \
+ V(JSValue) \
+ V(JSWeakCollection) \
+ V(JSWeakMap) \
+ V(JSWeakSet) \
+ V(Map) \
+ V(MapCache) \
+ V(ModuleInfo) \
+ V(MutableHeapNumber) \
+ V(Name) \
+ V(NativeContext) \
+ V(NormalizedMapCache) \
+ V(ObjectHashSet) \
+ V(ObjectHashTable) \
+ V(Oddball) \
+ V(OrderedHashTable) \
+ V(PropertyCell) \
+ V(RegExpMatchInfo) \
+ V(ScopeInfo) \
+ V(ScriptContextTable) \
+ V(SeqOneByteString) \
+ V(SeqString) \
+ V(SeqTwoByteString) \
+ V(SharedFunctionInfo) \
+ V(SlicedString) \
+ V(SloppyArgumentsElements) \
+ V(SourcePositionTableWithFrameCache) \
+ V(String) \
+ V(StringSet) \
+ V(StringTable) \
+ V(StringWrapper) \
+ V(Struct) \
+ V(Symbol) \
+ V(TemplateInfo) \
+ V(TemplateList) \
+ V(ThinString) \
+ V(TransitionArray) \
+ V(TypeFeedbackInfo) \
+ V(Undetectable) \
+ V(UniqueName) \
+ V(UnseededNumberDictionary) \
+ V(WeakCell) \
+ V(WeakFixedArray) \
+ V(WeakHashTable)
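
// Editor's sketch (assumed consumer pattern, not from this patch): each entry
// in HEAP_OBJECT_TYPE_LIST typically becomes an Is<Type>() predicate on
// Object via token pasting, e.g.:
//
//   #define IS_TYPE_FUNCTION_DECL(Type) INLINE(bool Is##Type() const);
//   HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
//   #undef IS_TYPE_FUNCTION_DECL
//
// Sorting the list alphabetically, as this change does, has no effect on the
// expansion; it just makes duplicates and merge conflicts easier to spot.
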
#define ODDBALL_LIST(V) \
V(Undefined, undefined_value) \
@@ -1182,7 +1188,7 @@ class Object {
inline double Number() const;
INLINE(bool IsNaN() const);
INLINE(bool IsMinusZero() const);
- bool ToInt32(int32_t* value);
+ V8_EXPORT_PRIVATE bool ToInt32(int32_t* value);
inline bool ToUint32(uint32_t* value);
inline Representation OptimalRepresentation();
@@ -1227,12 +1233,19 @@ class Object {
// ES6 section 7.2.13 Strict Equality Comparison
bool StrictEquals(Object* that);
+ // ES6 section 7.1.13 ToObject
// Convert to a JSObject if needed.
// native_context is used when creating wrapper object.
+ //
+ // Passing a non-null method_name allows us to give a more informative
+ // error message for those cases where ToObject is being called on
+ // the receiver of a built-in method.
MUST_USE_RESULT static inline MaybeHandle<JSReceiver> ToObject(
- Isolate* isolate, Handle<Object> object);
+ Isolate* isolate, Handle<Object> object,
+ const char* method_name = nullptr);
MUST_USE_RESULT static MaybeHandle<JSReceiver> ToObject(
- Isolate* isolate, Handle<Object> object, Handle<Context> context);
+ Isolate* isolate, Handle<Object> object, Handle<Context> native_context,
+ const char* method_name = nullptr);
// ES6 section 9.2.1.2, OrdinaryCallBindThis for sloppy callee.
MUST_USE_RESULT static MaybeHandle<JSReceiver> ConvertReceiver(
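
// Editor's sketch (hypothetical call site, not from this patch): a builtin
// can forward its own name through the new method_name parameter so that
// ToObject on null or undefined raises a TypeError naming the method:
//
//   Handle<JSReceiver> receiver;
//   if (!Object::ToObject(isolate, object, "String.prototype.charAt")
//            .ToHandle(&receiver)) {
//     return isolate->heap()->exception();
//   }
//
// Passing nullptr (the default) preserves the old, generic error message.
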
@@ -1655,6 +1668,9 @@ class HeapObject: public Object {
// information.
inline Map* map() const;
inline void set_map(Map* value);
+
+ inline HeapObject** map_slot();
+
// The no-write-barrier version. This is OK if the object is white and in
// new space, or if the value is an immortal immutable object, like the maps
// of primitive (non-JS) objects like strings, heap numbers etc.
@@ -1666,9 +1682,13 @@ class HeapObject: public Object {
// Set the map using release store
inline void synchronized_set_map(Map* value);
- inline void synchronized_set_map_no_write_barrier(Map* value);
inline void synchronized_set_map_word(MapWord map_word);
+ // Initialize the map immediately after the object is allocated.
+ // Do not use this outside Heap.
+ inline void set_map_after_allocation(
+ Map* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+
// During garbage collection, the map word of a heap object does not
// necessarily contain a map pointer.
inline MapWord map_word() const;
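
// Editor's sketch (assumed usage, based on the comment above): allocation
// sites initialize the map exactly once via the new entry point instead of
// the general setter, e.g.:
//
//   HeapObject* result = ...;  // freshly allocated, map slot uninitialized
//   result->set_map_after_allocation(map, SKIP_WRITE_BARRIER);
//
// Separating initialization from mutation lets the GC and the concurrent
// marker assume that set_map() only ever runs on objects with a valid map.
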
@@ -2340,10 +2360,12 @@ class JSObject: public JSReceiver {
// an access at key?
bool WouldConvertToSlowElements(uint32_t index);
+ static const uint32_t kMinAddedElementsCapacity = 16;
+
// Computes the new capacity when expanding the elements of a JSObject.
static uint32_t NewElementsCapacity(uint32_t old_capacity) {
- // (old_capacity + 50%) + 16
- return old_capacity + (old_capacity >> 1) + 16;
+ // (old_capacity + 50%) + kMinAddedElementsCapacity
+ return old_capacity + (old_capacity >> 1) + kMinAddedElementsCapacity;
}
// These methods do not perform access checks!
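
// Editor's note: worked numbers for the growth rule above. With
// kMinAddedElementsCapacity = 16, successive capacities are
//   0 -> 16 -> 40 -> 76 -> 130 -> 211 -> ...
// i.e. new = old + old / 2 + 16: roughly 1.5x growth, with an additive floor
// so that small arrays do not reallocate on every few insertions.
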
@@ -2682,6 +2704,11 @@ class JSArgumentsObject: public JSObject {
// Indices of in-object properties.
static const int kLengthIndex = 0;
+ DECL_ACCESSORS(length, Object)
+
+ DECLARE_VERIFIER(JSArgumentsObject)
+ DECLARE_CAST(JSArgumentsObject)
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArgumentsObject);
};
@@ -2695,7 +2722,12 @@ class JSSloppyArgumentsObject: public JSArgumentsObject {
static const int kCalleeOffset = JSArgumentsObject::kHeaderSize;
static const int kSize = kCalleeOffset + kPointerSize;
// Indices of in-object properties.
- static const int kCalleeIndex = 1;
+ static const int kCalleeIndex = kLengthIndex + 1;
+
+ DECL_ACCESSORS(callee, Object)
+
+ DECLARE_VERIFIER(JSSloppyArgumentsObject)
+ DECLARE_CAST(JSSloppyArgumentsObject)
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSSloppyArgumentsObject);
@@ -2709,6 +2741,8 @@ class JSStrictArgumentsObject: public JSArgumentsObject {
// Offsets of object fields.
static const int kSize = JSArgumentsObject::kHeaderSize;
+ DECLARE_CAST(JSStrictArgumentsObject)
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(JSStrictArgumentsObject);
};
@@ -2808,6 +2842,10 @@ class FixedArray: public FixedArrayBase {
static const int kMaxSize = 128 * MB * kPointerSize;
// Maximally allowed length of a FixedArray.
static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
+ // Maximally allowed length for regular (non large object space) object.
+ STATIC_ASSERT(kMaxRegularHeapObjectSize < kMaxSize);
+ static const int kMaxRegularLength =
+ (kMaxRegularHeapObjectSize - kHeaderSize) / kPointerSize;
// Dispatched behavior.
DECLARE_PRINTER(FixedArray)
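
// Editor's note (illustrative arithmetic, assuming a 64-bit build where
// kPointerSize == 8): kMaxRegularLength is simply
//   (kMaxRegularHeapObjectSize - kHeaderSize) / kPointerSize,
// so with a regular-object limit of about 512 KB it comes out to roughly
// 64K pointers; any longer FixedArray must live in large object space.
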
@@ -2874,6 +2912,8 @@ class FixedDoubleArray: public FixedArrayBase {
DECLARE_PRINTER(FixedDoubleArray)
DECLARE_VERIFIER(FixedDoubleArray)
+ class BodyDescriptor;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedDoubleArray);
};
@@ -2900,6 +2940,7 @@ class FixedDoubleArray: public FixedArrayBase {
// JSArgumentsObject:
// - FAST_SLOPPY_ARGUMENTS_ELEMENTS: FAST_HOLEY_ELEMENTS
// - SLOW_SLOPPY_ARGUMENTS_ELEMENTS: DICTIONARY_ELEMENTS
class SloppyArgumentsElements : public FixedArray {
public:
static const int kContextIndex = 0;
@@ -2914,6 +2955,9 @@ class SloppyArgumentsElements : public FixedArray {
inline void set_mapped_entry(uint32_t entry, Object* object);
DECLARE_CAST(SloppyArgumentsElements)
+#ifdef VERIFY_HEAP
+ void SloppyArgumentsElementsVerify(JSSloppyArgumentsObject* holder);
+#endif
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SloppyArgumentsElements);
@@ -3044,39 +3088,9 @@ template <SearchMode search_mode, typename T>
inline int Search(T* array, Name* name, int valid_entries = 0,
int* out_insertion_index = NULL);
-
-// The cache for maps used by normalized (dictionary mode) objects.
-// Such maps do not have property descriptors, so a typical program
-// needs very limited number of distinct normalized maps.
-class NormalizedMapCache: public FixedArray {
- public:
- static Handle<NormalizedMapCache> New(Isolate* isolate);
-
- MUST_USE_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map,
- PropertyNormalizationMode mode);
- void Set(Handle<Map> fast_map, Handle<Map> normalized_map);
-
- void Clear();
-
- DECLARE_CAST(NormalizedMapCache)
-
- static inline bool IsNormalizedMapCache(const HeapObject* obj);
-
- DECLARE_VERIFIER(NormalizedMapCache)
- private:
- static const int kEntries = 64;
-
- static inline int GetIndex(Handle<Map> map);
-
- // The following declarations hide base class methods.
- Object* get(int index);
- void set(int index, Object* value);
-};
-
// HandlerTable is a fixed array containing entries for exception handlers in
// the code object it is associated with. The table comes in two flavors:
// 1) Based on ranges: Used for unoptimized code. Contains one entry per
// exception handler and a range representing the try-block covered by that
// handler. Layout looks as follows:
// [ range-start , range-end , handler-offset , handler-data ]
// 2) Based on return addresses: Used for turbofanned code. Contains one entry
@@ -3208,6 +3222,8 @@ class ByteArray: public FixedArrayBase {
// Maximal length of a single ByteArray.
static const int kMaxLength = kMaxSize - kHeaderSize;
+ class BodyDescriptor;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
};
@@ -3295,7 +3311,9 @@ class BytecodeArray : public FixedArrayBase {
// Accessors for source position table containing mappings between byte code
// offset and source position.
- DECL_ACCESSORS(source_position_table, ByteArray)
+ DECL_ACCESSORS(source_position_table, Object)
+
+ inline ByteArray* SourcePositionTable();
DECLARE_CAST(BytecodeArray)
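
// Editor's sketch (assumed shape, not from this patch): the field now stores
// either a plain ByteArray or a SourcePositionTableWithFrameCache wrapper, so
// readers normalize through the new accessor:
//
//   ByteArray* table = bytecode_array->SourcePositionTable();
//   // 'table' is the underlying ByteArray in either representation.
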
@@ -3467,8 +3485,9 @@ class FixedTypedArray: public FixedTypedArrayBase {
static inline Handle<Object> get(FixedTypedArray* array, int index);
inline void set(int index, ElementType value);
- static inline ElementType from_int(int value);
- static inline ElementType from_double(double value);
+ static inline ElementType from(int value);
+ static inline ElementType from(uint32_t value);
+ static inline ElementType from(double value);
// This accessor applies the correct conversion from Smi, HeapNumber
// and undefined.
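
// Editor's sketch (hypothetical helper, not from this patch): collapsing
// from_int/from_double into a from() overload set lets templated call sites
// stay generic over the value type:
//
//   template <class Traits, typename T>
//   void StoreAt(FixedTypedArray<Traits>* array, int index, T value) {
//     // Overload resolution picks from(int), from(uint32_t) or from(double).
//     array->set(index, FixedTypedArray<Traits>::from(value));
//   }
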
@@ -3500,10 +3519,9 @@ TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
#undef FIXED_TYPED_ARRAY_TRAITS
// DeoptimizationInputData is a fixed array used to hold the deoptimization
-// data for code generated by the Hydrogen/Lithium compiler. It also
-// contains information about functions that were inlined. If N different
-// functions were inlined then first N elements of the literal array will
-// contain these functions.
+// data for optimized code. It also contains information about functions that
+// were inlined. If N different functions were inlined then first N elements of
+// the literal array will contain these functions.
//
// It can be empty.
class DeoptimizationInputData: public FixedArray {
@@ -3708,7 +3726,10 @@ class Code: public HeapObject {
DECL_ACCESSORS(deoptimization_data, FixedArray)
- // [source_position_table]: ByteArray for the source positions table.
- DECL_ACCESSORS(source_position_table, ByteArray)
+ // [source_position_table]: ByteArray for the source positions table, or
+ // SourcePositionTableWithFrameCache.
+ DECL_ACCESSORS(source_position_table, Object)
+
+ inline ByteArray* SourcePositionTable();
// [trap_handler_index]: An index into the trap handler's master list of code
// objects.
@@ -3868,8 +3889,8 @@ class Code: public HeapObject {
inline bool marked_for_deoptimization();
inline void set_marked_for_deoptimization(bool flag);
- // [deopt_already_counted]: For kind OPTIMIZED_FUNCTION tells whether
- // the code was already deoptimized.
+ // [is_promise_rejection]: For kind BUILTIN tells whether the exception
+ // thrown by the code will lead to promise rejection.
inline bool is_promise_rejection();
inline void set_is_promise_rejection(bool flag);
@@ -4248,6 +4269,11 @@ class AbstractCode : public HeapObject {
// Set the source position table.
inline void set_source_position_table(ByteArray* source_position_table);
+ inline Object* stack_frame_cache();
+ static void SetStackFrameCache(Handle<AbstractCode> abstract_code,
+ Handle<UnseededNumberDictionary> cache);
+ void DropStackFrameCache();
+
// Returns the size of instructions and the metadata.
inline int SizeIncludingMetadata();
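
// Editor's sketch (assumed usage of the declarations above): the stack-frame
// cache is installed once and consulted on later stack-trace captures; that
// the dictionary is keyed by code offset is an assumption, inferred from the
// related SourcePositionTableWithFrameCache type:
//
//   Handle<UnseededNumberDictionary> cache = ...;
//   AbstractCode::SetStackFrameCache(abstract_code, cache);
//   Object* maybe_cache = abstract_code->stack_frame_cache();
//   abstract_code->DropStackFrameCache();  // e.g. when the code is flushed
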
@@ -4388,783 +4414,8 @@ class DependentCode: public FixedArray {
STATIC_ASSERT(kGroupCount <= GroupField::kMax + 1);
};
-
class PrototypeInfo;
-
-// All heap objects have a Map that describes their structure.
-// A Map contains information about:
-// - Size information about the object
-// - How to iterate over an object (for garbage collection)
-class Map: public HeapObject {
- public:
- // Instance size.
- // Size in bytes or kVariableSizeSentinel if instances do not have
- // a fixed size.
- inline int instance_size();
- inline void set_instance_size(int value);
-
- // Only to clear an unused byte, remove once byte is used.
- inline void clear_unused();
-
- // [inobject_properties_or_constructor_function_index]: Provides access
- // to the inobject properties in case of JSObject maps, or the constructor
- // function index in case of primitive maps.
- inline int inobject_properties_or_constructor_function_index();
- inline void set_inobject_properties_or_constructor_function_index(int value);
- // Count of properties allocated in the object (JSObject only).
- inline int GetInObjectProperties();
- inline void SetInObjectProperties(int value);
- // Index of the constructor function in the native context (primitives only),
- // or the special sentinel value to indicate that there is no object wrapper
- // for the primitive (i.e. in case of null or undefined).
- static const int kNoConstructorFunctionIndex = 0;
- inline int GetConstructorFunctionIndex();
- inline void SetConstructorFunctionIndex(int value);
- static MaybeHandle<JSFunction> GetConstructorFunction(
- Handle<Map> map, Handle<Context> native_context);
-
- // Retrieve interceptors.
- inline InterceptorInfo* GetNamedInterceptor();
- inline InterceptorInfo* GetIndexedInterceptor();
-
- // Instance type.
- inline InstanceType instance_type();
- inline void set_instance_type(InstanceType value);
-
- // Tells how many unused property fields are available in the
- // instance (only used for JSObject in fast mode).
- inline int unused_property_fields();
- inline void set_unused_property_fields(int value);
-
- // Bit field.
- inline byte bit_field() const;
- inline void set_bit_field(byte value);
-
- // Bit field 2.
- inline byte bit_field2() const;
- inline void set_bit_field2(byte value);
-
- // Bit field 3.
- inline uint32_t bit_field3() const;
- inline void set_bit_field3(uint32_t bits);
-
- class EnumLengthBits: public BitField<int,
- 0, kDescriptorIndexBitCount> {}; // NOLINT
- class NumberOfOwnDescriptorsBits: public BitField<int,
- kDescriptorIndexBitCount, kDescriptorIndexBitCount> {}; // NOLINT
- STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
- class DictionaryMap : public BitField<bool, 20, 1> {};
- class OwnsDescriptors : public BitField<bool, 21, 1> {};
- class HasHiddenPrototype : public BitField<bool, 22, 1> {};
- class Deprecated : public BitField<bool, 23, 1> {};
- class IsUnstable : public BitField<bool, 24, 1> {};
- class IsMigrationTarget : public BitField<bool, 25, 1> {};
- class ImmutablePrototype : public BitField<bool, 26, 1> {};
- class NewTargetIsBase : public BitField<bool, 27, 1> {};
- // Bit 28 is free.
-
- // Keep this bit field at the very end for better code in
- // Builtins::kJSConstructStubGeneric stub.
- // This counter is used for in-object slack tracking.
- // The in-object slack tracking is considered enabled when the counter is
- // non zero. The counter only has a valid count for initial maps. For
- // transitioned maps only kNoSlackTracking has a meaning, namely that inobject
- // slack tracking already finished for the transition tree. Any other value
- // indicates that either inobject slack tracking is still in progress, or that
- // the map isn't part of the transition tree anymore.
- class ConstructionCounter : public BitField<int, 29, 3> {};
- static const int kSlackTrackingCounterStart = 7;
- static const int kSlackTrackingCounterEnd = 1;
- static const int kNoSlackTracking = 0;
- STATIC_ASSERT(kSlackTrackingCounterStart <= ConstructionCounter::kMax);
-
-
- // Inobject slack tracking is the way to reclaim unused inobject space.
- //
- // The instance size is initially determined by adding some slack to
- // expected_nof_properties (to allow for a few extra properties added
- // after the constructor). There is no guarantee that the extra space
- // will not be wasted.
- //
- // Here is the algorithm to reclaim the unused inobject space:
- // - Detect the first constructor call for this JSFunction.
- // When it happens enter the "in progress" state: initialize construction
- // counter in the initial_map.
- // - While the tracking is in progress initialize unused properties of a new
- // object with one_pointer_filler_map instead of undefined_value (the "used"
- // part is initialized with undefined_value as usual). This way they can
- // be resized quickly and safely.
- // - Once enough objects have been created compute the 'slack'
- // (traverse the map transition tree starting from the
- // initial_map and find the lowest value of unused_property_fields).
- // - Traverse the transition tree again and decrease the instance size
- // of every map. Existing objects will resize automatically (they are
- // filled with one_pointer_filler_map). All further allocations will
- // use the adjusted instance size.
- // - SharedFunctionInfo's expected_nof_properties left unmodified since
- // allocations made using different closures could actually create different
- // kind of objects (see prototype inheritance pattern).
- //
- // Important: inobject slack tracking is not attempted during the snapshot
- // creation.
-
- static const int kGenerousAllocationCount =
- kSlackTrackingCounterStart - kSlackTrackingCounterEnd + 1;
-
- // Starts the tracking by initializing object constructions countdown counter.
- void StartInobjectSlackTracking();
-
- // True if the object constructions countdown counter is a range
- // [kSlackTrackingCounterEnd, kSlackTrackingCounterStart].
- inline bool IsInobjectSlackTrackingInProgress();
-
- // Does the tracking step.
- inline void InobjectSlackTrackingStep();
-
- // Completes inobject slack tracking for the transition tree starting at this
- // initial map.
- void CompleteInobjectSlackTracking();
-
- // Tells whether the object in the prototype property will be used
- // for instances created from this function. If the prototype
- // property is set to a value that is not a JSObject, the prototype
- // property will not be used to create instances of the function.
- // See ECMA-262, 13.2.2.
- inline void set_non_instance_prototype(bool value);
- inline bool has_non_instance_prototype();
-
- // Tells whether the instance has a [[Construct]] internal method.
- // This property is implemented according to ES6, section 7.2.4.
- inline void set_is_constructor(bool value);
- inline bool is_constructor() const;
-
- // Tells whether the instance with this map has a hidden prototype.
- inline void set_has_hidden_prototype(bool value);
- inline bool has_hidden_prototype() const;
-
- // Records and queries whether the instance has a named interceptor.
- inline void set_has_named_interceptor();
- inline bool has_named_interceptor();
-
- // Records and queries whether the instance has an indexed interceptor.
- inline void set_has_indexed_interceptor();
- inline bool has_indexed_interceptor();
-
- // Tells whether the instance is undetectable.
- // An undetectable object is a special class of JSObject: 'typeof' operator
- // returns undefined, ToBoolean returns false. Otherwise it behaves like
- // a normal JS object. It is useful for implementing undetectable
- // document.all in Firefox & Safari.
- // See https://bugzilla.mozilla.org/show_bug.cgi?id=248549.
- inline void set_is_undetectable();
- inline bool is_undetectable();
-
- // Tells whether the instance has a [[Call]] internal method.
- // This property is implemented according to ES6, section 7.2.3.
- inline void set_is_callable();
- inline bool is_callable() const;
-
- inline void set_new_target_is_base(bool value);
- inline bool new_target_is_base();
- inline void set_is_extensible(bool value);
- inline bool is_extensible();
- inline void set_is_prototype_map(bool value);
- inline bool is_prototype_map() const;
-
- inline void set_elements_kind(ElementsKind elements_kind);
- inline ElementsKind elements_kind();
-
- // Tells whether the instance has fast elements that are only Smis.
- inline bool has_fast_smi_elements();
-
- // Tells whether the instance has fast elements.
- inline bool has_fast_object_elements();
- inline bool has_fast_smi_or_object_elements();
- inline bool has_fast_double_elements();
- inline bool has_fast_elements();
- inline bool has_sloppy_arguments_elements();
- inline bool has_fast_sloppy_arguments_elements();
- inline bool has_fast_string_wrapper_elements();
- inline bool has_fixed_typed_array_elements();
- inline bool has_dictionary_elements();
-
- static bool IsValidElementsTransition(ElementsKind from_kind,
- ElementsKind to_kind);
-
- // Returns true if the current map doesn't have DICTIONARY_ELEMENTS but if a
- // map with DICTIONARY_ELEMENTS was found in the prototype chain.
- bool DictionaryElementsInPrototypeChainOnly();
-
- inline Map* ElementsTransitionMap();
-
- inline FixedArrayBase* GetInitialElements();
-
- // [raw_transitions]: Provides access to the transitions storage field.
- // Don't call set_raw_transitions() directly to overwrite transitions, use
- // the TransitionArray::ReplaceTransitions() wrapper instead!
- DECL_ACCESSORS(raw_transitions, Object)
- // [prototype_info]: Per-prototype metadata. Aliased with transitions
- // (which prototype maps don't have).
- DECL_ACCESSORS(prototype_info, Object)
- // PrototypeInfo is created lazily using this helper (which installs it on
- // the given prototype's map).
- static Handle<PrototypeInfo> GetOrCreatePrototypeInfo(
- Handle<JSObject> prototype, Isolate* isolate);
- static Handle<PrototypeInfo> GetOrCreatePrototypeInfo(
- Handle<Map> prototype_map, Isolate* isolate);
- inline bool should_be_fast_prototype_map() const;
- static void SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
- Isolate* isolate);
-
- // [prototype chain validity cell]: Associated with a prototype object,
- // stored in that object's map's PrototypeInfo, indicates that prototype
- // chains through this object are currently valid. The cell will be
- // invalidated and replaced when the prototype chain changes.
- static Handle<Cell> GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
- Isolate* isolate);
- static const int kPrototypeChainValid = 0;
- static const int kPrototypeChainInvalid = 1;
-
- // Return the map of the root of object's prototype chain.
- Map* GetPrototypeChainRootMap(Isolate* isolate);
-
- // Returns a WeakCell object containing given prototype. The cell is cached
- // in PrototypeInfo which is created lazily.
- static Handle<WeakCell> GetOrCreatePrototypeWeakCell(
- Handle<JSObject> prototype, Isolate* isolate);
-
- Map* FindRootMap();
- Map* FindFieldOwner(int descriptor);
-
- inline int GetInObjectPropertyOffset(int index);
-
- int NumberOfFields();
-
- // Returns true if transition to the given map requires special
- // synchronization with the concurrent marker.
- bool TransitionRequiresSynchronizationWithGC(Map* target);
- // Returns true if transition to the given map removes a tagged in-object
- // field.
- bool TransitionRemovesTaggedField(Map* target);
- // Returns true if transition to the given map replaces a tagged in-object
- // field with an untagged in-object field.
- bool TransitionChangesTaggedFieldToUntaggedField(Map* target);
-
- // TODO(ishell): candidate with JSObject::MigrateToMap().
- bool InstancesNeedRewriting(Map* target);
- bool InstancesNeedRewriting(Map* target, int target_number_of_fields,
- int target_inobject, int target_unused,
- int* old_number_of_fields);
- // TODO(ishell): moveit!
- static Handle<Map> GeneralizeAllFields(Handle<Map> map);
- MUST_USE_RESULT static Handle<FieldType> GeneralizeFieldType(
- Representation rep1, Handle<FieldType> type1, Representation rep2,
- Handle<FieldType> type2, Isolate* isolate);
- static void GeneralizeField(Handle<Map> map, int modify_index,
- PropertyConstness new_constness,
- Representation new_representation,
- Handle<FieldType> new_field_type);
-
- static Handle<Map> ReconfigureProperty(Handle<Map> map, int modify_index,
- PropertyKind new_kind,
- PropertyAttributes new_attributes,
- Representation new_representation,
- Handle<FieldType> new_field_type);
-
- static Handle<Map> ReconfigureElementsKind(Handle<Map> map,
- ElementsKind new_elements_kind);
-
- static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
- int descriptor_number,
- PropertyConstness constness,
- Handle<Object> value);
-
- static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode,
- const char* reason);
-
- // Tells whether the map is used for JSObjects in dictionary mode (ie
- // normalized objects, ie objects for which HasFastProperties returns false).
- // A map can never be used for both dictionary mode and fast mode JSObjects.
- // False by default and for HeapObjects that are not JSObjects.
- inline void set_dictionary_map(bool value);
- inline bool is_dictionary_map();
-
- // Tells whether the instance needs security checks when accessing its
- // properties.
- inline void set_is_access_check_needed(bool access_check_needed);
- inline bool is_access_check_needed();
-
- // Returns true if map has a non-empty stub code cache.
- inline bool has_code_cache();
-
- // [prototype]: implicit prototype object.
- DECL_ACCESSORS(prototype, Object)
- // TODO(jkummerow): make set_prototype private.
- static void SetPrototype(
- Handle<Map> map, Handle<Object> prototype,
- PrototypeOptimizationMode proto_mode = FAST_PROTOTYPE);
-
- // [constructor]: points back to the function or FunctionTemplateInfo
- // responsible for this map.
- // The field overlaps with the back pointer. All maps in a transition tree
- // have the same constructor, so maps with back pointers can walk the
- // back pointer chain until they find the map holding their constructor.
- // Returns null_value if there's neither a constructor function nor a
- // FunctionTemplateInfo available.
- DECL_ACCESSORS(constructor_or_backpointer, Object)
- inline Object* GetConstructor() const;
- inline FunctionTemplateInfo* GetFunctionTemplateInfo() const;
- inline void SetConstructor(Object* constructor,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
- // [back pointer]: points back to the parent map from which a transition
- // leads to this map. The field overlaps with the constructor (see above).
- inline Object* GetBackPointer();
- inline void SetBackPointer(Object* value,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-
- // [instance descriptors]: describes the object.
- DECL_ACCESSORS(instance_descriptors, DescriptorArray)
-
- // [layout descriptor]: describes the object layout.
- DECL_ACCESSORS(layout_descriptor, LayoutDescriptor)
- // |layout descriptor| accessor which can be used from GC.
- inline LayoutDescriptor* layout_descriptor_gc_safe();
- inline bool HasFastPointerLayout() const;
-
- // |layout descriptor| accessor that is safe to call even when
- // FLAG_unbox_double_fields is disabled (in this case Map does not contain
- // |layout_descriptor| field at all).
- inline LayoutDescriptor* GetLayoutDescriptor();
-
- inline void UpdateDescriptors(DescriptorArray* descriptors,
- LayoutDescriptor* layout_descriptor);
- inline void InitializeDescriptors(DescriptorArray* descriptors,
- LayoutDescriptor* layout_descriptor);
-
- // [stub cache]: contains stubs compiled for this map.
- DECL_ACCESSORS(code_cache, FixedArray)
-
- // [dependent code]: list of optimized codes that weakly embed this map.
- DECL_ACCESSORS(dependent_code, DependentCode)
-
- // [weak cell cache]: cache that stores a weak cell pointing to this map.
- DECL_ACCESSORS(weak_cell_cache, Object)
-
- inline PropertyDetails GetLastDescriptorDetails();
-
- inline int LastAdded();
-
- inline int NumberOfOwnDescriptors();
- inline void SetNumberOfOwnDescriptors(int number);
-
- inline Cell* RetrieveDescriptorsPointer();
-
- // Checks whether all properties are stored either in the map or on the object
- // (inobject, properties, or elements backing store), requiring no special
- // checks.
- bool OnlyHasSimpleProperties();
- inline int EnumLength();
- inline void SetEnumLength(int length);
-
- inline bool owns_descriptors();
- inline void set_owns_descriptors(bool owns_descriptors);
- inline void mark_unstable();
- inline bool is_stable();
- inline void set_migration_target(bool value);
- inline bool is_migration_target();
- inline void set_immutable_proto(bool value);
- inline bool is_immutable_proto();
- inline void set_construction_counter(int value);
- inline int construction_counter();
- inline void deprecate();
- inline bool is_deprecated();
- inline bool CanBeDeprecated();
- // Returns a non-deprecated version of the input. If the input was not
- // deprecated, it is directly returned. Otherwise, the non-deprecated version
- // is found by re-transitioning from the root of the transition tree using the
- // descriptor array of the map. Returns MaybeHandle<Map>() if no updated map
- // is found.
- static MaybeHandle<Map> TryUpdate(Handle<Map> map) WARN_UNUSED_RESULT;
-
- // Returns a non-deprecated version of the input. This method may deprecate
- // existing maps along the way if encodings conflict. Not for use while
- // gathering type feedback. Use TryUpdate in those cases instead.
- static Handle<Map> Update(Handle<Map> map);
-
- static inline Handle<Map> CopyInitialMap(Handle<Map> map);
- static Handle<Map> CopyInitialMap(Handle<Map> map, int instance_size,
- int in_object_properties,
- int unused_property_fields);
- static Handle<Map> CopyDropDescriptors(Handle<Map> map);
- static Handle<Map> CopyInsertDescriptor(Handle<Map> map,
- Descriptor* descriptor,
- TransitionFlag flag);
-
- static Handle<Object> WrapFieldType(Handle<FieldType> type);
- static FieldType* UnwrapFieldType(Object* wrapped_type);
-
- MUST_USE_RESULT static MaybeHandle<Map> CopyWithField(
- Handle<Map> map, Handle<Name> name, Handle<FieldType> type,
- PropertyAttributes attributes, PropertyConstness constness,
- Representation representation, TransitionFlag flag);
-
- MUST_USE_RESULT static MaybeHandle<Map> CopyWithConstant(
- Handle<Map> map,
- Handle<Name> name,
- Handle<Object> constant,
- PropertyAttributes attributes,
- TransitionFlag flag);
-
- // Returns a new map with all transitions dropped from the given map and
- // the ElementsKind set.
- static Handle<Map> TransitionElementsTo(Handle<Map> map,
- ElementsKind to_kind);
-
- static Handle<Map> AsElementsKind(Handle<Map> map, ElementsKind kind);
-
- static Handle<Map> CopyAsElementsKind(Handle<Map> map,
- ElementsKind kind,
- TransitionFlag flag);
-
- static Handle<Map> AsLanguageMode(Handle<Map> initial_map,
- LanguageMode language_mode,
- FunctionKind kind);
-
-
- static Handle<Map> CopyForPreventExtensions(Handle<Map> map,
- PropertyAttributes attrs_to_add,
- Handle<Symbol> transition_marker,
- const char* reason);
-
- static Handle<Map> FixProxy(Handle<Map> map, InstanceType type, int size);
-
-
- // Maximal number of fast properties. Used to restrict the number of map
- // transitions to avoid an explosion in the number of maps for objects used as
- // dictionaries.
- inline bool TooManyFastProperties(StoreFromKeyed store_mode);
- static Handle<Map> TransitionToDataProperty(Handle<Map> map,
- Handle<Name> name,
- Handle<Object> value,
- PropertyAttributes attributes,
- PropertyConstness constness,
- StoreFromKeyed store_mode);
- static Handle<Map> TransitionToAccessorProperty(
- Isolate* isolate, Handle<Map> map, Handle<Name> name, int descriptor,
- Handle<Object> getter, Handle<Object> setter,
- PropertyAttributes attributes);
- static Handle<Map> ReconfigureExistingProperty(Handle<Map> map,
- int descriptor,
- PropertyKind kind,
- PropertyAttributes attributes);
-
- inline void AppendDescriptor(Descriptor* desc);
-
- // Returns a copy of the map, prepared for inserting into the transition
- // tree (if the |map| owns descriptors then the new one will share
- // descriptors with |map|).
- static Handle<Map> CopyForTransition(Handle<Map> map, const char* reason);
-
- // Returns a copy of the map, with all transitions dropped from the
- // instance descriptors.
- static Handle<Map> Copy(Handle<Map> map, const char* reason);
- static Handle<Map> Create(Isolate* isolate, int inobject_properties);
-
- // Returns the next free property index (only valid for FAST MODE).
- int NextFreePropertyIndex();
-
- // Returns the number of properties described in instance_descriptors
- // filtering out properties with the specified attributes.
- int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
- PropertyFilter filter = ALL_PROPERTIES);
-
- DECLARE_CAST(Map)
-
- // Code cache operations.
-
- // Clears the code cache.
- inline void ClearCodeCache(Heap* heap);
-
- // Update code cache.
- static void UpdateCodeCache(Handle<Map> map,
- Handle<Name> name,
- Handle<Code> code);
-
- // Extend the descriptor array of the map with the list of descriptors.
- // In case of duplicates, the latest descriptor is used.
- static void AppendCallbackDescriptors(Handle<Map> map,
- Handle<Object> descriptors);
-
- static inline int SlackForArraySize(int old_size, int size_limit);
-
- static void EnsureDescriptorSlack(Handle<Map> map, int slack);
-
- Code* LookupInCodeCache(Name* name, Code::Flags code);
-
- static Handle<Map> GetObjectCreateMap(Handle<HeapObject> prototype);
-
- // Computes a hash value for this map, to be used in HashTables and such.
- int Hash();
-
- // Returns the transitioned map for this map with the most generic
- // elements_kind that's found in |candidates|, or |nullptr| if no match is
- // found at all.
- Map* FindElementsKindTransitionedMap(MapHandleList* candidates);
-
- inline bool CanTransition();
-
- inline bool IsBooleanMap();
- inline bool IsPrimitiveMap();
- inline bool IsJSReceiverMap();
- inline bool IsJSObjectMap();
- inline bool IsJSArrayMap();
- inline bool IsJSFunctionMap();
- inline bool IsStringMap();
- inline bool IsJSProxyMap();
- inline bool IsModuleMap();
- inline bool IsJSGlobalProxyMap();
- inline bool IsJSGlobalObjectMap();
- inline bool IsJSTypedArrayMap();
- inline bool IsJSDataViewMap();
-
- inline bool IsSpecialReceiverMap();
-
- inline bool CanOmitMapChecks();
-
- static void AddDependentCode(Handle<Map> map,
- DependentCode::DependencyGroup group,
- Handle<Code> code);
-
- bool IsMapInArrayPrototypeChain();
-
- static Handle<WeakCell> WeakCellForMap(Handle<Map> map);
-
- // Dispatched behavior.
- DECLARE_PRINTER(Map)
- DECLARE_VERIFIER(Map)
-
-#ifdef VERIFY_HEAP
- void DictionaryMapVerify();
- void VerifyOmittedMapChecks();
-#endif
-
- inline int visitor_id();
- inline void set_visitor_id(int visitor_id);
-
- static Handle<Map> TransitionToPrototype(Handle<Map> map,
- Handle<Object> prototype,
- PrototypeOptimizationMode mode);
-
- static Handle<Map> TransitionToImmutableProto(Handle<Map> map);
-
- static const int kMaxPreAllocatedPropertyFields = 255;
-
- // Layout description.
- static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
- static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
- static const int kBitField3Offset = kInstanceAttributesOffset + kIntSize;
- static const int kPrototypeOffset = kBitField3Offset + kPointerSize;
- static const int kConstructorOrBackPointerOffset =
- kPrototypeOffset + kPointerSize;
- // When there is only one transition, it is stored directly in this field;
- // otherwise a transition array is used.
- // For prototype maps, this slot is used to store this map's PrototypeInfo
- // struct.
- static const int kTransitionsOrPrototypeInfoOffset =
- kConstructorOrBackPointerOffset + kPointerSize;
- static const int kDescriptorsOffset =
- kTransitionsOrPrototypeInfoOffset + kPointerSize;
-#if V8_DOUBLE_FIELDS_UNBOXING
- static const int kLayoutDescriptorOffset = kDescriptorsOffset + kPointerSize;
- static const int kCodeCacheOffset = kLayoutDescriptorOffset + kPointerSize;
-#else
- static const int kLayoutDescriptorOffset = 1; // Must not be ever accessed.
- static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
-#endif
- static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize;
- static const int kWeakCellCacheOffset = kDependentCodeOffset + kPointerSize;
- static const int kSize = kWeakCellCacheOffset + kPointerSize;
-
- // Layout of pointer fields. Heap iteration code relies on them
- // being continuously allocated.
- static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
- static const int kPointerFieldsEndOffset = kSize;
-
- // Byte offsets within kInstanceSizesOffset.
- static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
- static const int kInObjectPropertiesOrConstructorFunctionIndexByte = 1;
- static const int kInObjectPropertiesOrConstructorFunctionIndexOffset =
- kInstanceSizesOffset + kInObjectPropertiesOrConstructorFunctionIndexByte;
- // Note there is one byte available for use here.
- static const int kUnusedByte = 2;
- static const int kUnusedOffset = kInstanceSizesOffset + kUnusedByte;
- static const int kVisitorIdByte = 3;
- static const int kVisitorIdOffset = kInstanceSizesOffset + kVisitorIdByte;
-
- // Byte offsets within kInstanceAttributesOffset attributes.
-#if V8_TARGET_LITTLE_ENDIAN
- // Order instance type and bit field together such that they can be loaded
- // together as a 16-bit word with instance type in the lower 8 bits regardless
- // of endianness. Also provide endian-independent offset to that 16-bit word.
- static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0;
- static const int kBitFieldOffset = kInstanceAttributesOffset + 1;
-#else
- static const int kBitFieldOffset = kInstanceAttributesOffset + 0;
- static const int kInstanceTypeOffset = kInstanceAttributesOffset + 1;
-#endif
- static const int kInstanceTypeAndBitFieldOffset =
- kInstanceAttributesOffset + 0;
- static const int kBitField2Offset = kInstanceAttributesOffset + 2;
- static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 3;
-
- STATIC_ASSERT(kInstanceTypeAndBitFieldOffset ==
- Internals::kMapInstanceTypeAndBitFieldOffset);
-
- // Bit positions for bit field.
- static const int kHasNonInstancePrototype = 0;
- static const int kIsCallable = 1;
- static const int kHasNamedInterceptor = 2;
- static const int kHasIndexedInterceptor = 3;
- static const int kIsUndetectable = 4;
- static const int kIsAccessCheckNeeded = 5;
- static const int kIsConstructor = 6;
- // Bit 7 is free.
-
- // Bit positions for bit field 2
- static const int kIsExtensible = 0;
- // Bit 1 is free.
- class IsPrototypeMapBits : public BitField<bool, 2, 1> {};
- class ElementsKindBits: public BitField<ElementsKind, 3, 5> {};
-
- // Derived values from bit field 2
- static const int8_t kMaximumBitField2FastElementValue = static_cast<int8_t>(
- (FAST_ELEMENTS + 1) << Map::ElementsKindBits::kShift) - 1;
- static const int8_t kMaximumBitField2FastSmiElementValue =
- static_cast<int8_t>((FAST_SMI_ELEMENTS + 1) <<
- Map::ElementsKindBits::kShift) - 1;
- static const int8_t kMaximumBitField2FastHoleyElementValue =
- static_cast<int8_t>((FAST_HOLEY_ELEMENTS + 1) <<
- Map::ElementsKindBits::kShift) - 1;
- static const int8_t kMaximumBitField2FastHoleySmiElementValue =
- static_cast<int8_t>((FAST_HOLEY_SMI_ELEMENTS + 1) <<
- Map::ElementsKindBits::kShift) - 1;
-
- typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
- kPointerFieldsEndOffset,
- kSize> BodyDescriptor;
-
- // Compares this map to another to see if they describe equivalent objects.
- // If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
- // it had exactly zero inobject properties.
- // The "shared" flags of both this map and |other| are ignored.
- bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
-
- // Returns true if given field is unboxed double.
- inline bool IsUnboxedDoubleField(FieldIndex index);
-
-#if TRACE_MAPS
- static void TraceTransition(const char* what, Map* from, Map* to, Name* name);
- static void TraceAllTransitions(Map* map);
-#endif
-
- static inline Handle<Map> AddMissingTransitionsForTesting(
- Handle<Map> split_map, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor);
-
- // Fires when the layout of an object with a leaf map changes.
- // This includes adding transitions to the leaf map or changing
- // the descriptor array.
- inline void NotifyLeafMapLayoutChange();
-
- private:
- // Returns the map that this (root) map transitions to if its elements_kind
- // is changed to |elements_kind|, or |nullptr| if no such map is cached yet.
- Map* LookupElementsTransitionMap(ElementsKind elements_kind);
-
- // Tries to replay property transitions starting from this (root) map using
- // the descriptor array of the |map|. The |root_map| is expected to have
- // proper elements kind and therefore elements kinds transitions are not
- // taken by this function. Returns |nullptr| if matching transition map is
- // not found.
- Map* TryReplayPropertyTransitions(Map* map);
-
- static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
- Handle<Name> name, SimpleTransitionFlag flag);
-
- bool EquivalentToForTransition(Map* other);
- static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
- static Handle<Map> ShareDescriptor(Handle<Map> map,
- Handle<DescriptorArray> descriptors,
- Descriptor* descriptor);
- static Handle<Map> AddMissingTransitions(
- Handle<Map> map, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor);
- static void InstallDescriptors(
- Handle<Map> parent_map, Handle<Map> child_map, int new_descriptor,
- Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> full_layout_descriptor);
- static Handle<Map> CopyAddDescriptor(Handle<Map> map,
- Descriptor* descriptor,
- TransitionFlag flag);
- static Handle<Map> CopyReplaceDescriptors(
- Handle<Map> map, Handle<DescriptorArray> descriptors,
- Handle<LayoutDescriptor> layout_descriptor, TransitionFlag flag,
- MaybeHandle<Name> maybe_name, const char* reason,
- SimpleTransitionFlag simple_flag);
-
- static Handle<Map> CopyReplaceDescriptor(Handle<Map> map,
- Handle<DescriptorArray> descriptors,
- Descriptor* descriptor,
- int index,
- TransitionFlag flag);
- static MUST_USE_RESULT MaybeHandle<Map> TryReconfigureExistingProperty(
- Handle<Map> map, int descriptor, PropertyKind kind,
- PropertyAttributes attributes, const char** reason);
-
- static Handle<Map> CopyNormalized(Handle<Map> map,
- PropertyNormalizationMode mode);
-
- // TODO(ishell): Move to MapUpdater.
- static Handle<Map> CopyGeneralizeAllFields(
- Handle<Map> map, ElementsKind elements_kind, int modify_index,
- PropertyKind kind, PropertyAttributes attributes, const char* reason);
-
- void DeprecateTransitionTree();
-
- void ReplaceDescriptors(DescriptorArray* new_descriptors,
- LayoutDescriptor* new_layout_descriptor);
-
-
- // Update field type of the given descriptor to new representation and new
- // type. The type must be prepared for storing in descriptor array:
- // it must be either a simple type or a map wrapped in a weak cell.
- void UpdateFieldType(int descriptor_number, Handle<Name> name,
- PropertyConstness new_constness,
- Representation new_representation,
- Handle<Object> new_wrapped_type);
-
- // TODO(ishell): Move to MapUpdater.
- void PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
- PropertyAttributes attributes);
- // TODO(ishell): Move to MapUpdater.
- void PrintGeneralization(FILE* file, const char* reason, int modify_index,
- int split, int descriptors, bool constant_to_field,
- Representation old_representation,
- Representation new_representation,
- MaybeHandle<FieldType> old_field_type,
- MaybeHandle<Object> old_value,
- MaybeHandle<FieldType> new_field_type,
- MaybeHandle<Object> new_value);
- static const int kFastPropertiesSoftLimit = 12;
- static const int kMaxFastProperties = 128;
-
- friend class MapUpdater;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
-};
-
-
// An abstract superclass, a marker class really, for simple structure classes.
// It doesn't carry much functionality but allows struct classes to be
// identified in the type system.
@@ -5455,7 +4706,7 @@ class Script: public Struct {
// This must only be called if the type of this script is TYPE_WASM.
DECL_ACCESSORS(wasm_compiled_module, Object)
- DECL_ACCESSORS(preparsed_scope_data, FixedTypedArrayBase)
+ DECL_ACCESSORS(preparsed_scope_data, PodArray<uint32_t>)
// [compilation_type]: how the script was compiled. Encoded in the
// 'flags' field.
@@ -5544,7 +4795,6 @@ class Script: public Struct {
};
bool HasPreparsedScopeData() const;
- Handle<FixedUint32Array> GetPreparsedScopeData() const;
// Dispatched behavior.
DECLARE_PRINTER(Script)
@@ -5802,34 +5052,6 @@ class SharedFunctionInfo: public HeapObject {
inline void ReplaceCode(Code* code);
inline bool HasBaselineCode() const;
- // [optimized_code_map]: Map from native context to optimized code
- // and a shared literals array.
- DECL_ACCESSORS(optimized_code_map, FixedArray)
-
- // Returns entry from optimized code map for specified context and OSR entry.
- Code* SearchOptimizedCodeMap(Context* native_context, BailoutId osr_ast_id);
-
- // Clear optimized code map.
- void ClearOptimizedCodeMap();
-
- // Like ClearOptimizedCodeMap, but preserves literals.
- void ClearCodeFromOptimizedCodeMap();
-
- // We have a special root FixedArray with the right shape and values
- // to represent the cleared optimized code map. This predicate checks
- // if that root is installed.
- inline bool OptimizedCodeMapIsCleared() const;
-
- // Removes a specific optimized code object from the optimized code map.
- // In case of non-OSR the code reference is cleared from the cache entry but
- // the entry itself is left in the map in order to proceed sharing literals.
- void EvictFromOptimizedCodeMap(Code* optimized_code, const char* reason);
-
- // Add or update entry in the optimized code map for context-dependent code.
- static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
- Handle<Context> native_context,
- Handle<Code> code, BailoutId osr_ast_id);
-
// Set up the link between shared function info and the script. The shared
// function info is added to the list on the script.
V8_EXPORT_PRIVATE static void SetScript(Handle<SharedFunctionInfo> shared,
@@ -5843,6 +5065,7 @@ class SharedFunctionInfo: public HeapObject {
static const int kInitialLength = kEntriesStart + kEntryLength;
static const int kNotFound = -1;
+ static const int kInvalidLength = -1;
// Helpers for assembly code that does a backwards walk of the optimized code
// map.
@@ -5871,7 +5094,9 @@ class SharedFunctionInfo: public HeapObject {
// [length]: The function length - usually the number of declared parameters.
// Use up to 2^30 parameters.
- inline int length() const;
+ // been compiled.
+ inline int GetLength() const;
+ inline bool HasLength() const;
inline void set_length(int value);
// [internal formal parameter count]: The declared number of parameters.
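
The new GetLength()/HasLength() pair, together with the kInvalidLength = -1 sentinel added earlier in this hunk series, turns the old unconditional length() accessor into a checked query. A minimal standalone sketch of that sentinel pattern, with hypothetical names and assuming -1 marks a function whose length is not yet known:

    #include <cassert>

    // Stand-in for SharedFunctionInfo's lazily available function length.
    class LazyLength {
     public:
      static const int kInvalidLength = -1;  // mirrors the constant added above

      bool HasLength() const { return length_ != kInvalidLength; }

      int GetLength() const {
        assert(HasLength());  // callers check first (or trigger compilation)
        return length_;
      }

      void set_length(int value) { length_ = value; }  // set once known

     private:
      int length_ = kInvalidLength;
    };
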
@@ -5900,7 +5125,7 @@ class SharedFunctionInfo: public HeapObject {
inline int function_literal_id() const;
inline void set_function_literal_id(int value);
-#if TRACE_MAPS
+#if V8_SFI_HAS_UNIQUE_ID
// [unique_id] - For --trace-maps purposes, an identifier that's persistent
// even if the GC moves this SharedFunctionInfo.
inline int unique_id() const;
@@ -6226,8 +5451,7 @@ class SharedFunctionInfo: public HeapObject {
// Pointer fields.
static const int kCodeOffset = HeapObject::kHeaderSize;
static const int kNameOffset = kCodeOffset + kPointerSize;
- static const int kOptimizedCodeMapOffset = kNameOffset + kPointerSize;
- static const int kScopeInfoOffset = kOptimizedCodeMapOffset + kPointerSize;
+ static const int kScopeInfoOffset = kNameOffset + kPointerSize;
static const int kOuterScopeInfoOffset = kScopeInfoOffset + kPointerSize;
static const int kConstructStubOffset = kOuterScopeInfoOffset + kPointerSize;
static const int kInstanceClassNameOffset =
@@ -6241,7 +5465,7 @@ class SharedFunctionInfo: public HeapObject {
kFunctionIdentifierOffset + kPointerSize;
static const int kFunctionLiteralIdOffset =
kFeedbackMetadataOffset + kPointerSize;
-#if TRACE_MAPS
+#if V8_SFI_HAS_UNIQUE_ID
static const int kUniqueIdOffset = kFunctionLiteralIdOffset + kPointerSize;
static const int kLastPointerFieldOffset = kUniqueIdOffset;
#else
@@ -6422,6 +5646,10 @@ class SharedFunctionInfo: public HeapObject {
class DisabledOptimizationReasonBits : public BitField<int, 22, 8> {};
private:
+ FRIEND_TEST(PreParserTest, LazyFunctionLength);
+
+ inline int length() const;
+
#if V8_HOST_ARCH_32_BIT
// On 32 bit platforms, compiler hints is a smi.
static const int kCompilerHintsSmiTagSize = kSmiTagSize;
@@ -6465,6 +5693,10 @@ class SharedFunctionInfo: public HeapObject {
FunctionKind::kClassConstructor << kCompilerHintsSmiTagSize;
STATIC_ASSERT(kClassConstructorBitsWithinByte < (1 << kBitsPerByte));
+ static const int kDerivedConstructorBitsWithinByte =
+ FunctionKind::kDerivedConstructor << kCompilerHintsSmiTagSize;
+ STATIC_ASSERT(kDerivedConstructorBitsWithinByte < (1 << kBitsPerByte));
+
static const int kMarkedForTierUpBitWithinByte =
kMarkedForTierUpBit % kBitsPerByte;
@@ -6488,11 +5720,6 @@ class SharedFunctionInfo: public HeapObject {
#undef BYTE_OFFSET
private:
- // Returns entry from optimized code map for specified context.
- // The result is either kNotFound, or a start index of the context-dependent
- // entry.
- int SearchOptimizedCodeMapEntry(Context* native_context);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(SharedFunctionInfo);
};
@@ -6685,7 +5912,8 @@ class Module : public Struct {
// Implementation of spec operation ModuleDeclarationInstantiation.
// Returns false if an exception occurred during instantiation, true
- // otherwise.
+ // otherwise. (In the case where the callback throws an exception, that
+ // exception is propagated.)
static MUST_USE_RESULT bool Instantiate(Handle<Module> module,
v8::Local<v8::Context> context,
v8::Module::ResolveCallback callback);
@@ -6693,6 +5921,7 @@ class Module : public Struct {
// Implementation of spec operation ModuleEvaluation.
static MUST_USE_RESULT MaybeHandle<Object> Evaluate(Handle<Module> module);
+ Cell* GetCell(int cell_index);
static Handle<Object> LoadVariable(Handle<Module> module, int cell_index);
static void StoreVariable(Handle<Module> module, int cell_index,
Handle<Object> value);
@@ -6858,6 +6087,9 @@ class JSFunction: public JSObject {
// Tells whether or not the function is on the concurrent recompilation queue.
inline bool IsInOptimizationQueue();
+ // Clears the optimized code slot in the function's feedback vector.
+ inline void ClearOptimizedCodeSlot(const char* reason);
+
// Completes inobject slack tracking on initial map if it is active.
inline void CompleteInobjectSlackTrackingIfActive();
@@ -6904,8 +6136,6 @@ class JSFunction: public JSObject {
inline Object* instance_prototype();
static void SetPrototype(Handle<JSFunction> function,
Handle<Object> value);
- static void SetInstancePrototype(Handle<JSFunction> function,
- Handle<Object> value);
// After prototype is removed, it will not be created when accessed, and
// [[Construct]] from this function will not be allowed.
@@ -7378,9 +6608,12 @@ class JSRegExp: public JSObject {
kSticky = 1 << 3,
kUnicode = 1 << 4,
kDotAll = 1 << 5,
+ // Update FlagCount when adding new flags.
};
typedef base::Flags<Flag> Flags;
+ static int FlagCount() { return FLAG_harmony_regexp_dotall ? 6 : 5; }
+
DECL_ACCESSORS(data, Object)
DECL_ACCESSORS(flags, Object)
DECL_ACCESSORS(source, Object)
@@ -7493,8 +6726,7 @@ class JSRegExp: public JSObject {
DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
-
-class TypeFeedbackInfo: public Struct {
+class TypeFeedbackInfo : public Tuple3 {
public:
inline int ic_total_count();
inline void set_ic_total_count(int count);
@@ -7515,14 +6747,9 @@ class TypeFeedbackInfo: public Struct {
DECLARE_CAST(TypeFeedbackInfo)
- // Dispatched behavior.
- DECLARE_PRINTER(TypeFeedbackInfo)
- DECLARE_VERIFIER(TypeFeedbackInfo)
-
- static const int kStorage1Offset = HeapObject::kHeaderSize;
- static const int kStorage2Offset = kStorage1Offset + kPointerSize;
- static const int kStorage3Offset = kStorage2Offset + kPointerSize;
- static const int kSize = kStorage3Offset + kPointerSize;
+ static const int kStorage1Offset = kValue1Offset;
+ static const int kStorage2Offset = kValue2Offset;
+ static const int kStorage3Offset = kValue3Offset;
private:
static const int kTypeChangeChecksumBits = 7;
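
TypeFeedbackInfo (and, below, CallHandlerInfo and BreakPointInfo) now reuses the generic Tuple3/Tuple2 structs instead of spelling out its own layout, so the class-specific offsets simply alias kValue1Offset..kValue3Offset and the bespoke printer, verifier, and size constants disappear. A rough sketch of the aliasing, with made-up sizes standing in for V8's real header layout:

    // Hypothetical base layout: three pointer-sized slots after the header.
    struct Tuple3Layout {
      static const int kHeaderSize = 8;   // assumption, not V8's actual value
      static const int kPointerSize = 8;  // 64-bit build assumed
      static const int kValue1Offset = kHeaderSize;
      static const int kValue2Offset = kValue1Offset + kPointerSize;
      static const int kValue3Offset = kValue2Offset + kPointerSize;
    };

    // The subclass no longer defines slots; it just renames the tuple's.
    struct TypeFeedbackInfoLayout : Tuple3Layout {
      static const int kStorage1Offset = kValue1Offset;
      static const int kStorage2Offset = kValue2Offset;
      static const int kStorage3Offset = kValue3Offset;
    };

    static_assert(TypeFeedbackInfoLayout::kStorage3Offset ==
                      Tuple3Layout::kValue3Offset,
                  "storage slots alias the generic tuple slots");
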
@@ -7726,78 +6953,6 @@ class AliasedArgumentsEntry: public Struct {
enum AllowNullsFlag {ALLOW_NULLS, DISALLOW_NULLS};
enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
-class V8_EXPORT_PRIVATE StringHasher {
- public:
- explicit inline StringHasher(int length, uint32_t seed);
-
- template <typename schar>
- static inline uint32_t HashSequentialString(const schar* chars,
- int length,
- uint32_t seed);
-
- // Reads all the data, even for long strings and computes the utf16 length.
- static uint32_t ComputeUtf8Hash(Vector<const char> chars,
- uint32_t seed,
- int* utf16_length_out);
-
- // Calculated hash value for a string consisting of 1 to
- // String::kMaxArrayIndexSize digits with no leading zeros (except "0").
- // value is represented decimal value.
- static uint32_t MakeArrayIndexHash(uint32_t value, int length);
-
- // No string is allowed to have a hash of zero. That value is reserved
- // for internal properties. If the hash calculation yields zero then we
- // use 27 instead.
- static const int kZeroHash = 27;
-
- // Reusable parts of the hashing algorithm.
- INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c));
- INLINE(static uint32_t GetHashCore(uint32_t running_hash));
- INLINE(static uint32_t ComputeRunningHash(uint32_t running_hash,
- const uc16* chars, int length));
- INLINE(static uint32_t ComputeRunningHashOneByte(uint32_t running_hash,
- const char* chars,
- int length));
-
- protected:
- // Returns the value to store in the hash field of a string with
- // the given length and contents.
- uint32_t GetHashField();
- // Returns true if the hash of this string can be computed without
- // looking at the contents.
- inline bool has_trivial_hash();
- // Adds a block of characters to the hash.
- template<typename Char>
- inline void AddCharacters(const Char* chars, int len);
-
- private:
- // Add a character to the hash.
- inline void AddCharacter(uint16_t c);
- // Update index. Returns true if string is still an index.
- inline bool UpdateIndex(uint16_t c);
-
- int length_;
- uint32_t raw_running_hash_;
- uint32_t array_index_;
- bool is_array_index_;
- bool is_first_char_;
- DISALLOW_COPY_AND_ASSIGN(StringHasher);
-};
-
-
-class IteratingStringHasher : public StringHasher {
- public:
- static inline uint32_t Hash(String* string, uint32_t seed);
- inline void VisitOneByteString(const uint8_t* chars, int length);
- inline void VisitTwoByteString(const uint16_t* chars, int length);
-
- private:
- inline IteratingStringHasher(int len, uint32_t seed);
- void VisitConsString(ConsString* cons_string);
- DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
-};
-
-
// The characteristics of a string are stored in its map. Retrieving these
// few bits of information is moderately expensive, involving two memory
// loads where the second is dependent on the first. To improve efficiency
@@ -7828,7 +6983,7 @@ class StringShape BASE_EMBEDDED {
inline StringRepresentationTag representation_tag();
inline uint32_t encoding_tag();
inline uint32_t full_representation_tag();
- inline uint32_t size_tag();
+ inline bool HasOnlyOneByteChars();
#ifdef DEBUG
inline uint32_t type() { return type_; }
inline void invalidate() { valid_ = false; }
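
The two dependent loads described above (object to map, map to instance type) are exactly what StringShape amortizes: it fetches the instance-type bits once and answers every later predicate from the cached copy. A toy version of the idea, using invented tag masks rather than V8's real string encoding:

    #include <cstdint>

    // Invented encoding: low bits = representation tag, one bit = one-byte flag.
    constexpr uint32_t kRepresentationMask = 0x07;
    constexpr uint32_t kOneByteBit = 0x08;
    constexpr uint32_t kSeqTag = 0x00;
    constexpr uint32_t kConsTag = 0x01;

    class ToyStringShape {
     public:
      explicit ToyStringShape(uint32_t instance_type) : type_(instance_type) {}

      // Each predicate is now a register test, not two dependent memory loads.
      bool IsSequential() const { return (type_ & kRepresentationMask) == kSeqTag; }
      bool IsCons() const { return (type_ & kRepresentationMask) == kConsTag; }
      bool HasOnlyOneByteChars() const { return (type_ & kOneByteBit) != 0; }

     private:
      uint32_t type_;  // fetched once from the map, then reused
    };
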
@@ -7883,7 +7038,7 @@ class Name: public HeapObject {
DECLARE_CAST(Name)
DECLARE_PRINTER(Name)
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
void NameShortPrint();
int NameShortPrint(Vector<char> str);
#endif
@@ -8002,7 +7157,7 @@ class Symbol: public Name {
private:
const char* PrivateSymbolToName() const;
-#if TRACE_MAPS
+#if V8_TRACE_MAPS
friend class Name; // For PrivateSymbolToName.
#endif
@@ -8213,8 +7368,11 @@ class String: public Name {
// GetSubstitution(matched, str, position, captures, replacement)
// Expand the $-expressions in the string and return a new string with
// the result.
+ // A {start_index} can be passed to specify where to start scanning the
+ // replacement string.
MUST_USE_RESULT static MaybeHandle<String> GetSubstitution(
- Isolate* isolate, Match* match, Handle<String> replacement);
+ Isolate* isolate, Match* match, Handle<String> replacement,
+ int start_index = 0);
// String equality operations.
inline bool Equals(String* other);
@@ -8452,6 +7610,8 @@ class SeqOneByteString: public SeqString {
static const int kMaxSize = 512 * MB - 1;
STATIC_ASSERT((kMaxSize - kHeaderSize) >= String::kMaxLength);
+ class BodyDescriptor;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
};
@@ -8492,6 +7652,8 @@ class SeqTwoByteString: public SeqString {
STATIC_ASSERT(static_cast<int>((kMaxSize - kHeaderSize)/sizeof(uint16_t)) >=
String::kMaxLength);
+ class BodyDescriptor;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
};
@@ -8721,16 +7883,16 @@ class Relocatable BASE_EMBEDDED {
public:
explicit inline Relocatable(Isolate* isolate);
inline virtual ~Relocatable();
- virtual void IterateInstance(ObjectVisitor* v) { }
+ virtual void IterateInstance(RootVisitor* v) {}
virtual void PostGarbageCollection() { }
static void PostGarbageCollectionProcessing(Isolate* isolate);
static int ArchiveSpacePerThread();
static char* ArchiveState(Isolate* isolate, char* to);
static char* RestoreState(Isolate* isolate, char* from);
- static void Iterate(Isolate* isolate, ObjectVisitor* v);
- static void Iterate(ObjectVisitor* v, Relocatable* top);
- static char* Iterate(ObjectVisitor* v, char* t);
+ static void Iterate(Isolate* isolate, RootVisitor* v);
+ static void Iterate(RootVisitor* v, Relocatable* top);
+ static char* Iterate(RootVisitor* v, char* t);
private:
Isolate* isolate_;
@@ -9345,6 +8507,15 @@ class JSArrayBuffer: public JSObject {
// [byte_length]: length in bytes
DECL_ACCESSORS(byte_length, Object)
+ // [allocation_base]: the start of the memory allocation for this array,
+ // normally equal to backing_store
+ DECL_ACCESSORS(allocation_base, void)
+
+ // [allocation_length]: the size of the memory allocation for this array,
+ // normally equal to byte_length
+ inline size_t allocation_length() const;
+ inline void set_allocation_length(size_t value);
+
inline uint32_t bit_field() const;
inline void set_bit_field(uint32_t bits);
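
The accessors say allocation_base/allocation_length are "normally equal" to backing_store/byte_length; the has_guard_region flag just below suggests the interesting case is a buffer whose usable bytes sit inside a larger guarded reservation. A stand-in struct capturing the containment invariant (the field names mirror the accessors; the rest is assumption):

    #include <cstddef>
    #include <cstdint>

    struct BufferShape {
      void* allocation_base;     // start of the whole memory reservation
      size_t allocation_length;  // size of the whole reservation
      void* backing_store;       // start of the bytes the buffer exposes
      size_t byte_length;        // number of exposed bytes

      // The exposed region must lie entirely inside the reservation; in the
      // common (non-guarded) case the two pairs coincide.
      bool IsConsistent() const {
        auto base = reinterpret_cast<uintptr_t>(allocation_base);
        auto store = reinterpret_cast<uintptr_t>(backing_store);
        return store >= base &&
               store + byte_length <= base + allocation_length;
      }
    };

The new four-argument Setup() overload further down passes both pairs explicitly for exactly this reason.
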
@@ -9363,7 +8534,7 @@ class JSArrayBuffer: public JSObject {
inline bool is_shared();
inline void set_is_shared(bool value);
- inline bool has_guard_region();
+ inline bool has_guard_region() const;
inline void set_has_guard_region(bool value);
// TODO(gdeepti): This flag is introduced to disable asm.js optimizations in
@@ -9375,11 +8546,20 @@ class JSArrayBuffer: public JSObject {
void Neuter();
+ inline ArrayBuffer::Allocator::AllocationMode allocation_mode() const;
+
+ void FreeBackingStore();
+
V8_EXPORT_PRIVATE static void Setup(
Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
void* data, size_t allocated_length,
SharedFlag shared = SharedFlag::kNotShared);
+ V8_EXPORT_PRIVATE static void Setup(
+ Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
+ void* allocation_base, size_t allocation_length, void* data,
+ size_t byte_length, SharedFlag shared = SharedFlag::kNotShared);
+
// Returns false if array buffer contents could not be allocated.
// In this case, |array_buffer| will not be set up.
static bool SetupAllocatingData(
@@ -9392,8 +8572,13 @@ class JSArrayBuffer: public JSObject {
DECLARE_VERIFIER(JSArrayBuffer)
static const int kByteLengthOffset = JSObject::kHeaderSize;
+ // The rest of the fields are not JSObjects, so they are not iterated over in
+ // objects-body-descriptors-inl.h.
static const int kBackingStoreOffset = kByteLengthOffset + kPointerSize;
- static const int kBitFieldSlot = kBackingStoreOffset + kPointerSize;
+ static const int kAllocationBaseOffset = kBackingStoreOffset + kPointerSize;
+ static const int kAllocationLengthOffset =
+ kAllocationBaseOffset + kPointerSize;
+ static const int kBitFieldSlot = kAllocationLengthOffset + kSizetSize;
#if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
static const int kBitFieldOffset = kBitFieldSlot;
#else
@@ -9473,6 +8658,16 @@ class JSTypedArray: public JSArrayBufferView {
static inline MaybeHandle<JSTypedArray> Validate(Isolate* isolate,
Handle<Object> receiver,
const char* method_name);
+ // ES7 section 22.2.4.6 Create ( constructor, argumentList )
+ static MaybeHandle<JSTypedArray> Create(Isolate* isolate,
+ Handle<Object> default_ctor, int argc,
+ Handle<Object>* argv,
+ const char* method_name);
+ // ES7 section 22.2.4.7 TypedArraySpeciesCreate ( exemplar, argumentList )
+ static MaybeHandle<JSTypedArray> SpeciesCreate(Isolate* isolate,
+ Handle<JSTypedArray> exemplar,
+ int argc, Handle<Object>* argv,
+ const char* method_name);
// Dispatched behavior.
DECLARE_PRINTER(JSTypedArray)
@@ -9609,7 +8804,7 @@ class JSArray: public JSObject {
static const int kSize = kLengthOffset + kPointerSize;
// Max. number of elements being copied in Array builtins.
- static const int kMaxCopyElements = 16;
+ static const int kMaxCopyElements = 100;
static const int kInitialMaxFastElementArray =
(kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - kSize -
@@ -9853,21 +9048,15 @@ class InterceptorInfo: public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
};
-
-class CallHandlerInfo: public Struct {
+class CallHandlerInfo : public Tuple2 {
public:
DECL_ACCESSORS(callback, Object)
DECL_ACCESSORS(data, Object)
DECLARE_CAST(CallHandlerInfo)
- // Dispatched behavior.
- DECLARE_PRINTER(CallHandlerInfo)
- DECLARE_VERIFIER(CallHandlerInfo)
-
- static const int kCallbackOffset = HeapObject::kHeaderSize;
- static const int kDataOffset = kCallbackOffset + kPointerSize;
- static const int kSize = kDataOffset + kPointerSize;
+ static const int kCallbackOffset = kValue1Offset;
+ static const int kDataOffset = kValue2Offset;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(CallHandlerInfo);
@@ -9896,6 +9085,11 @@ class TemplateInfo: public Struct {
static const int kFastTemplateInstantiationsCacheSize = 1 * KB;
+ // While we could grow the slow cache until we run out of memory, we put
+ // a limit on it anyway to not crash for embedders that re-create templates
+ // instead of caching them.
+ static const int kSlowTemplateInstantiationsCacheSize = 1 * MB;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(TemplateInfo);
};
@@ -10088,7 +9282,7 @@ class DebugInfo: public Struct {
// The BreakPointInfo class holds information for break points set in a
// function. The DebugInfo object holds a BreakPointInfo object for each code
// position with one or more break points.
-class BreakPointInfo: public Struct {
+class BreakPointInfo : public Tuple2 {
public:
// The position in the source for the break position.
DECL_INT_ACCESSORS(source_position)
@@ -10111,14 +9305,8 @@ class BreakPointInfo: public Struct {
DECLARE_CAST(BreakPointInfo)
- // Dispatched behavior.
- DECLARE_PRINTER(BreakPointInfo)
- DECLARE_VERIFIER(BreakPointInfo)
-
- static const int kSourcePositionIndex = Struct::kHeaderSize;
- static const int kBreakPointObjectsIndex =
- kSourcePositionIndex + kPointerSize;
- static const int kSize = kBreakPointObjectsIndex + kPointerSize;
+ static const int kSourcePositionIndex = kValue1Offset;
+ static const int kBreakPointObjectsIndex = kValue2Offset;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(BreakPointInfo);
@@ -10136,6 +9324,7 @@ class StackFrameInfo : public Struct {
DECL_BOOLEAN_ACCESSORS(is_constructor)
DECL_BOOLEAN_ACCESSORS(is_wasm)
DECL_INT_ACCESSORS(flag)
+ DECL_INT_ACCESSORS(id)
DECLARE_CAST(StackFrameInfo)
@@ -10152,7 +9341,8 @@ class StackFrameInfo : public Struct {
static const int kFunctionNameIndex =
kScriptNameOrSourceUrlIndex + kPointerSize;
static const int kFlagIndex = kFunctionNameIndex + kPointerSize;
- static const int kSize = kFlagIndex + kPointerSize;
+ static const int kIdIndex = kFlagIndex + kPointerSize;
+ static const int kSize = kIdIndex + kPointerSize;
private:
// Bit position in the flag, from least significant bit position.
@@ -10163,92 +9353,77 @@ class StackFrameInfo : public Struct {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackFrameInfo);
};
-#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V) \
- V(kStringTable, "string_table", "(Internalized strings)") \
- V(kExternalStringsTable, "external_strings_table", "(External strings)") \
- V(kStrongRootList, "strong_root_list", "(Strong roots)") \
- V(kSmiRootList, "smi_root_list", "(Smi roots)") \
- V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
- V(kTop, "top", "(Isolate)") \
- V(kRelocatable, "relocatable", "(Relocatable)") \
- V(kDebug, "debug", "(Debugger)") \
- V(kCompilationCache, "compilationcache", "(Compilation cache)") \
- V(kHandleScope, "handlescope", "(Handle scope)") \
- V(kDispatchTable, "dispatchtable", "(Dispatch table)") \
- V(kBuiltins, "builtins", "(Builtins)") \
- V(kGlobalHandles, "globalhandles", "(Global handles)") \
- V(kEternalHandles, "eternalhandles", "(Eternal handles)") \
- V(kThreadManager, "threadmanager", "(Thread manager)") \
- V(kStrongRoots, "strong roots", "(Strong roots)") \
- V(kExtensions, "Extensions", "(Extensions)")
-
-class VisitorSynchronization : public AllStatic {
+class SourcePositionTableWithFrameCache : public Tuple2 {
public:
-#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
- enum SyncTag {
- VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_ENUM)
- kNumberOfSyncTags
- };
-#undef DECLARE_ENUM
+ DECL_ACCESSORS(source_position_table, ByteArray)
+ DECL_ACCESSORS(stack_frame_cache, UnseededNumberDictionary)
+
+ DECLARE_CAST(SourcePositionTableWithFrameCache)
- static const char* const kTags[kNumberOfSyncTags];
- static const char* const kTagNames[kNumberOfSyncTags];
+ static const int kSourcePositionTableIndex = Struct::kHeaderSize;
+ static const int kStackFrameCacheIndex =
+ kSourcePositionTableIndex + kPointerSize;
+ static const int kSize = kStackFrameCacheIndex + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SourcePositionTableWithFrameCache);
};
// Abstract base class for visiting, and optionally modifying, the
// pointers contained in Objects. Used in GC and serialization/deserialization.
+// TODO(ulan): move to src/visitors.h
class ObjectVisitor BASE_EMBEDDED {
public:
virtual ~ObjectVisitor() {}
// Visits a contiguous array of pointers in the half-open range
// [start, end). Any or all of the values may be modified on return.
- virtual void VisitPointers(Object** start, Object** end) = 0;
+ virtual void VisitPointers(HeapObject* host, Object** start,
+ Object** end) = 0;
// Handy shorthand for visiting a single pointer.
- virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
+ virtual void VisitPointer(HeapObject* host, Object** p) {
+ VisitPointers(host, p, p + 1);
+ }
// Visit weak next_code_link in Code object.
- virtual void VisitNextCodeLink(Object** p) { VisitPointers(p, p + 1); }
+ virtual void VisitNextCodeLink(Code* host, Object** p) {
+ VisitPointers(host, p, p + 1);
+ }
// To allow lazy clearing of inline caches the visitor has
// a rich interface for iterating over Code objects.
// Visits a code target in the instruction stream.
- virtual void VisitCodeTarget(RelocInfo* rinfo);
+ virtual void VisitCodeTarget(Code* host, RelocInfo* rinfo);
// Visits a code entry in a JS function.
- virtual void VisitCodeEntry(Address entry_address);
+ virtual void VisitCodeEntry(JSFunction* host, Address entry_address);
// Visits a global property cell reference in the instruction stream.
- virtual void VisitCell(RelocInfo* rinfo);
+ virtual void VisitCellPointer(Code* host, RelocInfo* rinfo);
// Visits a runtime entry in the instruction stream.
- virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
+ virtual void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) {}
// Visits a debug call target in the instruction stream.
- virtual void VisitDebugTarget(RelocInfo* rinfo);
+ virtual void VisitDebugTarget(Code* host, RelocInfo* rinfo);
// Visits the byte sequence in a function's prologue that contains information
// about the code's age.
- virtual void VisitCodeAgeSequence(RelocInfo* rinfo);
+ virtual void VisitCodeAgeSequence(Code* host, RelocInfo* rinfo);
// Visit pointer embedded into a code object.
- virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
+ virtual void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo);
// Visits an external reference embedded into a code object.
- virtual void VisitExternalReference(RelocInfo* rinfo);
+ virtual void VisitExternalReference(Code* host, RelocInfo* rinfo) {}
// Visits an external reference.
- virtual void VisitExternalReference(Address* p) {}
+ virtual void VisitExternalReference(Foreign* host, Address* p) {}
// Visits an (encoded) internal reference.
- virtual void VisitInternalReference(RelocInfo* rinfo) {}
-
- // Intended for serialization/deserialization checking: insert, or
- // check for the presence of, a tag at this position in the stream.
- // Also used for marking up GC roots in heap snapshots.
- virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
+ virtual void VisitInternalReference(Code* host, RelocInfo* rinfo) {}
};
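
Every Visit* hook above now receives the host object whose slot is being visited, letting implementations attribute pointers to their owners (useful for remembered sets and heap snapshots). A minimal visitor against stand-in types, showing just the shape of the new override:

    // Stand-ins for v8::internal types, only to make the sketch self-contained.
    struct Object {};
    struct HeapObject : Object {};

    struct ToyObjectVisitor {
      virtual ~ToyObjectVisitor() {}
      virtual void VisitPointers(HeapObject* host, Object** start,
                                 Object** end) = 0;
      void VisitPointer(HeapObject* host, Object** p) {
        VisitPointers(host, p, p + 1);  // the shorthand keeps its old behavior
      }
    };

    struct CountingVisitor : ToyObjectVisitor {
      int count = 0;
      void VisitPointers(HeapObject* host, Object** start, Object** end) override {
        (void)host;  // a real visitor could record which object owns the slots
        count += static_cast<int>(end - start);
      }
    };
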
diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h
index 4927433567..32c376ee78 100644
--- a/deps/v8/src/objects/descriptor-array.h
+++ b/deps/v8/src/objects/descriptor-array.h
@@ -18,14 +18,18 @@ class Handle;
class Isolate;
-// DescriptorArrays are fixed arrays used to hold instance descriptors.
+// A DescriptorArray is a fixed array used to hold instance descriptors.
// The format of these objects is:
// [0]: Number of descriptors
-// [1]: Either Smi(0) if uninitialized, or a pointer to small fixed array:
-// [0]: pointer to fixed array with enum cache
+// [1]: Either Smi(0) if uninitialized,
+// or enum cache bridge (FixedArray[2]):
+// [0]: enum cache: FixedArray containing all own enumerable keys
// [1]: either Smi(0) or pointer to fixed array with indices
-// [2]: first key
-// [2 + number of descriptors * kEntrySize]: start of slack
+// [2]: first key (an internalized String)
+// [3]: first descriptor details (see PropertyDetails)
+// [4]: first value for constants | Smi(1) when not used
+//
+// [2 + number of descriptors * 3]: start of slack
class DescriptorArray : public FixedArray {
public:
// Returns true for both shared empty_descriptor_array and for smis, which the
@@ -35,28 +39,19 @@ class DescriptorArray : public FixedArray {
// Returns the number of descriptors in the array.
inline int number_of_descriptors();
-
inline int number_of_descriptors_storage();
-
inline int NumberOfSlackDescriptors();
inline void SetNumberOfDescriptors(int number_of_descriptors);
inline int number_of_entries();
inline bool HasEnumCache();
-
- inline void CopyEnumCacheFrom(DescriptorArray* array);
-
- inline FixedArray* GetEnumCache();
-
inline bool HasEnumIndicesCache();
-
+ inline FixedArray* GetEnumCache();
inline FixedArray* GetEnumIndicesCache();
- inline Object** GetEnumCacheSlot();
-
void ClearEnumCache();
-
+ inline void CopyEnumCacheFrom(DescriptorArray* array);
// Initialize or change the enum cache,
// using the supplied storage for the small "bridge".
static void SetEnumCache(Handle<DescriptorArray> descriptors,
@@ -127,7 +122,7 @@ class DescriptorArray : public FixedArray {
static const int kNotFound = -1;
static const int kDescriptorLengthIndex = 0;
- static const int kEnumCacheIndex = 1;
+ static const int kEnumCacheBridgeIndex = 1;
static const int kFirstIndex = 2;
// The length of the "bridge" to the enum cache.
@@ -137,8 +132,9 @@ class DescriptorArray : public FixedArray {
// Layout description.
static const int kDescriptorLengthOffset = FixedArray::kHeaderSize;
- static const int kEnumCacheOffset = kDescriptorLengthOffset + kPointerSize;
- static const int kFirstOffset = kEnumCacheOffset + kPointerSize;
+ static const int kEnumCacheBridgeOffset =
+ kDescriptorLengthOffset + kPointerSize;
+ static const int kFirstOffset = kEnumCacheBridgeOffset + kPointerSize;
// Layout description for the bridge array.
static const int kEnumCacheBridgeCacheOffset = FixedArray::kHeaderSize;
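
Putting the layout comment and these constants together: descriptor i occupies the three slots starting at kFirstIndex + i * kEntrySize. A quick arithmetic sketch of the slot addressing (kEntrySize = 3 is read off the "* 3" in the layout comment; the helper names are illustrative):

    constexpr int kFirstIndex = 2;
    constexpr int kEntrySize = 3;  // key, details, value per descriptor

    constexpr int ToKeyIndex(int descriptor) {
      return kFirstIndex + descriptor * kEntrySize;  // [2 + i*3]
    }
    constexpr int ToDetailsIndex(int descriptor) {
      return ToKeyIndex(descriptor) + 1;             // [3 + i*3]
    }
    constexpr int ToValueIndex(int descriptor) {
      return ToKeyIndex(descriptor) + 2;             // [4 + i*3]
    }

    static_assert(ToValueIndex(0) == 4, "first value lives at slot 4");
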
diff --git a/deps/v8/src/objects/dictionary.h b/deps/v8/src/objects/dictionary.h
index 78837ea296..ca709679b1 100644
--- a/deps/v8/src/objects/dictionary.h
+++ b/deps/v8/src/objects/dictionary.h
@@ -5,7 +5,10 @@
#ifndef V8_OBJECTS_DICTIONARY_H_
#define V8_OBJECTS_DICTIONARY_H_
-#include "src/objects.h"
+#include "src/objects/hash-table.h"
+
+#include "src/base/export-template.h"
+#include "src/globals.h"
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -274,6 +277,12 @@ class UnseededNumberDictionaryShape : public NumberDictionaryShape {
static inline Map* GetMap(Isolate* isolate);
};
+extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ HashTable<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>;
+
+extern template class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
+ Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape, uint32_t>;
+
class SeededNumberDictionary
: public Dictionary<SeededNumberDictionary, SeededNumberDictionaryShape,
uint32_t> {
@@ -325,6 +334,10 @@ class SeededNumberDictionary
static const int kRequiresSlowElementsMask = 1;
static const int kRequiresSlowElementsTagSize = 1;
static const uint32_t kRequiresSlowElementsLimit = (1 << 29) - 1;
+
+ // JSObjects prefer dictionary elements if the dictionary saves this much
+ // memory compared to a fast elements backing store.
+ static const uint32_t kPreferFastElementsSizeFactor = 3;
};
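
The new kPreferFastElementsSizeFactor encodes the heuristic from its comment: dictionary elements are worthwhile only while the dictionary is at least three times smaller than the equivalent fast backing store. A hedged sketch of that comparison; the word counts are an invented cost model, not V8's exact accounting:

    #include <cstdint>

    constexpr uint32_t kPreferFastElementsSizeFactor = 3;

    // Assumed model: a fast store needs one slot per index up to the largest
    // used index, while a dictionary pays per-entry overhead for used slots only.
    bool PreferDictionary(uint32_t max_index, uint32_t used_entries,
                          uint32_t words_per_dictionary_entry /* assumption */) {
      uint64_t fast_size = static_cast<uint64_t>(max_index) + 1;
      uint64_t dict_size =
          static_cast<uint64_t>(used_entries) * words_per_dictionary_entry;
      // The dictionary wins only if it saves a factor of 3 in memory.
      return dict_size * kPreferFastElementsSizeFactor <= fast_size;
    }
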
class UnseededNumberDictionary
diff --git a/deps/v8/src/objects/hash-table-inl.h b/deps/v8/src/objects/hash-table-inl.h
deleted file mode 100644
index 7b2db38495..0000000000
--- a/deps/v8/src/objects/hash-table-inl.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_HASH_TABLE_INL_H_
-#define V8_OBJECTS_HASH_TABLE_INL_H_
-
-#include "src/objects/hash-table.h"
-
-namespace v8 {
-namespace internal {
-
-template <typename Derived, typename Shape, typename Key>
-uint32_t HashTable<Derived, Shape, Key>::Hash(Key key) {
- if (Shape::UsesSeed) {
- return Shape::SeededHash(key, GetHeap()->HashSeed());
- } else {
- return Shape::Hash(key);
- }
-}
-
-template <typename Derived, typename Shape, typename Key>
-uint32_t HashTable<Derived, Shape, Key>::HashForObject(Key key, Object* object) {
- if (Shape::UsesSeed) {
- return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object);
- } else {
- return Shape::HashForObject(key, object);
- }
-}
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_OBJECTS_HASH_TABLE_INL_H_
diff --git a/deps/v8/src/objects/hash-table.h b/deps/v8/src/objects/hash-table.h
index b274d94cd4..f3c68a82c5 100644
--- a/deps/v8/src/objects/hash-table.h
+++ b/deps/v8/src/objects/hash-table.h
@@ -7,6 +7,9 @@
#include "src/objects.h"
+#include "src/base/compiler-specific.h"
+#include "src/globals.h"
+
// Has to be the last include (doesn't have include guards):
#include "src/objects/object-macros.h"
@@ -63,7 +66,7 @@ class BaseShape {
static inline Map* GetMap(Isolate* isolate);
};
-class HashTableBase : public FixedArray {
+class V8_EXPORT_PRIVATE HashTableBase : public NON_EXPORTED_BASE(FixedArray) {
public:
// Returns the number of elements in the hash table.
inline int NumberOfElements();
@@ -135,10 +138,22 @@ class HashTable : public HashTableBase {
public:
typedef Shape ShapeT;
- // Wrapper methods. Defined in src/objects/hash-table-inl.h
- // to break a cycle with src/heap/heap.h
- inline uint32_t Hash(Key key);
- inline uint32_t HashForObject(Key key, Object* object);
+ // Wrapper methods
+ inline uint32_t Hash(Key key) {
+ if (Shape::UsesSeed) {
+ return Shape::SeededHash(key, GetHeap()->HashSeed());
+ } else {
+ return Shape::Hash(key);
+ }
+ }
+
+ inline uint32_t HashForObject(Key key, Object* object) {
+ if (Shape::UsesSeed) {
+ return Shape::SeededHashForObject(key, GetHeap()->HashSeed(), object);
+ } else {
+ return Shape::HashForObject(key, object);
+ }
+ }
// Returns a new HashTable object.
MUST_USE_RESULT static Handle<Derived> New(
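
Inlining these wrappers removes hash-table-inl.h, a header that existed only to break an include cycle with heap.h (its deletion appears further down). The dispatch itself is a branch on the compile-time constant Shape::UsesSeed, so each instantiation keeps only one arm. A self-contained sketch of the pattern with toy shapes:

    #include <cstdint>

    struct SeededShape {
      static const bool UsesSeed = true;
      static uint32_t SeededHash(uint32_t key, uint32_t seed) {
        return (key * 2654435761u) ^ seed;  // toy mixing, not V8's hash
      }
      static uint32_t Hash(uint32_t) { return 0; }  // never taken
    };

    struct PlainShape {
      static const bool UsesSeed = false;
      static uint32_t SeededHash(uint32_t, uint32_t) { return 0; }  // never taken
      static uint32_t Hash(uint32_t key) { return key * 2654435761u; }
    };

    template <typename Shape>
    uint32_t HashKey(uint32_t key, uint32_t heap_seed) {
      if (Shape::UsesSeed) {  // constant per Shape; the dead arm folds away
        return Shape::SeededHash(key, heap_seed);
      } else {
        return Shape::Hash(key);
      }
    }
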
@@ -177,8 +192,11 @@ class HashTable : public HashTableBase {
static const int kMaxCapacity =
(FixedArray::kMaxLength - kElementsStartIndex) / kEntrySize;
+ // Maximum length to create a regular HashTable (aka. non large object).
+ static const int kMaxRegularCapacity = 16384;
+
// Returns the index for an entry (of the key)
- static inline int EntryToIndex(int entry) {
+ static constexpr inline int EntryToIndex(int entry) {
return (entry * kEntrySize) + kElementsStartIndex;
}
@@ -203,6 +221,15 @@ class HashTable : public HashTableBase {
// Returns true if this table has sufficient capacity for adding n elements.
bool HasSufficientCapacityToAdd(int number_of_additional_elements);
+ private:
+ // Ensure that kMaxRegularCapacity yields a non-large object dictionary.
+ STATIC_ASSERT(EntryToIndex(kMaxRegularCapacity) < kMaxRegularLength);
+ STATIC_ASSERT(v8::base::bits::IsPowerOfTwo32(kMaxRegularCapacity));
+ static const int kMaxRegularEntry = kMaxRegularCapacity / kEntrySize;
+ static const int kMaxRegularIndex = EntryToIndex(kMaxRegularEntry);
+ STATIC_ASSERT(OffsetOfElementAt(kMaxRegularIndex) <
+ kMaxRegularHeapObjectSize);
+
// Sets the capacity of the hash table.
void SetCapacity(int capacity) {
// To scale a computed hash code to fit within the hash table, we
@@ -213,7 +240,6 @@ class HashTable : public HashTableBase {
set(kCapacityIndex, Smi::FromInt(capacity));
}
- private:
// Returns _expected_ if one of entries given by the first _probe_ probes is
// equal to _expected_. Otherwise, returns the entry given by the probe
// number _probe_.
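
The power-of-two STATIC_ASSERT above and the mask mentioned in SetCapacity's comment fit together: with a power-of-two capacity, scaling a hash and stepping a probe are each a single AND. A sketch of the probe sequence the comment describes; the linearly increasing stride is an assumption about the elided helper bodies:

    #include <cstdint>

    // Assumes capacity is a power of two, as asserted for kMaxRegularCapacity.
    inline uint32_t FirstProbe(uint32_t hash, uint32_t capacity) {
      return hash & (capacity - 1);  // cheap modulo via mask
    }

    // Probe number n steps n slots further; with a power-of-two capacity this
    // triangular-number sequence visits every slot exactly once.
    inline uint32_t NextProbe(uint32_t last, uint32_t number, uint32_t capacity) {
      return (last + number) & (capacity - 1);
    }
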
@@ -329,9 +355,9 @@ class ObjectHashSet
// Originally attributed to Tyler Close.
//
// Memory layout:
-// [0]: bucket count
-// [1]: element count
-// [2]: deleted element count
+// [0]: element count
+// [1]: deleted element count
+// [2]: bucket count
// [3..(3 + NumberOfBuckets() - 1)]: "hash table", where each item is an
// offset into the data table (see below) where the
// first item in this bucket is stored.
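
The reorder above moves the bucket count from slot [0] to slot [2], so the element and deleted-element counts, the two fields touched on every mutation, now come first. Index helpers matching the new layout; masked bucket selection is an assumption consistent with the other hash tables in this file:

    constexpr int kNumberOfElementsIndex = 0;        // was the bucket count
    constexpr int kNumberOfDeletedElementsIndex = 1;
    constexpr int kNumberOfBucketsIndex = 2;         // was slot [0]
    constexpr int kHashTableStartIndex = 3;

    // Slot holding the head-of-chain offset for the bucket a hash lands in.
    constexpr int BucketSlot(int hash, int number_of_buckets) {
      return kHashTableStartIndex + (hash & (number_of_buckets - 1));
    }
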
diff --git a/deps/v8/src/i18n.cc b/deps/v8/src/objects/intl-objects.cc
index d96fd45b2a..fd6546b390 100644
--- a/deps/v8/src/i18n.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -1,9 +1,12 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// limitations under the License.
-#include "src/i18n.h"
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#include "src/objects/intl-objects.h"
#include <memory>
@@ -11,7 +14,6 @@
#include "src/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
-#include "src/string-case.h"
#include "unicode/brkiter.h"
#include "unicode/bytestream.h"
#include "unicode/calendar.h"
@@ -32,7 +34,6 @@
#include "unicode/ucol.h"
#include "unicode/ucurr.h"
#include "unicode/unum.h"
-#include "unicode/ustring.h"
#include "unicode/uvernum.h"
#include "unicode/uversion.h"
@@ -45,10 +46,8 @@ namespace internal {
namespace {
-bool ExtractStringSetting(Isolate* isolate,
- Handle<JSObject> options,
- const char* key,
- icu::UnicodeString* setting) {
+bool ExtractStringSetting(Isolate* isolate, Handle<JSObject> options,
+ const char* key, icu::UnicodeString* setting) {
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
Handle<Object> object =
JSReceiver::GetProperty(options, str).ToHandleChecked();
@@ -61,11 +60,8 @@ bool ExtractStringSetting(Isolate* isolate,
return false;
}
-
-bool ExtractIntegerSetting(Isolate* isolate,
- Handle<JSObject> options,
- const char* key,
- int32_t* value) {
+bool ExtractIntegerSetting(Isolate* isolate, Handle<JSObject> options,
+ const char* key, int32_t* value) {
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
Handle<Object> object =
JSReceiver::GetProperty(options, str).ToHandleChecked();
@@ -76,11 +72,8 @@ bool ExtractIntegerSetting(Isolate* isolate,
return false;
}
-
-bool ExtractBooleanSetting(Isolate* isolate,
- Handle<JSObject> options,
- const char* key,
- bool* value) {
+bool ExtractBooleanSetting(Isolate* isolate, Handle<JSObject> options,
+ const char* key, bool* value) {
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(key);
Handle<Object> object =
JSReceiver::GetProperty(options, str).ToHandleChecked();
@@ -91,11 +84,9 @@ bool ExtractBooleanSetting(Isolate* isolate,
return false;
}
-
-icu::SimpleDateFormat* CreateICUDateFormat(
- Isolate* isolate,
- const icu::Locale& icu_locale,
- Handle<JSObject> options) {
+icu::SimpleDateFormat* CreateICUDateFormat(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
// Create time zone as specified by the user. We have to re-create time zone
// since calendar takes ownership.
icu::TimeZone* tz = NULL;
@@ -147,9 +138,7 @@ icu::SimpleDateFormat* CreateICUDateFormat(
return date_format;
}
-
-void SetResolvedDateSettings(Isolate* isolate,
- const icu::Locale& icu_locale,
+void SetResolvedDateSettings(Isolate* isolate, const icu::Locale& icu_locale,
icu::SimpleDateFormat* date_format,
Handle<JSObject> resolved) {
Factory* factory = isolate->factory();
@@ -158,21 +147,24 @@ void SetResolvedDateSettings(Isolate* isolate,
date_format->toPattern(pattern);
JSObject::SetProperty(
resolved, factory->intl_pattern_symbol(),
- factory->NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
- pattern.length())).ToHandleChecked(),
- SLOPPY).Assert();
+ factory
+ ->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
+ pattern.length()))
+ .ToHandleChecked(),
+ SLOPPY)
+ .Assert();
// Set time zone and calendar.
const icu::Calendar* calendar = date_format->getCalendar();
// getType() returns legacy calendar type name instead of LDML/BCP47 calendar
- // key values. i18n.js maps them to BCP47 values for key "ca".
+ // key values. intl.js maps them to BCP47 values for key "ca".
// TODO(jshin): Consider doing it here, instead.
const char* calendar_name = calendar->getType();
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("calendar"),
factory->NewStringFromAsciiChecked(calendar_name),
- SLOPPY).Assert();
+ SLOPPY)
+ .Assert();
const icu::TimeZone& tz = calendar->getTimeZone();
icu::UnicodeString time_zone;
@@ -181,19 +173,30 @@ void SetResolvedDateSettings(Isolate* isolate,
icu::UnicodeString canonical_time_zone;
icu::TimeZone::getCanonicalID(time_zone, canonical_time_zone, status);
if (U_SUCCESS(status)) {
- if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
- JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("timeZone"),
- factory->NewStringFromStaticChars("UTC"), SLOPPY).Assert();
+ // In CLDR (http://unicode.org/cldr/trac/ticket/9943), Etc/UTC is made
+ // a separate timezone ID from Etc/GMT even though they're still the same
+ // timezone. We'd not have "Etc/GMT" here because we canonicalize it and
+ // other GMT-variants to "UTC" in intl.js and "UTC" is turned to "Etc/UTC"
+ // by ICU before getting here.
+ // TODO(jshin): Figure out the cause of crbug.com/719609 and re-enable
+ // DCHECK(canonical_time_zone != UNICODE_STRING_SIMPLE("Etc/GMT")) .
+ if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/UTC") ||
+ canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("timeZone"),
+ factory->NewStringFromStaticChars("UTC"), SLOPPY)
+ .Assert();
} else {
- JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("timeZone"),
- factory->NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(
- canonical_time_zone.getBuffer()),
- canonical_time_zone.length())).ToHandleChecked(),
- SLOPPY).Assert();
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("timeZone"),
+ factory
+ ->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(
+ canonical_time_zone.getBuffer()),
+ canonical_time_zone.length()))
+ .ToHandleChecked(),
+ SLOPPY)
+ .Assert();
}
}
@@ -205,38 +208,38 @@ void SetResolvedDateSettings(Isolate* isolate,
icu::NumberingSystem::createInstance(icu_locale, status);
if (U_SUCCESS(status)) {
const char* ns = numbering_system->getName();
- JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("numberingSystem"),
- factory->NewStringFromAsciiChecked(ns), SLOPPY).Assert();
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("numberingSystem"),
+ factory->NewStringFromAsciiChecked(ns), SLOPPY)
+ .Assert();
} else {
JSObject::SetProperty(resolved,
factory->NewStringFromStaticChars("numberingSystem"),
- factory->undefined_value(), SLOPPY).Assert();
+ factory->undefined_value(), SLOPPY)
+ .Assert();
}
delete numbering_system;
// Set the locale
char result[ULOC_FULLNAME_CAPACITY];
status = U_ZERO_ERROR;
- uloc_toLanguageTag(
- icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
+ FALSE, &status);
if (U_SUCCESS(status)) {
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result),
- SLOPPY).Assert();
+ factory->NewStringFromAsciiChecked(result), SLOPPY)
+ .Assert();
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"),
- SLOPPY).Assert();
+ factory->NewStringFromStaticChars("und"), SLOPPY)
+ .Assert();
}
}
-
-icu::DecimalFormat* CreateICUNumberFormat(
- Isolate* isolate,
- const icu::Locale& icu_locale,
- Handle<JSObject> options) {
+icu::DecimalFormat* CreateICUNumberFormat(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
// Make formatter from options. Numbering system is added
// to the locale as Unicode extension (if it was specified at all).
UErrorCode status = U_ZERO_ERROR;
@@ -303,30 +306,30 @@ icu::DecimalFormat* CreateICUNumberFormat(
}
int32_t digits;
- if (ExtractIntegerSetting(
- isolate, options, "minimumIntegerDigits", &digits)) {
+ if (ExtractIntegerSetting(isolate, options, "minimumIntegerDigits",
+ &digits)) {
number_format->setMinimumIntegerDigits(digits);
}
- if (ExtractIntegerSetting(
- isolate, options, "minimumFractionDigits", &digits)) {
+ if (ExtractIntegerSetting(isolate, options, "minimumFractionDigits",
+ &digits)) {
number_format->setMinimumFractionDigits(digits);
}
- if (ExtractIntegerSetting(
- isolate, options, "maximumFractionDigits", &digits)) {
+ if (ExtractIntegerSetting(isolate, options, "maximumFractionDigits",
+ &digits)) {
number_format->setMaximumFractionDigits(digits);
}
bool significant_digits_used = false;
- if (ExtractIntegerSetting(
- isolate, options, "minimumSignificantDigits", &digits)) {
+ if (ExtractIntegerSetting(isolate, options, "minimumSignificantDigits",
+ &digits)) {
number_format->setMinimumSignificantDigits(digits);
significant_digits_used = true;
}
- if (ExtractIntegerSetting(
- isolate, options, "maximumSignificantDigits", &digits)) {
+ if (ExtractIntegerSetting(isolate, options, "maximumSignificantDigits",
+ &digits)) {
number_format->setMaximumSignificantDigits(digits);
significant_digits_used = true;
}
@@ -344,9 +347,7 @@ icu::DecimalFormat* CreateICUNumberFormat(
return number_format;
}
-
-void SetResolvedNumberSettings(Isolate* isolate,
- const icu::Locale& icu_locale,
+void SetResolvedNumberSettings(Isolate* isolate, const icu::Locale& icu_locale,
icu::DecimalFormat* number_format,
Handle<JSObject> resolved) {
Factory* factory = isolate->factory();
@@ -354,22 +355,26 @@ void SetResolvedNumberSettings(Isolate* isolate,
number_format->toPattern(pattern);
JSObject::SetProperty(
resolved, factory->intl_pattern_symbol(),
- factory->NewStringFromTwoByte(
- Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
- pattern.length())).ToHandleChecked(),
- SLOPPY).Assert();
+ factory
+ ->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
+ pattern.length()))
+ .ToHandleChecked(),
+ SLOPPY)
+ .Assert();
// Set resolved currency code in options.currency if not empty.
icu::UnicodeString currency(number_format->getCurrency());
if (!currency.isEmpty()) {
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("currency"),
- factory->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(
- currency.getBuffer()),
- currency.length())).ToHandleChecked(),
- SLOPPY).Assert();
+ factory
+ ->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(currency.getBuffer()),
+ currency.length()))
+ .ToHandleChecked(),
+ SLOPPY)
+ .Assert();
}
// Ugly hack. ICU doesn't expose numbering system in any way, so we have
@@ -380,34 +385,40 @@ void SetResolvedNumberSettings(Isolate* isolate,
icu::NumberingSystem::createInstance(icu_locale, status);
if (U_SUCCESS(status)) {
const char* ns = numbering_system->getName();
- JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("numberingSystem"),
- factory->NewStringFromAsciiChecked(ns), SLOPPY).Assert();
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("numberingSystem"),
+ factory->NewStringFromAsciiChecked(ns), SLOPPY)
+ .Assert();
} else {
JSObject::SetProperty(resolved,
factory->NewStringFromStaticChars("numberingSystem"),
- factory->undefined_value(), SLOPPY).Assert();
+ factory->undefined_value(), SLOPPY)
+ .Assert();
}
delete numbering_system;
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("useGrouping"),
- factory->ToBoolean(number_format->isGroupingUsed()), SLOPPY).Assert();
+ factory->ToBoolean(number_format->isGroupingUsed()), SLOPPY)
+ .Assert();
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("minimumIntegerDigits"),
factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()),
- SLOPPY).Assert();
+ SLOPPY)
+ .Assert();
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("minimumFractionDigits"),
factory->NewNumberFromInt(number_format->getMinimumFractionDigits()),
- SLOPPY).Assert();
+ SLOPPY)
+ .Assert();
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("maximumFractionDigits"),
factory->NewNumberFromInt(number_format->getMaximumFractionDigits()),
- SLOPPY).Assert();
+ SLOPPY)
+ .Assert();
Handle<String> key =
factory->NewStringFromStaticChars("minimumSignificantDigits");
@@ -417,7 +428,8 @@ void SetResolvedNumberSettings(Isolate* isolate,
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("minimumSignificantDigits"),
factory->NewNumberFromInt(number_format->getMinimumSignificantDigits()),
- SLOPPY).Assert();
+ SLOPPY)
+ .Assert();
}
key = factory->NewStringFromStaticChars("maximumSignificantDigits");
@@ -427,31 +439,30 @@ void SetResolvedNumberSettings(Isolate* isolate,
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("maximumSignificantDigits"),
factory->NewNumberFromInt(number_format->getMaximumSignificantDigits()),
- SLOPPY).Assert();
+ SLOPPY)
+ .Assert();
}
// Set the locale
char result[ULOC_FULLNAME_CAPACITY];
status = U_ZERO_ERROR;
- uloc_toLanguageTag(
- icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
+ FALSE, &status);
if (U_SUCCESS(status)) {
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result),
- SLOPPY).Assert();
+ factory->NewStringFromAsciiChecked(result), SLOPPY)
+ .Assert();
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"),
- SLOPPY).Assert();
+ factory->NewStringFromStaticChars("und"), SLOPPY)
+ .Assert();
}
}
-
-icu::Collator* CreateICUCollator(
- Isolate* isolate,
- const icu::Locale& icu_locale,
- Handle<JSObject> options) {
+icu::Collator* CreateICUCollator(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
// Make collator from options.
icu::Collator* collator = NULL;
UErrorCode status = U_ZERO_ERROR;
@@ -465,8 +476,8 @@ icu::Collator* CreateICUCollator(
// Set flags first, and then override them with sensitivity if necessary.
bool numeric;
if (ExtractBooleanSetting(isolate, options, "numeric", &numeric)) {
- collator->setAttribute(
- UCOL_NUMERIC_COLLATION, numeric ? UCOL_ON : UCOL_OFF, status);
+ collator->setAttribute(UCOL_NUMERIC_COLLATION, numeric ? UCOL_ON : UCOL_OFF,
+ status);
}
// Normalization is always on, by the spec. We are free to optimize
@@ -511,7 +522,6 @@ icu::Collator* CreateICUCollator(
return collator;
}
-
void SetResolvedCollatorSettings(Isolate* isolate,
const icu::Locale& icu_locale,
icu::Collator* collator,
@@ -523,106 +533,120 @@ void SetResolvedCollatorSettings(Isolate* isolate,
resolved, factory->NewStringFromStaticChars("numeric"),
factory->ToBoolean(
collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON),
- SLOPPY).Assert();
+ SLOPPY)
+ .Assert();
switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
case UCOL_LOWER_FIRST:
- JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("caseFirst"),
- factory->NewStringFromStaticChars("lower"), SLOPPY).Assert();
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("caseFirst"),
+ factory->NewStringFromStaticChars("lower"), SLOPPY)
+ .Assert();
break;
case UCOL_UPPER_FIRST:
- JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("caseFirst"),
- factory->NewStringFromStaticChars("upper"), SLOPPY).Assert();
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("caseFirst"),
+ factory->NewStringFromStaticChars("upper"), SLOPPY)
+ .Assert();
break;
default:
- JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("caseFirst"),
- factory->NewStringFromStaticChars("false"), SLOPPY).Assert();
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("caseFirst"),
+ factory->NewStringFromStaticChars("false"), SLOPPY)
+ .Assert();
}
switch (collator->getAttribute(UCOL_STRENGTH, status)) {
case UCOL_PRIMARY: {
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("primary"), SLOPPY).Assert();
+ factory->NewStringFromStaticChars("primary"), SLOPPY)
+ .Assert();
// case level: true + s1 -> case, s1 -> base.
if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
- JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("case"), SLOPPY).Assert();
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("sensitivity"),
+ factory->NewStringFromStaticChars("case"), SLOPPY)
+ .Assert();
} else {
- JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("base"), SLOPPY).Assert();
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("sensitivity"),
+ factory->NewStringFromStaticChars("base"), SLOPPY)
+ .Assert();
}
break;
}
case UCOL_SECONDARY:
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("secondary"), SLOPPY).Assert();
- JSObject::SetProperty(
- resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("accent"), SLOPPY).Assert();
+ factory->NewStringFromStaticChars("secondary"), SLOPPY)
+ .Assert();
+ JSObject::SetProperty(resolved,
+ factory->NewStringFromStaticChars("sensitivity"),
+ factory->NewStringFromStaticChars("accent"), SLOPPY)
+ .Assert();
break;
case UCOL_TERTIARY:
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("tertiary"), SLOPPY).Assert();
+ factory->NewStringFromStaticChars("tertiary"), SLOPPY)
+ .Assert();
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("variant"), SLOPPY).Assert();
+ factory->NewStringFromStaticChars("variant"), SLOPPY)
+ .Assert();
break;
case UCOL_QUATERNARY:
// We shouldn't get quaternary and identical from ICU, but if we do
// put them into variant.
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("quaternary"), SLOPPY).Assert();
+ factory->NewStringFromStaticChars("quaternary"), SLOPPY)
+ .Assert();
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("variant"), SLOPPY).Assert();
+ factory->NewStringFromStaticChars("variant"), SLOPPY)
+ .Assert();
break;
default:
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("strength"),
- factory->NewStringFromStaticChars("identical"), SLOPPY).Assert();
+ factory->NewStringFromStaticChars("identical"), SLOPPY)
+ .Assert();
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("sensitivity"),
- factory->NewStringFromStaticChars("variant"), SLOPPY).Assert();
+ factory->NewStringFromStaticChars("variant"), SLOPPY)
+ .Assert();
}
JSObject::SetProperty(
resolved, factory->NewStringFromStaticChars("ignorePunctuation"),
factory->ToBoolean(collator->getAttribute(UCOL_ALTERNATE_HANDLING,
status) == UCOL_SHIFTED),
- SLOPPY).Assert();
+ SLOPPY)
+ .Assert();
// Set the locale
char result[ULOC_FULLNAME_CAPACITY];
status = U_ZERO_ERROR;
- uloc_toLanguageTag(
- icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
+ FALSE, &status);
if (U_SUCCESS(status)) {
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result),
- SLOPPY).Assert();
+ factory->NewStringFromAsciiChecked(result), SLOPPY)
+ .Assert();
} else {
// This would never happen, since we got the locale from ICU.
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"),
- SLOPPY).Assert();
+ factory->NewStringFromStaticChars("und"), SLOPPY)
+ .Assert();
}
}
-
-icu::BreakIterator* CreateICUBreakIterator(
- Isolate* isolate,
- const icu::Locale& icu_locale,
- Handle<JSObject> options) {
+icu::BreakIterator* CreateICUBreakIterator(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ Handle<JSObject> options) {
UErrorCode status = U_ZERO_ERROR;
icu::BreakIterator* break_iterator = NULL;
icu::UnicodeString type;
@@ -630,17 +654,15 @@ icu::BreakIterator* CreateICUBreakIterator(
if (type == UNICODE_STRING_SIMPLE("character")) {
break_iterator =
- icu::BreakIterator::createCharacterInstance(icu_locale, status);
+ icu::BreakIterator::createCharacterInstance(icu_locale, status);
} else if (type == UNICODE_STRING_SIMPLE("sentence")) {
break_iterator =
- icu::BreakIterator::createSentenceInstance(icu_locale, status);
+ icu::BreakIterator::createSentenceInstance(icu_locale, status);
} else if (type == UNICODE_STRING_SIMPLE("line")) {
- break_iterator =
- icu::BreakIterator::createLineInstance(icu_locale, status);
+ break_iterator = icu::BreakIterator::createLineInstance(icu_locale, status);
} else {
    // Default is the word iterator.
- break_iterator =
- icu::BreakIterator::createWordInstance(icu_locale, status);
+ break_iterator = icu::BreakIterator::createWordInstance(icu_locale, status);
}
if (U_FAILURE(status)) {
@@ -653,7 +675,6 @@ icu::BreakIterator* CreateICUBreakIterator(
return break_iterator;
}
-
void SetResolvedBreakIteratorSettings(Isolate* isolate,
const icu::Locale& icu_locale,
icu::BreakIterator* break_iterator,
@@ -664,26 +685,24 @@ void SetResolvedBreakIteratorSettings(Isolate* isolate,
// Set the locale
char result[ULOC_FULLNAME_CAPACITY];
status = U_ZERO_ERROR;
- uloc_toLanguageTag(
- icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
+ uloc_toLanguageTag(icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY,
+ FALSE, &status);
if (U_SUCCESS(status)) {
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromAsciiChecked(result),
- SLOPPY).Assert();
+ factory->NewStringFromAsciiChecked(result), SLOPPY)
+ .Assert();
} else {
    // This should never happen, since we got the locale from ICU.
JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
- factory->NewStringFromStaticChars("und"),
- SLOPPY).Assert();
+ factory->NewStringFromStaticChars("und"), SLOPPY)
+ .Assert();
}
}
} // namespace
// static
icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
- Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
+ Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
Handle<JSObject> resolved) {
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
@@ -700,8 +719,8 @@ icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
icu_locale = icu::Locale(icu_result);
}
- icu::SimpleDateFormat* date_format = CreateICUDateFormat(
- isolate, icu_locale, options);
+ icu::SimpleDateFormat* date_format =
+ CreateICUDateFormat(isolate, icu_locale, options);
if (!date_format) {
// Remove extensions and try again.
icu::Locale no_extension_locale(icu_locale.getBaseName());
@@ -712,8 +731,8 @@ icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
}
// Set resolved settings (pattern, numbering system, calendar).
- SetResolvedDateSettings(
- isolate, no_extension_locale, date_format, resolved);
+ SetResolvedDateSettings(isolate, no_extension_locale, date_format,
+ resolved);
} else {
SetResolvedDateSettings(isolate, icu_locale, date_format, resolved);
}
@@ -721,10 +740,8 @@ icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
return date_format;
}
-
-icu::SimpleDateFormat* DateFormat::UnpackDateFormat(
- Isolate* isolate,
- Handle<JSObject> obj) {
+icu::SimpleDateFormat* DateFormat::UnpackDateFormat(Isolate* isolate,
+ Handle<JSObject> obj) {
return reinterpret_cast<icu::SimpleDateFormat*>(obj->GetEmbedderField(0));
}
@@ -733,11 +750,8 @@ void DateFormat::DeleteDateFormat(const v8::WeakCallbackInfo<void>& data) {
GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
-
icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
- Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
+ Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
Handle<JSObject> resolved) {
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
@@ -759,16 +773,16 @@ icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
if (!number_format) {
// Remove extensions and try again.
icu::Locale no_extension_locale(icu_locale.getBaseName());
- number_format = CreateICUNumberFormat(
- isolate, no_extension_locale, options);
+ number_format =
+ CreateICUNumberFormat(isolate, no_extension_locale, options);
if (!number_format) {
FATAL("Failed to create ICU number format, are ICU data files missing?");
}
// Set resolved settings (pattern, numbering system).
- SetResolvedNumberSettings(
- isolate, no_extension_locale, number_format, resolved);
+ SetResolvedNumberSettings(isolate, no_extension_locale, number_format,
+ resolved);
} else {
SetResolvedNumberSettings(isolate, icu_locale, number_format, resolved);
}
@@ -776,10 +790,8 @@ icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
return number_format;
}
-
-icu::DecimalFormat* NumberFormat::UnpackNumberFormat(
- Isolate* isolate,
- Handle<JSObject> obj) {
+icu::DecimalFormat* NumberFormat::UnpackNumberFormat(Isolate* isolate,
+ Handle<JSObject> obj) {
return reinterpret_cast<icu::DecimalFormat*>(obj->GetEmbedderField(0));
}
@@ -788,12 +800,10 @@ void NumberFormat::DeleteNumberFormat(const v8::WeakCallbackInfo<void>& data) {
GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
-
-icu::Collator* Collator::InitializeCollator(
- Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved) {
+icu::Collator* Collator::InitializeCollator(Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved) {
// Convert BCP47 into ICU locale format.
UErrorCode status = U_ZERO_ERROR;
icu::Locale icu_locale;
@@ -820,8 +830,8 @@ icu::Collator* Collator::InitializeCollator(
}
// Set resolved settings (pattern, numbering system).
- SetResolvedCollatorSettings(
- isolate, no_extension_locale, collator, resolved);
+ SetResolvedCollatorSettings(isolate, no_extension_locale, collator,
+ resolved);
} else {
SetResolvedCollatorSettings(isolate, icu_locale, collator, resolved);
}
@@ -829,7 +839,6 @@ icu::Collator* Collator::InitializeCollator(
return collator;
}
-
icu::Collator* Collator::UnpackCollator(Isolate* isolate,
Handle<JSObject> obj) {
return reinterpret_cast<icu::Collator*>(obj->GetEmbedderField(0));
@@ -858,24 +867,24 @@ icu::BreakIterator* V8BreakIterator::InitializeBreakIterator(
icu_locale = icu::Locale(icu_result);
}
- icu::BreakIterator* break_iterator = CreateICUBreakIterator(
- isolate, icu_locale, options);
+ icu::BreakIterator* break_iterator =
+ CreateICUBreakIterator(isolate, icu_locale, options);
if (!break_iterator) {
// Remove extensions and try again.
icu::Locale no_extension_locale(icu_locale.getBaseName());
- break_iterator = CreateICUBreakIterator(
- isolate, no_extension_locale, options);
+ break_iterator =
+ CreateICUBreakIterator(isolate, no_extension_locale, options);
if (!break_iterator) {
FATAL("Failed to create ICU break iterator, are ICU data files missing?");
}
// Set resolved settings (locale).
- SetResolvedBreakIteratorSettings(
- isolate, no_extension_locale, break_iterator, resolved);
+ SetResolvedBreakIteratorSettings(isolate, no_extension_locale,
+ break_iterator, resolved);
} else {
- SetResolvedBreakIteratorSettings(
- isolate, icu_locale, break_iterator, resolved);
+ SetResolvedBreakIteratorSettings(isolate, icu_locale, break_iterator,
+ resolved);
}
return break_iterator;
@@ -893,361 +902,5 @@ void V8BreakIterator::DeleteBreakIterator(
GlobalHandles::Destroy(reinterpret_cast<Object**>(data.GetParameter()));
}
-namespace {
-inline bool IsASCIIUpper(uint16_t ch) { return ch >= 'A' && ch <= 'Z'; }
-
-const uint8_t kToLower[256] = {
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
- 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
- 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23,
- 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
- 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B,
- 0x3C, 0x3D, 0x3E, 0x3F, 0x40, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
- 0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73,
- 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
- 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x6B,
- 0x6C, 0x6D, 0x6E, 0x6F, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
- 0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F, 0x80, 0x81, 0x82, 0x83,
- 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
- 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0x9B,
- 0x9C, 0x9D, 0x9E, 0x9F, 0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
- 0xA8, 0xA9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF, 0xB0, 0xB1, 0xB2, 0xB3,
- 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
- 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB,
- 0xEC, 0xED, 0xEE, 0xEF, 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xD7,
- 0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF, 0xE0, 0xE1, 0xE2, 0xE3,
- 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
- 0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0xFB,
- 0xFC, 0xFD, 0xFE, 0xFF,
-};
-
-inline uint16_t ToLatin1Lower(uint16_t ch) {
- return static_cast<uint16_t>(kToLower[ch]);
-}
-
-inline uint16_t ToASCIIUpper(uint16_t ch) {
- return ch & ~((ch >= 'a' && ch <= 'z') << 5);
-}
-
-// Does not work for U+00DF (sharp-s), U+00B5 (micro sign), U+00FF.
-inline uint16_t ToLatin1Upper(uint16_t ch) {
- DCHECK(ch != 0xDF && ch != 0xB5 && ch != 0xFF);
- return ch &
- ~(((ch >= 'a' && ch <= 'z') || (((ch & 0xE0) == 0xE0) && ch != 0xF7))
- << 5);
-}
-
-template <typename Char>
-bool ToUpperFastASCII(const Vector<const Char>& src,
- Handle<SeqOneByteString> result) {
- // Do a faster loop for the case where all the characters are ASCII.
- uint16_t ored = 0;
- int32_t index = 0;
- for (auto it = src.begin(); it != src.end(); ++it) {
- uint16_t ch = static_cast<uint16_t>(*it);
- ored |= ch;
- result->SeqOneByteStringSet(index++, ToASCIIUpper(ch));
- }
- return !(ored & ~0x7F);
-}
-
-const uint16_t sharp_s = 0xDF;
-
-template <typename Char>
-bool ToUpperOneByte(const Vector<const Char>& src, uint8_t* dest,
- int* sharp_s_count) {
- // Still pretty-fast path for the input with non-ASCII Latin-1 characters.
-
- // There are two special cases.
- // 1. U+00B5 and U+00FF are mapped to a character beyond U+00FF.
- // 2. Lower case sharp-S converts to "SS" (two characters)
- *sharp_s_count = 0;
- for (auto it = src.begin(); it != src.end(); ++it) {
- uint16_t ch = static_cast<uint16_t>(*it);
- if (V8_UNLIKELY(ch == sharp_s)) {
- ++(*sharp_s_count);
- continue;
- }
- if (V8_UNLIKELY(ch == 0xB5 || ch == 0xFF)) {
- // Since this upper-cased character does not fit in an 8-bit string, we
- // need to take the 16-bit path.
- return false;
- }
- *dest++ = ToLatin1Upper(ch);
- }
-
- return true;
-}
-
-template <typename Char>
-void ToUpperWithSharpS(const Vector<const Char>& src,
- Handle<SeqOneByteString> result) {
- int32_t dest_index = 0;
- for (auto it = src.begin(); it != src.end(); ++it) {
- uint16_t ch = static_cast<uint16_t>(*it);
- if (ch == sharp_s) {
- result->SeqOneByteStringSet(dest_index++, 'S');
- result->SeqOneByteStringSet(dest_index++, 'S');
- } else {
- result->SeqOneByteStringSet(dest_index++, ToLatin1Upper(ch));
- }
- }
-}
-
-inline int FindFirstUpperOrNonAscii(Handle<String> s, int length) {
- for (int index = 0; index < length; ++index) {
- uint16_t ch = s->Get(index);
- if (V8_UNLIKELY(IsASCIIUpper(ch) || ch & ~0x7F)) {
- return index;
- }
- }
- return length;
-}
-
-} // namespace
-
-const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
- std::unique_ptr<uc16[]>* dest,
- int32_t length) {
- DCHECK(flat.IsFlat());
- if (flat.IsOneByte()) {
- if (!*dest) {
- dest->reset(NewArray<uc16>(length));
- CopyChars(dest->get(), flat.ToOneByteVector().start(), length);
- }
- return reinterpret_cast<const UChar*>(dest->get());
- } else {
- return reinterpret_cast<const UChar*>(flat.ToUC16Vector().start());
- }
-}
-
-MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
- bool is_to_upper, const char* lang) {
- auto case_converter = is_to_upper ? u_strToUpper : u_strToLower;
- int32_t src_length = s->length();
- int32_t dest_length = src_length;
- UErrorCode status;
- Handle<SeqTwoByteString> result;
- std::unique_ptr<uc16[]> sap;
-
- if (dest_length == 0) return isolate->heap()->empty_string();
-
- // This is not a real loop. It'll be executed only once (no overflow) or
- // twice (overflow).
- for (int i = 0; i < 2; ++i) {
- // Case conversion can increase the string length (e.g. sharp-S => SS) so
- // that we have to handle RangeError exceptions here.
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result, isolate->factory()->NewRawTwoByteString(dest_length));
- DisallowHeapAllocation no_gc;
- DCHECK(s->IsFlat());
- String::FlatContent flat = s->GetFlatContent();
- const UChar* src = GetUCharBufferFromFlat(flat, &sap, src_length);
- status = U_ZERO_ERROR;
- dest_length = case_converter(reinterpret_cast<UChar*>(result->GetChars()),
- dest_length, src, src_length, lang, &status);
- if (status != U_BUFFER_OVERFLOW_ERROR) break;
- }
-
- // In most cases, the output will fill the destination buffer completely
- // leading to an unterminated string (U_STRING_NOT_TERMINATED_WARNING).
- // Only in rare cases, it'll be shorter than the destination buffer and
- // |result| has to be truncated.
- DCHECK(U_SUCCESS(status));
- if (V8_LIKELY(status == U_STRING_NOT_TERMINATED_WARNING)) {
- DCHECK(dest_length == result->length());
- return *result;
- }
- if (U_SUCCESS(status)) {
- DCHECK(dest_length < result->length());
- return *Handle<SeqTwoByteString>::cast(
- SeqString::Truncate(result, dest_length));
- }
- return *s;
-}
-
-MUST_USE_RESULT Object* ConvertToLower(Handle<String> s, Isolate* isolate) {
- if (!s->HasOnlyOneByteChars()) {
- // Use a slower implementation for strings with characters beyond U+00FF.
- return LocaleConvertCase(s, isolate, false, "");
- }
-
- int length = s->length();
-
- // We depend here on the invariant that the length of a Latin1
- // string is invariant under ToLowerCase, and the result always
- // fits in the Latin1 range in the *root locale*. It does not hold
- // for ToUpperCase even in the root locale.
-
- // Scan the string for uppercase and non-ASCII characters for strings
- // shorter than a machine-word without any memory allocation overhead.
- // TODO(jshin): Apply this to a longer input by breaking FastAsciiConvert()
- // to two parts, one for scanning the prefix with no change and the other for
- // handling ASCII-only characters.
- int index_to_first_unprocessed = length;
- const bool is_short = length < static_cast<int>(sizeof(uintptr_t));
- if (is_short) {
- index_to_first_unprocessed = FindFirstUpperOrNonAscii(s, length);
- // Nothing to do if the string is all ASCII with no uppercase.
- if (index_to_first_unprocessed == length) return *s;
- }
-
- Handle<SeqOneByteString> result =
- isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
-
- DisallowHeapAllocation no_gc;
- DCHECK(s->IsFlat());
- String::FlatContent flat = s->GetFlatContent();
- uint8_t* dest = result->GetChars();
- if (flat.IsOneByte()) {
- const uint8_t* src = flat.ToOneByteVector().start();
- bool has_changed_character = false;
- index_to_first_unprocessed = FastAsciiConvert<true>(
- reinterpret_cast<char*>(dest), reinterpret_cast<const char*>(src),
- length, &has_changed_character);
- // If not ASCII, we keep the result up to index_to_first_unprocessed and
- // process the rest.
- if (index_to_first_unprocessed == length)
- return has_changed_character ? *result : *s;
-
- for (int index = index_to_first_unprocessed; index < length; ++index) {
- dest[index] = ToLatin1Lower(static_cast<uint16_t>(src[index]));
- }
- } else {
- if (index_to_first_unprocessed == length) {
- DCHECK(!is_short);
- index_to_first_unprocessed = FindFirstUpperOrNonAscii(s, length);
- }
- // Nothing to do if the string is all ASCII with no uppercase.
- if (index_to_first_unprocessed == length) return *s;
- const uint16_t* src = flat.ToUC16Vector().start();
- CopyChars(dest, src, index_to_first_unprocessed);
- for (int index = index_to_first_unprocessed; index < length; ++index) {
- dest[index] = ToLatin1Lower(static_cast<uint16_t>(src[index]));
- }
- }
-
- return *result;
-}
-
-MUST_USE_RESULT Object* ConvertToUpper(Handle<String> s, Isolate* isolate) {
- int32_t length = s->length();
- if (s->HasOnlyOneByteChars() && length > 0) {
- Handle<SeqOneByteString> result =
- isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
-
- DCHECK(s->IsFlat());
- int sharp_s_count;
- bool is_result_single_byte;
- {
- DisallowHeapAllocation no_gc;
- String::FlatContent flat = s->GetFlatContent();
- uint8_t* dest = result->GetChars();
- if (flat.IsOneByte()) {
- Vector<const uint8_t> src = flat.ToOneByteVector();
- bool has_changed_character = false;
- int index_to_first_unprocessed =
- FastAsciiConvert<false>(reinterpret_cast<char*>(result->GetChars()),
- reinterpret_cast<const char*>(src.start()),
- length, &has_changed_character);
- if (index_to_first_unprocessed == length)
- return has_changed_character ? *result : *s;
- // If not ASCII, we keep the result up to index_to_first_unprocessed and
- // process the rest.
- is_result_single_byte =
- ToUpperOneByte(src.SubVector(index_to_first_unprocessed, length),
- dest + index_to_first_unprocessed, &sharp_s_count);
- } else {
- DCHECK(flat.IsTwoByte());
- Vector<const uint16_t> src = flat.ToUC16Vector();
- if (ToUpperFastASCII(src, result)) return *result;
- is_result_single_byte = ToUpperOneByte(src, dest, &sharp_s_count);
- }
- }
-
- // Go to the full Unicode path if there are characters whose uppercase
- // is beyond the Latin-1 range (cannot be represented in OneByteString).
- if (V8_UNLIKELY(!is_result_single_byte)) {
- return LocaleConvertCase(s, isolate, true, "");
- }
-
- if (sharp_s_count == 0) return *result;
-
- // We have sharp_s_count sharp-s characters, but the result is still
- // in the Latin-1 range.
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, result,
- isolate->factory()->NewRawOneByteString(length + sharp_s_count));
- DisallowHeapAllocation no_gc;
- String::FlatContent flat = s->GetFlatContent();
- if (flat.IsOneByte()) {
- ToUpperWithSharpS(flat.ToOneByteVector(), result);
- } else {
- ToUpperWithSharpS(flat.ToUC16Vector(), result);
- }
-
- return *result;
- }
-
- return LocaleConvertCase(s, isolate, true, "");
-}
-
-MUST_USE_RESULT Object* ConvertCase(Handle<String> s, bool is_upper,
- Isolate* isolate) {
- return is_upper ? ConvertToUpper(s, isolate) : ConvertToLower(s, isolate);
-}
-
-ICUTimezoneCache::ICUTimezoneCache() : timezone_(nullptr) { Clear(); }
-
-ICUTimezoneCache::~ICUTimezoneCache() { Clear(); }
-
-const char* ICUTimezoneCache::LocalTimezone(double time_ms) {
- bool is_dst = DaylightSavingsOffset(time_ms) != 0;
- char* name = is_dst ? dst_timezone_name_ : timezone_name_;
- if (name[0] == '\0') {
- icu::UnicodeString result;
- GetTimeZone()->getDisplayName(is_dst, icu::TimeZone::LONG, result);
- result += '\0';
-
- icu::CheckedArrayByteSink byte_sink(name, kMaxTimezoneChars);
- result.toUTF8(byte_sink);
- CHECK(!byte_sink.Overflowed());
- }
- return const_cast<const char*>(name);
-}
-
-icu::TimeZone* ICUTimezoneCache::GetTimeZone() {
- if (timezone_ == nullptr) {
- timezone_ = icu::TimeZone::createDefault();
- }
- return timezone_;
-}
-
-bool ICUTimezoneCache::GetOffsets(double time_ms, int32_t* raw_offset,
- int32_t* dst_offset) {
- UErrorCode status = U_ZERO_ERROR;
- GetTimeZone()->getOffset(time_ms, false, *raw_offset, *dst_offset, status);
- return U_SUCCESS(status);
-}
-
-double ICUTimezoneCache::DaylightSavingsOffset(double time_ms) {
- int32_t raw_offset, dst_offset;
- if (!GetOffsets(time_ms, &raw_offset, &dst_offset)) return 0;
- return dst_offset;
-}
-
-double ICUTimezoneCache::LocalTimeOffset() {
- int32_t raw_offset, dst_offset;
- if (!GetOffsets(icu::Calendar::getNow(), &raw_offset, &dst_offset)) return 0;
- return raw_offset;
-}
-
-void ICUTimezoneCache::Clear() {
- delete timezone_;
- timezone_ = nullptr;
- timezone_name_[0] = '\0';
- dst_timezone_name_[0] = '\0';
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/i18n.h b/deps/v8/src/objects/intl-objects.h
index 5b5aca4d5f..890a21d074 100644
--- a/deps/v8/src/i18n.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -1,12 +1,14 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// limitations under the License.
-#ifndef V8_I18N_H_
-#define V8_I18N_H_
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
+
+#ifndef V8_OBJECTS_INTL_OBJECTS_H_
+#define V8_OBJECTS_INTL_OBJECTS_H_
-#include "src/base/timezone-cache.h"
#include "src/objects.h"
#include "unicode/uversion.h"
@@ -15,7 +17,6 @@ class BreakIterator;
class Collator;
class DecimalFormat;
class SimpleDateFormat;
-class TimeZone;
}
namespace v8 {
@@ -29,9 +30,7 @@ class DateFormat {
  // Create a formatter for the specified locale and options. Returns the
// resolved settings for the locale / options.
static icu::SimpleDateFormat* InitializeDateTimeFormat(
- Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
+ Isolate* isolate, Handle<String> locale, Handle<JSObject> options,
Handle<JSObject> resolved);
// Unpacks date format object from corresponding JavaScript object.
@@ -50,16 +49,14 @@ class DateFormat {
DateFormat();
};
-
class NumberFormat {
public:
  // Create a formatter for the specified locale and options. Returns the
// resolved settings for the locale / options.
- static icu::DecimalFormat* InitializeNumberFormat(
- Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved);
+ static icu::DecimalFormat* InitializeNumberFormat(Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
// Unpacks number format object from corresponding JavaScript object.
static icu::DecimalFormat* UnpackNumberFormat(Isolate* isolate,
@@ -77,16 +74,14 @@ class NumberFormat {
NumberFormat();
};
-
class Collator {
public:
  // Create a collator for the specified locale and options. Returns the
// resolved settings for the locale / options.
- static icu::Collator* InitializeCollator(
- Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved);
+ static icu::Collator* InitializeCollator(Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
// Unpacks collator object from corresponding JavaScript object.
static icu::Collator* UnpackCollator(Isolate* isolate, Handle<JSObject> obj);
@@ -107,11 +102,10 @@ class V8BreakIterator {
public:
  // Create a BreakIterator for the specified locale and options. Returns the
// resolved settings for the locale / options.
- static icu::BreakIterator* InitializeBreakIterator(
- Isolate* isolate,
- Handle<String> locale,
- Handle<JSObject> options,
- Handle<JSObject> resolved);
+ static icu::BreakIterator* InitializeBreakIterator(Isolate* isolate,
+ Handle<String> locale,
+ Handle<JSObject> options,
+ Handle<JSObject> resolved);
// Unpacks break iterator object from corresponding JavaScript object.
static icu::BreakIterator* UnpackBreakIterator(Isolate* isolate,
@@ -130,45 +124,7 @@ class V8BreakIterator {
V8BreakIterator();
};
-const UChar* GetUCharBufferFromFlat(const String::FlatContent& flat,
- std::unique_ptr<uc16[]>* dest,
- int32_t length);
-MUST_USE_RESULT Object* LocaleConvertCase(Handle<String> s, Isolate* isolate,
- bool is_to_upper, const char* lang);
-MUST_USE_RESULT Object* ConvertToLower(Handle<String> s, Isolate* isolate);
-MUST_USE_RESULT Object* ConvertToUpper(Handle<String> s, Isolate* isolate);
-MUST_USE_RESULT Object* ConvertCase(Handle<String> s, bool is_upper,
- Isolate* isolate);
-
-// ICUTimezoneCache calls out to ICU for TimezoneCache
-// functionality in a straightforward way.
-class ICUTimezoneCache : public base::TimezoneCache {
- public:
- ICUTimezoneCache();
-
- ~ICUTimezoneCache() override;
-
- const char* LocalTimezone(double time_ms) override;
-
- double DaylightSavingsOffset(double time_ms) override;
-
- double LocalTimeOffset() override;
-
- void Clear() override;
-
- private:
- icu::TimeZone* GetTimeZone();
-
- bool GetOffsets(double time_ms, int32_t* raw_offset, int32_t* dst_offset);
-
- icu::TimeZone* timezone_;
-
- static const int32_t kMaxTimezoneChars = 100;
- char timezone_name_[kMaxTimezoneChars];
- char dst_timezone_name_[kMaxTimezoneChars];
-};
-
} // namespace internal
} // namespace v8
-#endif // V8_I18N_H_
+#endif // V8_OBJECTS_INTL_OBJECTS_H_
diff --git a/deps/v8/src/objects/literal-objects.h b/deps/v8/src/objects/literal-objects.h
index fdd321e74e..40bf70d602 100644
--- a/deps/v8/src/objects/literal-objects.h
+++ b/deps/v8/src/objects/literal-objects.h
@@ -40,20 +40,15 @@ class BoilerplateDescription : public FixedArray {
// Pair of {ElementsKind} and an array of constant values for {ArrayLiteral}
// expressions. Used to communicate with the runtime for literal boilerplate
// creation within the {Runtime_CreateArrayLiteral} method.
-class ConstantElementsPair : public Struct {
+class ConstantElementsPair : public Tuple2 {
public:
DECL_INT_ACCESSORS(elements_kind)
DECL_ACCESSORS(constant_values, FixedArrayBase)
DECLARE_CAST(ConstantElementsPair)
- // Dispatched behavior.
- DECLARE_PRINTER(ConstantElementsPair)
- DECLARE_VERIFIER(ConstantElementsPair)
-
- static const int kElementsKindOffset = HeapObject::kHeaderSize;
- static const int kConstantValuesOffset = kElementsKindOffset + kPointerSize;
- static const int kSize = kConstantValuesOffset + kPointerSize;
+ static const int kElementsKindOffset = kValue1Offset;
+ static const int kConstantValuesOffset = kValue2Offset;
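+  // Editorial note (illustrative, not part of the original patch): assuming
+  // Tuple2's layout in src/objects.h, where kValue1Offset is
+  // HeapObject::kHeaderSize and kValue2Offset follows one pointer later,
+  // these aliases yield exactly the same offsets as the explicit
+  // Struct-based layout removed above, so the object layout is unchanged.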
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantElementsPair);
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
new file mode 100644
index 0000000000..aab6e78668
--- /dev/null
+++ b/deps/v8/src/objects/map-inl.h
@@ -0,0 +1,68 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MAP_INL_H_
+#define V8_OBJECTS_MAP_INL_H_
+
+#include "src/field-type.h"
+#include "src/objects/map.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+CAST_ACCESSOR(Map)
+
+InterceptorInfo* Map::GetNamedInterceptor() {
+ DCHECK(has_named_interceptor());
+ FunctionTemplateInfo* info = GetFunctionTemplateInfo();
+ return InterceptorInfo::cast(info->named_property_handler());
+}
+
+InterceptorInfo* Map::GetIndexedInterceptor() {
+ DCHECK(has_indexed_interceptor());
+ FunctionTemplateInfo* info = GetFunctionTemplateInfo();
+ return InterceptorInfo::cast(info->indexed_property_handler());
+}
+
+bool Map::IsInplaceGeneralizableField(PropertyConstness constness,
+ Representation representation,
+ FieldType* field_type) {
+ if (FLAG_track_constant_fields && FLAG_modify_map_inplace &&
+ (constness == kConst)) {
+ // kConst -> kMutable field generalization may happen in-place.
+ return true;
+ }
+ if (representation.IsHeapObject() && !field_type->IsAny()) {
+ return true;
+ }
+ return false;
+}
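+
+// Editorial note: the following is an illustrative sketch, not part of the
+// original patch. It shows how a caller might combine
+// IsInplaceGeneralizableField with the Map API declared in src/objects/map.h
+// to pick between an in-place field generalization and a property
+// reconfiguration; `map`, `descriptor`, `new_representation` and
+// `new_field_type` are hypothetical inputs.
+//
+//   if (Map::IsInplaceGeneralizableField(kConst, new_representation,
+//                                        *new_field_type)) {
+//     Map::GeneralizeField(map, descriptor, kMutable, new_representation,
+//                          new_field_type);  // update the map tree in-place
+//   } else {
+//     map = Map::ReconfigureProperty(map, descriptor, kData, NONE,
+//                                    new_representation, new_field_type);
+//   }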
+
+int NormalizedMapCache::GetIndex(Handle<Map> map) {
+ return map->Hash() % NormalizedMapCache::kEntries;
+}
+
+bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject* obj) {
+ if (!obj->IsFixedArray()) return false;
+ if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
+ return false;
+ }
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ reinterpret_cast<NormalizedMapCache*>(const_cast<HeapObject*>(obj))
+ ->NormalizedMapCacheVerify();
+ }
+#endif
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_MAP_INL_H_
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
new file mode 100644
index 0000000000..7faf834c08
--- /dev/null
+++ b/deps/v8/src/objects/map.h
@@ -0,0 +1,829 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_MAP_H_
+#define V8_OBJECTS_MAP_H_
+
+#include "src/objects.h"
+
+#include "src/globals.h"
+
+// Has to be the last include (doesn't have include guards):
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+typedef std::vector<Handle<Map>> MapHandles;
+
+// All heap objects have a Map that describes their structure.
+// A Map contains information about:
+// - Size information about the object
+// - How to iterate over an object (for garbage collection)
+class Map : public HeapObject {
+ public:
+ // Instance size.
+ // Size in bytes or kVariableSizeSentinel if instances do not have
+ // a fixed size.
+ inline int instance_size();
+ inline void set_instance_size(int value);
+
+  // Only to clear an unused byte; remove once the byte is used.
+ inline void clear_unused();
+
+ // [inobject_properties_or_constructor_function_index]: Provides access
+ // to the inobject properties in case of JSObject maps, or the constructor
+ // function index in case of primitive maps.
+ inline int inobject_properties_or_constructor_function_index();
+ inline void set_inobject_properties_or_constructor_function_index(int value);
+ // Count of properties allocated in the object (JSObject only).
+ inline int GetInObjectProperties();
+ inline void SetInObjectProperties(int value);
+ // Index of the constructor function in the native context (primitives only),
+ // or the special sentinel value to indicate that there is no object wrapper
+ // for the primitive (i.e. in case of null or undefined).
+ static const int kNoConstructorFunctionIndex = 0;
+ inline int GetConstructorFunctionIndex();
+ inline void SetConstructorFunctionIndex(int value);
+ static MaybeHandle<JSFunction> GetConstructorFunction(
+ Handle<Map> map, Handle<Context> native_context);
+
+ // Retrieve interceptors.
+ inline InterceptorInfo* GetNamedInterceptor();
+ inline InterceptorInfo* GetIndexedInterceptor();
+
+ // Instance type.
+ inline InstanceType instance_type();
+ inline void set_instance_type(InstanceType value);
+
+ // Tells how many unused property fields are available in the
+ // instance (only used for JSObject in fast mode).
+ inline int unused_property_fields();
+ inline void set_unused_property_fields(int value);
+
+ // Bit field.
+ inline byte bit_field() const;
+ inline void set_bit_field(byte value);
+
+ // Bit field 2.
+ inline byte bit_field2() const;
+ inline void set_bit_field2(byte value);
+
+ // Bit field 3.
+ inline uint32_t bit_field3() const;
+ inline void set_bit_field3(uint32_t bits);
+
+ class EnumLengthBits : public BitField<int, 0, kDescriptorIndexBitCount> {
+ }; // NOLINT
+ class NumberOfOwnDescriptorsBits
+ : public BitField<int, kDescriptorIndexBitCount,
+ kDescriptorIndexBitCount> {}; // NOLINT
+ STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
+ class DictionaryMap : public BitField<bool, 20, 1> {};
+ class OwnsDescriptors : public BitField<bool, 21, 1> {};
+ class HasHiddenPrototype : public BitField<bool, 22, 1> {};
+ class Deprecated : public BitField<bool, 23, 1> {};
+ class IsUnstable : public BitField<bool, 24, 1> {};
+ class IsMigrationTarget : public BitField<bool, 25, 1> {};
+ class ImmutablePrototype : public BitField<bool, 26, 1> {};
+ class NewTargetIsBase : public BitField<bool, 27, 1> {};
+ // Bit 28 is free.
+
+ // Keep this bit field at the very end for better code in
+ // Builtins::kJSConstructStubGeneric stub.
+ // This counter is used for in-object slack tracking.
+  // The in-object slack tracking is considered enabled when the counter is
+  // non-zero. The counter only has a valid count for initial maps. For
+  // transitioned maps only kNoSlackTracking is meaningful, namely that
+  // inobject slack tracking has already finished for the transition tree.
+  // Any other value indicates that either inobject slack tracking is still
+  // in progress, or that the map isn't part of the transition tree anymore.
+ class ConstructionCounter : public BitField<int, 29, 3> {};
+ static const int kSlackTrackingCounterStart = 7;
+ static const int kSlackTrackingCounterEnd = 1;
+ static const int kNoSlackTracking = 0;
+ STATIC_ASSERT(kSlackTrackingCounterStart <= ConstructionCounter::kMax);
+
+ // Inobject slack tracking is the way to reclaim unused inobject space.
+ //
+ // The instance size is initially determined by adding some slack to
+ // expected_nof_properties (to allow for a few extra properties added
+ // after the constructor). There is no guarantee that the extra space
+ // will not be wasted.
+ //
+  // Here is the algorithm to reclaim the unused inobject space:
+  // - Detect the first constructor call for this JSFunction.
+  //   When it happens, enter the "in progress" state: initialize the
+  //   construction counter in the initial_map.
+  // - While the tracking is in progress, initialize unused properties of a
+  //   new object with one_pointer_filler_map instead of undefined_value (the
+  //   "used" part is initialized with undefined_value as usual). This way
+  //   they can be resized quickly and safely.
+  // - Once enough objects have been created, compute the 'slack'
+  //   (traverse the map transition tree starting from the initial_map and
+  //   find the lowest value of unused_property_fields).
+  // - Traverse the transition tree again and decrease the instance size of
+  //   every map. Existing objects will resize automatically (they are filled
+  //   with one_pointer_filler_map). All further allocations will use the
+  //   adjusted instance size.
+  // - The SharedFunctionInfo's expected_nof_properties is left unmodified,
+  //   since allocations made using different closures could actually create
+  //   different kinds of objects (see the prototype inheritance pattern).
+  //
+  // Important: inobject slack tracking is not attempted during snapshot
+  // creation.
+
+ static const int kGenerousAllocationCount =
+ kSlackTrackingCounterStart - kSlackTrackingCounterEnd + 1;
+
+  // Starts the tracking by initializing the object construction countdown
+  // counter.
+ void StartInobjectSlackTracking();
+
+  // True if the object construction countdown counter is in the range
+  // [kSlackTrackingCounterEnd, kSlackTrackingCounterStart].
+ inline bool IsInobjectSlackTrackingInProgress();
+
+ // Does the tracking step.
+ inline void InobjectSlackTrackingStep();
+
+ // Completes inobject slack tracking for the transition tree starting at this
+ // initial map.
+ void CompleteInobjectSlackTracking();
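+
+  // Editorial note: an illustrative sketch, not part of this patch, of how
+  // the countdown described above could drive these hooks (V8's actual
+  // implementation lives elsewhere):
+  //
+  //   void Map::InobjectSlackTrackingStep() {
+  //     if (!IsInobjectSlackTrackingInProgress()) return;
+  //     int counter = construction_counter();
+  //     set_construction_counter(counter - 1);
+  //     if (counter == kSlackTrackingCounterEnd) {
+  //       CompleteInobjectSlackTracking();  // shrink maps in the tree
+  //     }
+  //   }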
+
+ // Tells whether the object in the prototype property will be used
+ // for instances created from this function. If the prototype
+ // property is set to a value that is not a JSObject, the prototype
+ // property will not be used to create instances of the function.
+ // See ECMA-262, 13.2.2.
+ inline void set_non_instance_prototype(bool value);
+ inline bool has_non_instance_prototype();
+
+ // Tells whether the instance has a [[Construct]] internal method.
+ // This property is implemented according to ES6, section 7.2.4.
+ inline void set_is_constructor(bool value);
+ inline bool is_constructor() const;
+
+ // Tells whether the instance with this map has a hidden prototype.
+ inline void set_has_hidden_prototype(bool value);
+ inline bool has_hidden_prototype() const;
+
+ // Records and queries whether the instance has a named interceptor.
+ inline void set_has_named_interceptor();
+ inline bool has_named_interceptor();
+
+ // Records and queries whether the instance has an indexed interceptor.
+ inline void set_has_indexed_interceptor();
+ inline bool has_indexed_interceptor();
+
+ // Tells whether the instance is undetectable.
+ // An undetectable object is a special class of JSObject: 'typeof' operator
+ // returns undefined, ToBoolean returns false. Otherwise it behaves like
+ // a normal JS object. It is useful for implementing undetectable
+ // document.all in Firefox & Safari.
+ // See https://bugzilla.mozilla.org/show_bug.cgi?id=248549.
+ inline void set_is_undetectable();
+ inline bool is_undetectable();
+
+ // Tells whether the instance has a [[Call]] internal method.
+ // This property is implemented according to ES6, section 7.2.3.
+ inline void set_is_callable();
+ inline bool is_callable() const;
+
+ inline void set_new_target_is_base(bool value);
+ inline bool new_target_is_base();
+ inline void set_is_extensible(bool value);
+ inline bool is_extensible();
+ inline void set_is_prototype_map(bool value);
+ inline bool is_prototype_map() const;
+
+ inline void set_elements_kind(ElementsKind elements_kind);
+ inline ElementsKind elements_kind();
+
+ // Tells whether the instance has fast elements that are only Smis.
+ inline bool has_fast_smi_elements();
+
+ // Tells whether the instance has fast elements.
+ inline bool has_fast_object_elements();
+ inline bool has_fast_smi_or_object_elements();
+ inline bool has_fast_double_elements();
+ inline bool has_fast_elements();
+ inline bool has_sloppy_arguments_elements();
+ inline bool has_fast_sloppy_arguments_elements();
+ inline bool has_fast_string_wrapper_elements();
+ inline bool has_fixed_typed_array_elements();
+ inline bool has_dictionary_elements();
+
+ static bool IsValidElementsTransition(ElementsKind from_kind,
+ ElementsKind to_kind);
+
+  // Returns true if the current map doesn't have DICTIONARY_ELEMENTS but a
+  // map with DICTIONARY_ELEMENTS was found in the prototype chain.
+ bool DictionaryElementsInPrototypeChainOnly();
+
+ inline Map* ElementsTransitionMap();
+
+ inline FixedArrayBase* GetInitialElements();
+
+ // [raw_transitions]: Provides access to the transitions storage field.
+ // Don't call set_raw_transitions() directly to overwrite transitions, use
+ // the TransitionArray::ReplaceTransitions() wrapper instead!
+ DECL_ACCESSORS(raw_transitions, Object)
+ // [prototype_info]: Per-prototype metadata. Aliased with transitions
+ // (which prototype maps don't have).
+ DECL_ACCESSORS(prototype_info, Object)
+ // PrototypeInfo is created lazily using this helper (which installs it on
+ // the given prototype's map).
+ static Handle<PrototypeInfo> GetOrCreatePrototypeInfo(
+ Handle<JSObject> prototype, Isolate* isolate);
+ static Handle<PrototypeInfo> GetOrCreatePrototypeInfo(
+ Handle<Map> prototype_map, Isolate* isolate);
+ inline bool should_be_fast_prototype_map() const;
+ static void SetShouldBeFastPrototypeMap(Handle<Map> map, bool value,
+ Isolate* isolate);
+
+ // [prototype chain validity cell]: Associated with a prototype object,
+ // stored in that object's map's PrototypeInfo, indicates that prototype
+ // chains through this object are currently valid. The cell will be
+ // invalidated and replaced when the prototype chain changes.
+ static Handle<Cell> GetOrCreatePrototypeChainValidityCell(Handle<Map> map,
+ Isolate* isolate);
+ static const int kPrototypeChainValid = 0;
+ static const int kPrototypeChainInvalid = 1;
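+
+  // Editorial note: an illustrative sketch, not part of this patch. A guard
+  // that depends on a prototype chain can check the cell like so (`map` and
+  // `isolate` are hypothetical):
+  //
+  //   Handle<Cell> cell =
+  //       Map::GetOrCreatePrototypeChainValidityCell(map, isolate);
+  //   bool still_valid =
+  //       cell->value() == Smi::FromInt(Map::kPrototypeChainValid);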
+
+ // Return the map of the root of object's prototype chain.
+ Map* GetPrototypeChainRootMap(Isolate* isolate);
+
+ // Returns a WeakCell object containing given prototype. The cell is cached
+ // in PrototypeInfo which is created lazily.
+ static Handle<WeakCell> GetOrCreatePrototypeWeakCell(
+ Handle<JSObject> prototype, Isolate* isolate);
+
+ Map* FindRootMap();
+ Map* FindFieldOwner(int descriptor);
+
+ inline int GetInObjectPropertyOffset(int index);
+
+ int NumberOfFields();
+
+ // Returns true if transition to the given map requires special
+ // synchronization with the concurrent marker.
+ bool TransitionRequiresSynchronizationWithGC(Map* target);
+ // Returns true if transition to the given map removes a tagged in-object
+ // field.
+ bool TransitionRemovesTaggedField(Map* target);
+ // Returns true if transition to the given map replaces a tagged in-object
+ // field with an untagged in-object field.
+ bool TransitionChangesTaggedFieldToUntaggedField(Map* target);
+
+  // TODO(ishell): candidate for merging with JSObject::MigrateToMap().
+ bool InstancesNeedRewriting(Map* target);
+ bool InstancesNeedRewriting(Map* target, int target_number_of_fields,
+ int target_inobject, int target_unused,
+ int* old_number_of_fields);
+ // TODO(ishell): moveit!
+ static Handle<Map> GeneralizeAllFields(Handle<Map> map);
+ MUST_USE_RESULT static Handle<FieldType> GeneralizeFieldType(
+ Representation rep1, Handle<FieldType> type1, Representation rep2,
+ Handle<FieldType> type2, Isolate* isolate);
+ static void GeneralizeField(Handle<Map> map, int modify_index,
+ PropertyConstness new_constness,
+ Representation new_representation,
+ Handle<FieldType> new_field_type);
+  // Returns true if the |descriptor|'th property is a field that may be
+  // generalized by just updating the current map.
+ static inline bool IsInplaceGeneralizableField(PropertyConstness constness,
+ Representation representation,
+ FieldType* field_type);
+
+ static Handle<Map> ReconfigureProperty(Handle<Map> map, int modify_index,
+ PropertyKind new_kind,
+ PropertyAttributes new_attributes,
+ Representation new_representation,
+ Handle<FieldType> new_field_type);
+
+ static Handle<Map> ReconfigureElementsKind(Handle<Map> map,
+ ElementsKind new_elements_kind);
+
+ static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
+ int descriptor_number,
+ PropertyConstness constness,
+ Handle<Object> value);
+
+ static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode,
+ const char* reason);
+
+  // Tells whether the map is used for JSObjects in dictionary mode (i.e.
+  // normalized objects, i.e. objects for which HasFastProperties returns
+  // false).
+ // A map can never be used for both dictionary mode and fast mode JSObjects.
+ // False by default and for HeapObjects that are not JSObjects.
+ inline void set_dictionary_map(bool value);
+ inline bool is_dictionary_map();
+
+ // Tells whether the instance needs security checks when accessing its
+ // properties.
+ inline void set_is_access_check_needed(bool access_check_needed);
+ inline bool is_access_check_needed();
+
+ // Returns true if map has a non-empty stub code cache.
+ inline bool has_code_cache();
+
+ // [prototype]: implicit prototype object.
+ DECL_ACCESSORS(prototype, Object)
+ // TODO(jkummerow): make set_prototype private.
+ static void SetPrototype(
+ Handle<Map> map, Handle<Object> prototype,
+ PrototypeOptimizationMode proto_mode = FAST_PROTOTYPE);
+
+ // [constructor]: points back to the function or FunctionTemplateInfo
+ // responsible for this map.
+ // The field overlaps with the back pointer. All maps in a transition tree
+ // have the same constructor, so maps with back pointers can walk the
+ // back pointer chain until they find the map holding their constructor.
+ // Returns null_value if there's neither a constructor function nor a
+ // FunctionTemplateInfo available.
+ DECL_ACCESSORS(constructor_or_backpointer, Object)
+ inline Object* GetConstructor() const;
+ inline FunctionTemplateInfo* GetFunctionTemplateInfo() const;
+ inline void SetConstructor(Object* constructor,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
+ // [back pointer]: points back to the parent map from which a transition
+ // leads to this map. The field overlaps with the constructor (see above).
+ inline Object* GetBackPointer();
+ inline void SetBackPointer(Object* value,
+ WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
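+
+  // Editorial note: an illustrative sketch, not part of this patch. Because
+  // the field overlaps with the back pointer, retrieving the constructor
+  // amounts to walking back pointers until a non-map value is reached,
+  // roughly:
+  //
+  //   Object* maybe_constructor = constructor_or_backpointer();
+  //   while (maybe_constructor->IsMap()) {
+  //     maybe_constructor =
+  //         Map::cast(maybe_constructor)->constructor_or_backpointer();
+  //   }
+  //   return maybe_constructor;  // JSFunction, FunctionTemplateInfo or null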
+
+ // [instance descriptors]: describes the object.
+ DECL_ACCESSORS(instance_descriptors, DescriptorArray)
+
+ // [layout descriptor]: describes the object layout.
+ DECL_ACCESSORS(layout_descriptor, LayoutDescriptor)
+ // |layout descriptor| accessor which can be used from GC.
+ inline LayoutDescriptor* layout_descriptor_gc_safe();
+ inline bool HasFastPointerLayout() const;
+
+ // |layout descriptor| accessor that is safe to call even when
+ // FLAG_unbox_double_fields is disabled (in this case Map does not contain
+ // |layout_descriptor| field at all).
+ inline LayoutDescriptor* GetLayoutDescriptor();
+
+ inline void UpdateDescriptors(DescriptorArray* descriptors,
+ LayoutDescriptor* layout_descriptor);
+ inline void InitializeDescriptors(DescriptorArray* descriptors,
+ LayoutDescriptor* layout_descriptor);
+
+ // [stub cache]: contains stubs compiled for this map.
+ DECL_ACCESSORS(code_cache, FixedArray)
+
+ // [dependent code]: list of optimized codes that weakly embed this map.
+ DECL_ACCESSORS(dependent_code, DependentCode)
+
+ // [weak cell cache]: cache that stores a weak cell pointing to this map.
+ DECL_ACCESSORS(weak_cell_cache, Object)
+
+ inline PropertyDetails GetLastDescriptorDetails();
+
+ inline int LastAdded();
+
+ inline int NumberOfOwnDescriptors();
+ inline void SetNumberOfOwnDescriptors(int number);
+
+ inline Cell* RetrieveDescriptorsPointer();
+
+ // Checks whether all properties are stored either in the map or on the object
+ // (inobject, properties, or elements backing store), requiring no special
+ // checks.
+ bool OnlyHasSimpleProperties();
+ inline int EnumLength();
+ inline void SetEnumLength(int length);
+
+ inline bool owns_descriptors();
+ inline void set_owns_descriptors(bool owns_descriptors);
+ inline void mark_unstable();
+ inline bool is_stable();
+ inline void set_migration_target(bool value);
+ inline bool is_migration_target();
+ inline void set_immutable_proto(bool value);
+ inline bool is_immutable_proto();
+ inline void set_construction_counter(int value);
+ inline int construction_counter();
+ inline void deprecate();
+ inline bool is_deprecated();
+ inline bool CanBeDeprecated();
+ // Returns a non-deprecated version of the input. If the input was not
+ // deprecated, it is directly returned. Otherwise, the non-deprecated version
+ // is found by re-transitioning from the root of the transition tree using the
+ // descriptor array of the map. Returns MaybeHandle<Map>() if no updated map
+ // is found.
+ static MaybeHandle<Map> TryUpdate(Handle<Map> map) WARN_UNUSED_RESULT;
+
+ // Returns a non-deprecated version of the input. This method may deprecate
+ // existing maps along the way if encodings conflict. Not for use while
+ // gathering type feedback. Use TryUpdate in those cases instead.
+ static Handle<Map> Update(Handle<Map> map);
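+
+  // Editorial note: an illustrative sketch, not part of this patch, of the
+  // intended call pattern (`map` is a hypothetical handle):
+  //
+  //   Handle<Map> updated;
+  //   if (!Map::TryUpdate(map).ToHandle(&updated)) {
+  //     return;  // no updated map exists; never deprecate here
+  //   }
+  //   // ...or, where deprecating maps along the way is acceptable:
+  //   Handle<Map> new_map = Map::Update(map);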
+
+ static inline Handle<Map> CopyInitialMap(Handle<Map> map);
+ static Handle<Map> CopyInitialMap(Handle<Map> map, int instance_size,
+ int in_object_properties,
+ int unused_property_fields);
+ static Handle<Map> CopyInitialMapNormalized(
+ Handle<Map> map,
+ PropertyNormalizationMode mode = CLEAR_INOBJECT_PROPERTIES);
+ static Handle<Map> CopyDropDescriptors(Handle<Map> map);
+ static Handle<Map> CopyInsertDescriptor(Handle<Map> map,
+ Descriptor* descriptor,
+ TransitionFlag flag);
+
+ static Handle<Object> WrapFieldType(Handle<FieldType> type);
+ static FieldType* UnwrapFieldType(Object* wrapped_type);
+
+ MUST_USE_RESULT static MaybeHandle<Map> CopyWithField(
+ Handle<Map> map, Handle<Name> name, Handle<FieldType> type,
+ PropertyAttributes attributes, PropertyConstness constness,
+ Representation representation, TransitionFlag flag);
+
+ MUST_USE_RESULT static MaybeHandle<Map> CopyWithConstant(
+ Handle<Map> map, Handle<Name> name, Handle<Object> constant,
+ PropertyAttributes attributes, TransitionFlag flag);
+
+ // Returns a new map with all transitions dropped from the given map and
+ // the ElementsKind set.
+ static Handle<Map> TransitionElementsTo(Handle<Map> map,
+ ElementsKind to_kind);
+
+ static Handle<Map> AsElementsKind(Handle<Map> map, ElementsKind kind);
+
+ static Handle<Map> CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
+ TransitionFlag flag);
+
+ static Handle<Map> AsLanguageMode(Handle<Map> initial_map,
+ LanguageMode language_mode,
+ FunctionKind kind);
+
+ static Handle<Map> CopyForPreventExtensions(Handle<Map> map,
+ PropertyAttributes attrs_to_add,
+ Handle<Symbol> transition_marker,
+ const char* reason);
+
+ static Handle<Map> FixProxy(Handle<Map> map, InstanceType type, int size);
+
+ // Maximal number of fast properties. Used to restrict the number of map
+ // transitions to avoid an explosion in the number of maps for objects used as
+ // dictionaries.
+ inline bool TooManyFastProperties(StoreFromKeyed store_mode);
+ static Handle<Map> TransitionToDataProperty(Handle<Map> map,
+ Handle<Name> name,
+ Handle<Object> value,
+ PropertyAttributes attributes,
+ PropertyConstness constness,
+ StoreFromKeyed store_mode);
+ static Handle<Map> TransitionToAccessorProperty(
+ Isolate* isolate, Handle<Map> map, Handle<Name> name, int descriptor,
+ Handle<Object> getter, Handle<Object> setter,
+ PropertyAttributes attributes);
+ static Handle<Map> ReconfigureExistingProperty(Handle<Map> map,
+ int descriptor,
+ PropertyKind kind,
+ PropertyAttributes attributes);
+
+ inline void AppendDescriptor(Descriptor* desc);
+
+ // Returns a copy of the map, prepared for inserting into the transition
+ // tree (if the |map| owns descriptors then the new one will share
+ // descriptors with |map|).
+ static Handle<Map> CopyForTransition(Handle<Map> map, const char* reason);
+
+ // Returns a copy of the map, with all transitions dropped from the
+ // instance descriptors.
+ static Handle<Map> Copy(Handle<Map> map, const char* reason);
+ static Handle<Map> Create(Isolate* isolate, int inobject_properties);
+
+ // Returns the next free property index (only valid for FAST MODE).
+ int NextFreePropertyIndex();
+
+ // Returns the number of properties described in instance_descriptors
+ // filtering out properties with the specified attributes.
+ int NumberOfDescribedProperties(DescriptorFlag which = OWN_DESCRIPTORS,
+ PropertyFilter filter = ALL_PROPERTIES);
+
+ DECLARE_CAST(Map)
+
+ // Code cache operations.
+
+ // Clears the code cache.
+ inline void ClearCodeCache(Heap* heap);
+
+ // Update code cache.
+ static void UpdateCodeCache(Handle<Map> map, Handle<Name> name,
+ Handle<Code> code);
+
+ // Extend the descriptor array of the map with the list of descriptors.
+ // In case of duplicates, the latest descriptor is used.
+ static void AppendCallbackDescriptors(Handle<Map> map,
+ Handle<Object> descriptors);
+
+ static inline int SlackForArraySize(int old_size, int size_limit);
+
+ static void EnsureDescriptorSlack(Handle<Map> map, int slack);
+
+ Code* LookupInCodeCache(Name* name, Code::Flags code);
+
+ static Handle<Map> GetObjectCreateMap(Handle<HeapObject> prototype);
+
+ // Computes a hash value for this map, to be used in HashTables and such.
+ int Hash();
+
+ // Returns the transitioned map for this map with the most generic
+ // elements_kind that's found in |candidates|, or |nullptr| if no match is
+ // found at all.
+ Map* FindElementsKindTransitionedMap(MapHandles const& candidates);
+
+ inline bool CanTransition();
+
+ inline bool IsBooleanMap();
+ inline bool IsPrimitiveMap();
+ inline bool IsJSReceiverMap();
+ inline bool IsJSObjectMap();
+ inline bool IsJSArrayMap();
+ inline bool IsJSFunctionMap();
+ inline bool IsStringMap();
+ inline bool IsJSProxyMap();
+ inline bool IsModuleMap();
+ inline bool IsJSGlobalProxyMap();
+ inline bool IsJSGlobalObjectMap();
+ inline bool IsJSTypedArrayMap();
+ inline bool IsJSDataViewMap();
+
+ inline bool IsSpecialReceiverMap();
+
+ inline bool CanOmitMapChecks();
+
+ static void AddDependentCode(Handle<Map> map,
+ DependentCode::DependencyGroup group,
+ Handle<Code> code);
+
+ bool IsMapInArrayPrototypeChain();
+
+ static Handle<WeakCell> WeakCellForMap(Handle<Map> map);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(Map)
+ DECLARE_VERIFIER(Map)
+
+#ifdef VERIFY_HEAP
+ void DictionaryMapVerify();
+ void VerifyOmittedMapChecks();
+#endif
+
+ inline int visitor_id();
+ inline void set_visitor_id(int visitor_id);
+
+ static Handle<Map> TransitionToPrototype(Handle<Map> map,
+ Handle<Object> prototype,
+ PrototypeOptimizationMode mode);
+
+ static Handle<Map> TransitionToImmutableProto(Handle<Map> map);
+
+ static const int kMaxPreAllocatedPropertyFields = 255;
+
+ // Layout description.
+ static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
+ static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
+ static const int kBitField3Offset = kInstanceAttributesOffset + kIntSize;
+ static const int kPrototypeOffset = kBitField3Offset + kPointerSize;
+ static const int kConstructorOrBackPointerOffset =
+ kPrototypeOffset + kPointerSize;
+ // When there is only one transition, it is stored directly in this field;
+ // otherwise a transition array is used.
+ // For prototype maps, this slot is used to store this map's PrototypeInfo
+ // struct.
+ static const int kTransitionsOrPrototypeInfoOffset =
+ kConstructorOrBackPointerOffset + kPointerSize;
+ static const int kDescriptorsOffset =
+ kTransitionsOrPrototypeInfoOffset + kPointerSize;
+#if V8_DOUBLE_FIELDS_UNBOXING
+ static const int kLayoutDescriptorOffset = kDescriptorsOffset + kPointerSize;
+ static const int kCodeCacheOffset = kLayoutDescriptorOffset + kPointerSize;
+#else
+ static const int kLayoutDescriptorOffset = 1; // Must not be ever accessed.
+ static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
+#endif
+ static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize;
+ static const int kWeakCellCacheOffset = kDependentCodeOffset + kPointerSize;
+ static const int kSize = kWeakCellCacheOffset + kPointerSize;
+
+ // Layout of pointer fields. Heap iteration code relies on them
+  // being contiguously allocated.
+ static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
+ static const int kPointerFieldsEndOffset = kSize;
+
+ // Byte offsets within kInstanceSizesOffset.
+ static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
+ static const int kInObjectPropertiesOrConstructorFunctionIndexByte = 1;
+ static const int kInObjectPropertiesOrConstructorFunctionIndexOffset =
+ kInstanceSizesOffset + kInObjectPropertiesOrConstructorFunctionIndexByte;
+ // Note there is one byte available for use here.
+ static const int kUnusedByte = 2;
+ static const int kUnusedOffset = kInstanceSizesOffset + kUnusedByte;
+ static const int kVisitorIdByte = 3;
+ static const int kVisitorIdOffset = kInstanceSizesOffset + kVisitorIdByte;
+
+// Byte offsets within kInstanceAttributesOffset attributes.
+#if V8_TARGET_LITTLE_ENDIAN
+  // Order instance type and bit field together such that they can be loaded
+  // together as a 16-bit word with instance type in the lower 8 bits
+  // regardless of endianness. Also provide an endian-independent offset to
+  // that 16-bit word.
+ static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0;
+ static const int kBitFieldOffset = kInstanceAttributesOffset + 1;
+#else
+ static const int kBitFieldOffset = kInstanceAttributesOffset + 0;
+ static const int kInstanceTypeOffset = kInstanceAttributesOffset + 1;
+#endif
+ static const int kInstanceTypeAndBitFieldOffset =
+ kInstanceAttributesOffset + 0;
+ static const int kBitField2Offset = kInstanceAttributesOffset + 2;
+ static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 3;
+
+ STATIC_ASSERT(kInstanceTypeAndBitFieldOffset ==
+ Internals::kMapInstanceTypeAndBitFieldOffset);
+
+ // Bit positions for bit field.
+ static const int kHasNonInstancePrototype = 0;
+ static const int kIsCallable = 1;
+ static const int kHasNamedInterceptor = 2;
+ static const int kHasIndexedInterceptor = 3;
+ static const int kIsUndetectable = 4;
+ static const int kIsAccessCheckNeeded = 5;
+ static const int kIsConstructor = 6;
+ // Bit 7 is free.
+
+ // Bit positions for bit field 2
+ static const int kIsExtensible = 0;
+ // Bit 1 is free.
+ class IsPrototypeMapBits : public BitField<bool, 2, 1> {};
+ class ElementsKindBits : public BitField<ElementsKind, 3, 5> {};
+
+ // Derived values from bit field 2
+ static const int8_t kMaximumBitField2FastElementValue =
+ static_cast<int8_t>((FAST_ELEMENTS + 1)
+ << Map::ElementsKindBits::kShift) -
+ 1;
+ static const int8_t kMaximumBitField2FastSmiElementValue =
+ static_cast<int8_t>((FAST_SMI_ELEMENTS + 1)
+ << Map::ElementsKindBits::kShift) -
+ 1;
+ static const int8_t kMaximumBitField2FastHoleyElementValue =
+ static_cast<int8_t>((FAST_HOLEY_ELEMENTS + 1)
+ << Map::ElementsKindBits::kShift) -
+ 1;
+ static const int8_t kMaximumBitField2FastHoleySmiElementValue =
+ static_cast<int8_t>((FAST_HOLEY_SMI_ELEMENTS + 1)
+ << Map::ElementsKindBits::kShift) -
+ 1;
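+
+  // Editorial note (not part of this patch): as a worked example, assuming
+  // the ElementsKind ordering in src/elements-kind.h at this revision
+  // (FAST_HOLEY_ELEMENTS == 3) and ElementsKindBits::kShift == 3 from above,
+  // kMaximumBitField2FastHoleyElementValue is ((3 + 1) << 3) - 1 == 31, so
+  // any bit_field2 value up to 31 encodes one of the fast element kinds.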
+
+ typedef FixedBodyDescriptor<kPointerFieldsBeginOffset,
+ kPointerFieldsEndOffset, kSize>
+ BodyDescriptor;
+
+ // Compares this map to another to see if they describe equivalent objects.
+ // If |mode| is set to CLEAR_INOBJECT_PROPERTIES, |other| is treated as if
+ // it had exactly zero inobject properties.
+ // The "shared" flags of both this map and |other| are ignored.
+ bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
+
+ // Returns true if given field is unboxed double.
+ inline bool IsUnboxedDoubleField(FieldIndex index);
+
+#if V8_TRACE_MAPS
+ static void TraceTransition(const char* what, Map* from, Map* to, Name* name);
+ static void TraceAllTransitions(Map* map);
+#endif
+
+ static inline Handle<Map> AddMissingTransitionsForTesting(
+ Handle<Map> split_map, Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor);
+
+ // Fires when the layout of an object with a leaf map changes.
+ // This includes adding transitions to the leaf map or changing
+ // the descriptor array.
+ inline void NotifyLeafMapLayoutChange();
+
+ private:
+ // Returns the map that this (root) map transitions to if its elements_kind
+ // is changed to |elements_kind|, or |nullptr| if no such map is cached yet.
+ Map* LookupElementsTransitionMap(ElementsKind elements_kind);
+
+  // Tries to replay property transitions starting from this (root) map using
+  // the descriptor array of |map|. This root map is expected to already have
+  // the proper elements kind, so elements kind transitions are not taken by
+  // this function. Returns |nullptr| if no matching transition map is
+  // found.
+ Map* TryReplayPropertyTransitions(Map* map);
+
+ static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
+ Handle<Name> name, SimpleTransitionFlag flag);
+
+ bool EquivalentToForTransition(Map* other);
+ bool EquivalentToForElementsKindTransition(Map* other);
+ static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
+ static Handle<Map> ShareDescriptor(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ Descriptor* descriptor);
+ static Handle<Map> AddMissingTransitions(
+ Handle<Map> map, Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor);
+ static void InstallDescriptors(
+ Handle<Map> parent_map, Handle<Map> child_map, int new_descriptor,
+ Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> full_layout_descriptor);
+ static Handle<Map> CopyAddDescriptor(Handle<Map> map, Descriptor* descriptor,
+ TransitionFlag flag);
+ static Handle<Map> CopyReplaceDescriptors(
+ Handle<Map> map, Handle<DescriptorArray> descriptors,
+ Handle<LayoutDescriptor> layout_descriptor, TransitionFlag flag,
+ MaybeHandle<Name> maybe_name, const char* reason,
+ SimpleTransitionFlag simple_flag);
+
+ static Handle<Map> CopyReplaceDescriptor(Handle<Map> map,
+ Handle<DescriptorArray> descriptors,
+ Descriptor* descriptor, int index,
+ TransitionFlag flag);
+ static MUST_USE_RESULT MaybeHandle<Map> TryReconfigureExistingProperty(
+ Handle<Map> map, int descriptor, PropertyKind kind,
+ PropertyAttributes attributes, const char** reason);
+
+ static Handle<Map> CopyNormalized(Handle<Map> map,
+ PropertyNormalizationMode mode);
+
+ // TODO(ishell): Move to MapUpdater.
+ static Handle<Map> CopyGeneralizeAllFields(
+ Handle<Map> map, ElementsKind elements_kind, int modify_index,
+ PropertyKind kind, PropertyAttributes attributes, const char* reason);
+
+ void DeprecateTransitionTree();
+
+ void ReplaceDescriptors(DescriptorArray* new_descriptors,
+ LayoutDescriptor* new_layout_descriptor);
+
+  // Updates the field type of the given descriptor to the new representation
+  // and new type. The type must be prepared for storage in the descriptor
+  // array: it must be either a simple type or a map wrapped in a weak cell.
+ void UpdateFieldType(int descriptor_number, Handle<Name> name,
+ PropertyConstness new_constness,
+ Representation new_representation,
+ Handle<Object> new_wrapped_type);
+
+ // TODO(ishell): Move to MapUpdater.
+ void PrintReconfiguration(FILE* file, int modify_index, PropertyKind kind,
+ PropertyAttributes attributes);
+ // TODO(ishell): Move to MapUpdater.
+ void PrintGeneralization(FILE* file, const char* reason, int modify_index,
+ int split, int descriptors, bool constant_to_field,
+ Representation old_representation,
+ Representation new_representation,
+ MaybeHandle<FieldType> old_field_type,
+ MaybeHandle<Object> old_value,
+ MaybeHandle<FieldType> new_field_type,
+ MaybeHandle<Object> new_value);
+ static const int kFastPropertiesSoftLimit = 12;
+ static const int kMaxFastProperties = 128;
+
+ friend class MapUpdater;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
+};
+
+// The cache for maps used by normalized (dictionary mode) objects.
+// Such maps do not have property descriptors, so a typical program
+// needs only a very limited number of distinct normalized maps.
+class NormalizedMapCache : public FixedArray {
+ public:
+ static Handle<NormalizedMapCache> New(Isolate* isolate);
+
+ MUST_USE_RESULT MaybeHandle<Map> Get(Handle<Map> fast_map,
+ PropertyNormalizationMode mode);
+ void Set(Handle<Map> fast_map, Handle<Map> normalized_map);
+
+ void Clear();
+
+ DECLARE_CAST(NormalizedMapCache)
+
+ static inline bool IsNormalizedMapCache(const HeapObject* obj);
+
+ DECLARE_VERIFIER(NormalizedMapCache)
+ private:
+ static const int kEntries = 64;
+
+ static inline int GetIndex(Handle<Map> map);
+
+ // The following declarations hide base class methods.
+ Object* get(int index);
+ void set(int index, Object* value);
+};
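+
+// Illustrative sketch, not part of this change: intended lookup-then-fill
+// usage when normalizing a map (locals here are hypothetical):
+//
+//   Handle<Map> normalized;
+//   if (!cache->Get(fast_map, mode).ToHandle(&normalized)) {
+//     normalized = BuildNormalizedMap(fast_map, mode);  // hypothetical
+//     cache->Set(fast_map, normalized);
+//   }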
+
+} // namespace internal
+} // namespace v8
+
+#include "src/objects/object-macros-undef.h"
+
+#endif // V8_OBJECTS_MAP_H_
diff --git a/deps/v8/src/objects/string-table.h b/deps/v8/src/objects/string-table.h
index f2ecb55c60..a827895e6a 100644
--- a/deps/v8/src/objects/string-table.h
+++ b/deps/v8/src/objects/string-table.h
@@ -58,6 +58,7 @@ class StringTable
Isolate* isolate, Handle<String> str);
MUST_USE_RESULT static MaybeHandle<String> LookupTwoCharsStringIfExists(
Isolate* isolate, uint16_t c1, uint16_t c2);
+ static Object* LookupStringIfExists_NoAllocate(String* string);
static void EnsureCapacityForDeserialization(Isolate* isolate, int expected);
diff --git a/deps/v8/src/parsing/parse-info.cc b/deps/v8/src/parsing/parse-info.cc
index 816a854654..12329307ac 100644
--- a/deps/v8/src/parsing/parse-info.cc
+++ b/deps/v8/src/parsing/parse-info.cc
@@ -50,7 +50,6 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
set_toplevel(shared->is_toplevel());
set_allow_lazy_parsing(FLAG_lazy_inner_functions);
set_is_named_expression(shared->is_named_expression());
- set_calls_eval(shared->scope_info()->CallsEval());
set_compiler_hints(shared->compiler_hints());
set_start_position(shared->start_position());
set_end_position(shared->end_position());
@@ -58,7 +57,6 @@ ParseInfo::ParseInfo(Handle<SharedFunctionInfo> shared)
set_language_mode(shared->language_mode());
set_shared_info(shared);
set_module(shared->kind() == FunctionKind::kModule);
- set_scope_info_is_empty(shared->scope_info() == ScopeInfo::Empty(isolate));
Handle<Script> script(Script::cast(shared->script()));
set_script(script);
@@ -107,7 +105,6 @@ ParseInfo* ParseInfo::AllocateWithoutScript(Handle<SharedFunctionInfo> shared) {
p->set_toplevel(shared->is_toplevel());
p->set_allow_lazy_parsing(FLAG_lazy_inner_functions);
p->set_is_named_expression(shared->is_named_expression());
- p->set_calls_eval(shared->scope_info()->CallsEval());
p->set_compiler_hints(shared->compiler_hints());
p->set_start_position(shared->start_position());
p->set_end_position(shared->end_position());
@@ -115,7 +112,6 @@ ParseInfo* ParseInfo::AllocateWithoutScript(Handle<SharedFunctionInfo> shared) {
p->set_language_mode(shared->language_mode());
p->set_shared_info(shared);
p->set_module(shared->kind() == FunctionKind::kModule);
- p->set_scope_info_is_empty(shared->scope_info() == ScopeInfo::Empty(isolate));
// BUG(5946): This function exists as a workaround until we can
// get rid of %SetCode in our native functions. The ParseInfo
@@ -168,6 +164,38 @@ void ParseInfo::InitFromIsolate(Isolate* isolate) {
set_ast_string_constants(isolate->ast_string_constants());
}
+void ParseInfo::UpdateStatisticsAfterBackgroundParse(Isolate* isolate) {
+ // Copy over the counters from the background thread to the main counters on
+ // the isolate.
+ RuntimeCallStats* main_call_stats = isolate->counters()->runtime_call_stats();
+ if (FLAG_runtime_stats ==
+ v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE) {
+ DCHECK_NE(main_call_stats, runtime_call_stats());
+ DCHECK_NOT_NULL(main_call_stats);
+ DCHECK_NOT_NULL(runtime_call_stats());
+ main_call_stats->Add(runtime_call_stats());
+ }
+ set_runtime_call_stats(main_call_stats);
+}
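+
+// Illustrative sketch, not part of this change: the main thread is expected
+// to call this once after a background parse completes, e.g.:
+//
+//   parse_info->UpdateStatisticsAfterBackgroundParse(isolate);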
+
+void ParseInfo::ParseFinished(std::unique_ptr<ParseInfo> info) {
+ if (info->literal()) {
+ base::LockGuard<base::Mutex> access_child_infos(&child_infos_mutex_);
+ child_infos_.emplace_back(std::move(info));
+ }
+}
+
+std::map<int, ParseInfo*> ParseInfo::child_infos() const {
+ base::LockGuard<base::Mutex> access_child_infos(&child_infos_mutex_);
+ std::map<int, ParseInfo*> rv;
+ for (const auto& child_info : child_infos_) {
+ DCHECK_NOT_NULL(child_info->literal());
+ int start_position = child_info->literal()->start_position();
+ rv.insert(std::make_pair(start_position, child_info.get()));
+ }
+ return rv;
+}
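+
+// Illustrative sketch, not part of this change: since the returned map is
+// keyed by each FunctionLiteral's start_position, a consumer can visit the
+// background-parsed children in source order:
+//
+//   for (const auto& entry : info->child_infos()) {
+//     ParseInfo* child = entry.second;  // entry.first == start_position
+//     // ... stitch child->literal() into the outer AST ...
+//   }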
+
#ifdef DEBUG
bool ParseInfo::script_is_native() const {
return script_->type() == Script::TYPE_NATIVE;
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index c115126191..5d8bb9c8eb 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -5,9 +5,12 @@
#ifndef V8_PARSING_PARSE_INFO_H_
#define V8_PARSING_PARSE_INFO_H_
+#include <map>
#include <memory>
+#include <vector>
#include "include/v8.h"
+#include "src/compiler-dispatcher/compiler-dispatcher-job.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/parsing/preparsed-scope-data.h"
@@ -33,7 +36,7 @@ class Utf16CharacterStream;
class Zone;
// A container for the inputs, configuration options, and outputs of parsing.
-class V8_EXPORT_PRIVATE ParseInfo {
+class V8_EXPORT_PRIVATE ParseInfo : public CompileJobFinishCallback {
public:
explicit ParseInfo(AccountingAllocator* zone_allocator);
ParseInfo(Handle<Script> script);
@@ -74,10 +77,8 @@ class V8_EXPORT_PRIVATE ParseInfo {
set_ast_value_factory_owned)
FLAG_ACCESSOR(kIsNamedExpression, is_named_expression,
set_is_named_expression)
- FLAG_ACCESSOR(kCallsEval, calls_eval, set_calls_eval)
FLAG_ACCESSOR(kDebug, is_debug, set_is_debug)
FLAG_ACCESSOR(kSerializing, will_serialize, set_will_serialize)
- FLAG_ACCESSOR(kScopeInfoIsEmpty, scope_info_is_empty, set_scope_info_is_empty)
FLAG_ACCESSOR(kTailCallEliminationEnabled, is_tail_call_elimination_enabled,
set_tail_call_elimination_enabled)
@@ -245,6 +246,13 @@ class V8_EXPORT_PRIVATE ParseInfo {
}
}
+ void UpdateStatisticsAfterBackgroundParse(Isolate* isolate);
+
+  // The key of the map is the FunctionLiteral's start_position.
+ std::map<int, ParseInfo*> child_infos() const;
+
+ void ParseFinished(std::unique_ptr<ParseInfo> info) override;
+
#ifdef DEBUG
bool script_is_native() const;
#endif // DEBUG
@@ -262,12 +270,10 @@ class V8_EXPORT_PRIVATE ParseInfo {
kModule = 1 << 6,
kAllowLazyParsing = 1 << 7,
kIsNamedExpression = 1 << 8,
- kCallsEval = 1 << 9,
- kDebug = 1 << 10,
- kSerializing = 1 << 11,
- kScopeInfoIsEmpty = 1 << 12,
- kTailCallEliminationEnabled = 1 << 13,
- kAstValueFactoryOwned = 1 << 14,
+ kDebug = 1 << 9,
+ kSerializing = 1 << 10,
+ kTailCallEliminationEnabled = 1 << 11,
+ kAstValueFactoryOwned = 1 << 12,
};
//------------- Inputs to parsing and scope analysis -----------------------
@@ -307,6 +313,9 @@ class V8_EXPORT_PRIVATE ParseInfo {
FunctionLiteral* literal_;
std::shared_ptr<DeferredHandles> deferred_handles_;
+ std::vector<std::unique_ptr<ParseInfo>> child_infos_;
+ mutable base::Mutex child_infos_mutex_;
+
void SetFlag(Flag f) { flags_ |= f; }
void SetFlag(Flag f, bool v) { flags_ = v ? flags_ | f : flags_ & ~f; }
bool GetFlag(Flag f) const { return (flags_ & f) != 0; }
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 1dbad01dea..2d01398980 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -20,6 +20,7 @@
namespace v8 {
namespace internal {
+class PreParsedScopeData;
enum FunctionNameValidity {
kFunctionNameIsStrictReserved,
@@ -200,6 +201,7 @@ class ParserBase {
ParserBase(Zone* zone, Scanner* scanner, uintptr_t stack_limit,
v8::Extension* extension, AstValueFactory* ast_value_factory,
RuntimeCallStats* runtime_call_stats,
+ PreParsedScopeData* preparsed_scope_data,
bool parsing_on_main_thread = true)
: scope_(nullptr),
original_scope_(nullptr),
@@ -207,11 +209,12 @@ class ParserBase {
extension_(extension),
fni_(nullptr),
ast_value_factory_(ast_value_factory),
- ast_node_factory_(ast_value_factory),
+ ast_node_factory_(ast_value_factory, zone),
runtime_call_stats_(runtime_call_stats),
parsing_on_main_thread_(parsing_on_main_thread),
parsing_module_(false),
stack_limit_(stack_limit),
+ preparsed_scope_data_(preparsed_scope_data),
zone_(zone),
classifier_(nullptr),
scanner_(scanner),
@@ -448,6 +451,30 @@ class ParserBase {
next_function_is_likely_called_ = true;
}
+ void RecordFunctionOrEvalCall() { contains_function_or_eval_ = true; }
+ bool contains_function_or_eval() const {
+ return contains_function_or_eval_;
+ }
+
+ class FunctionOrEvalRecordingScope {
+ public:
+ explicit FunctionOrEvalRecordingScope(FunctionState* state)
+ : state_(state) {
+ prev_value_ = state->contains_function_or_eval_;
+ state->contains_function_or_eval_ = false;
+ }
+ ~FunctionOrEvalRecordingScope() {
+ bool found = state_->contains_function_or_eval_;
+ if (!found) {
+ state_->contains_function_or_eval_ = prev_value_;
+ }
+ }
+
+ private:
+ FunctionState* state_;
+ bool prev_value_;
+ };
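+
+  // Illustrative sketch, not part of this change: the recording scope
+  // brackets a region (e.g. a for statement) so the flag reflects only what
+  // that region contained, restoring the outer value when nothing is found:
+  //
+  //   FunctionOrEvalRecordingScope recording(function_state_);
+  //   /* ... parse the bracketed region ... */
+  //   if (function_state_->contains_function_or_eval()) { /* desugar */ }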
+
private:
void AddDestructuringAssignment(DestructuringAssignment pair) {
destructuring_assignments_to_rewrite_.Add(pair, scope_->zone());
@@ -482,6 +509,9 @@ class ParserBase {
bool next_function_is_likely_called_;
bool previous_function_was_likely_called_;
+  // Tracks whether an inner function or an eval call occurs within this
+  // FunctionState.
+ bool contains_function_or_eval_;
+
friend Impl;
};
@@ -601,7 +631,8 @@ class ParserBase {
constructor(parser->impl()->EmptyFunctionLiteral()),
has_seen_constructor(false),
has_name_static_property(false),
- has_static_computed_names(false) {}
+ has_static_computed_names(false),
+ is_anonymous(false) {}
VariableProxy* proxy;
ExpressionT extends;
typename Types::ClassPropertyList properties;
@@ -609,6 +640,7 @@ class ParserBase {
bool has_seen_constructor;
bool has_name_static_property;
bool has_static_computed_names;
+ bool is_anonymous;
};
DeclarationScope* NewScriptScope() const {
@@ -653,6 +685,10 @@ class ParserBase {
if (target_zone == nullptr) target_zone = zone();
DeclarationScope* result = new (target_zone)
DeclarationScope(zone(), scope(), FUNCTION_SCOPE, kind);
+
+  // Record the presence of an inner function scope.
+ function_state_->RecordFunctionOrEvalCall();
+
// TODO(verwaest): Move into the DeclarationScope constructor.
if (!IsArrowFunction(kind)) {
result->DeclareDefaultFunctionVariables(ast_value_factory());
@@ -1337,6 +1373,7 @@ class ParserBase {
if (impl()->IsIdentifier(expression) &&
impl()->IsEval(impl()->AsIdentifier(expression))) {
scope->RecordEvalCall();
+ function_state_->RecordFunctionOrEvalCall();
if (is_sloppy(scope->language_mode())) {
// For sloppy scopes we also have to record the call at function level,
// in case it includes declarations that will be hoisted.
@@ -1486,6 +1523,7 @@ class ParserBase {
bool parsing_on_main_thread_;
bool parsing_module_;
uintptr_t stack_limit_;
+ PreParsedScopeData* preparsed_scope_data_;
// Parser base's private field members.
@@ -1530,7 +1568,8 @@ ParserBase<Impl>::FunctionState::FunctionState(
non_patterns_to_rewrite_(0, scope->zone()),
reported_errors_(16, scope->zone()),
next_function_is_likely_called_(false),
- previous_function_was_likely_called_(false) {
+ previous_function_was_likely_called_(false),
+ contains_function_or_eval_(false) {
*function_state_stack = this;
if (outer_function_state_) {
outer_function_state_->previous_function_was_likely_called_ =
@@ -1933,6 +1972,11 @@ ParserBase<Impl>::ParseExpressionCoverGrammar(bool accept_IN, bool* ok) {
int ellipsis_pos = position();
int pattern_pos = peek_position();
ExpressionT pattern = ParsePrimaryExpression(CHECK_OK);
+ if (peek() == Token::ASSIGN) {
+ ReportMessage(MessageTemplate::kRestDefaultInitializer);
+ *ok = false;
+ return result;
+ }
ValidateBindingPattern(CHECK_OK);
right = factory()->NewSpread(pattern, ellipsis_pos, pattern_pos);
} else {
@@ -2222,12 +2266,12 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
Token::Value name_token = peek();
- int function_token_position = scanner()->peek_location().beg_pos;
+ int name_token_position = scanner()->peek_location().beg_pos;
IdentifierT name = impl()->EmptyIdentifier();
ExpressionT name_expression;
if (name_token == Token::STATIC) {
Consume(Token::STATIC);
- function_token_position = scanner()->peek_location().beg_pos;
+ name_token_position = scanner()->peek_location().beg_pos;
if (peek() == Token::LPAREN) {
kind = PropertyKind::kMethodProperty;
name = impl()->GetSymbol(); // TODO(bakkot) specialize on 'static'
@@ -2305,7 +2349,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
ExpressionT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
- FLAG_harmony_function_tostring ? function_token_position
+ FLAG_harmony_function_tostring ? name_token_position
: kNoSourcePosition,
FunctionLiteral::kAccessorOrMethod, language_mode(),
CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
@@ -2335,7 +2379,7 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
FunctionLiteralT value = impl()->ParseFunctionLiteral(
name, scanner()->location(), kSkipFunctionNameCheck, kind,
- FLAG_harmony_function_tostring ? function_token_position
+ FLAG_harmony_function_tostring ? name_token_position
: kNoSourcePosition,
FunctionLiteral::kAccessorOrMethod, language_mode(),
CHECK_OK_CUSTOM(EmptyClassLiteralProperty));
@@ -2351,7 +2395,11 @@ ParserBase<Impl>::ParseClassPropertyDefinition(
*is_computed_name);
}
case PropertyKind::kSpreadProperty:
- UNREACHABLE();
+ ReportUnexpectedTokenAt(
+ Scanner::Location(name_token_position, name_expression->position()),
+ name_token);
+ *ok = false;
+ return impl()->EmptyClassLiteralProperty();
}
UNREACHABLE();
return impl()->EmptyClassLiteralProperty();
@@ -2672,6 +2720,10 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
spread_arg.beg_pos = start_pos;
spread_arg.end_pos = peek_position();
}
+ if (argument->IsAssignment()) {
+ classifier()->RecordAsyncArrowFormalParametersError(
+ scanner()->location(), MessageTemplate::kRestDefaultInitializer);
+ }
argument = factory()->NewSpread(argument, start_pos, expr_pos);
}
result->Add(argument, zone_);
@@ -2684,6 +2736,10 @@ typename ParserBase<Impl>::ExpressionListT ParserBase<Impl>::ParseArguments(
done = (peek() != Token::COMMA);
if (!done) {
Next();
+ if (argument->IsSpread()) {
+ classifier()->RecordAsyncArrowFormalParametersError(
+ scanner()->location(), MessageTemplate::kParamAfterRest);
+ }
if (allow_harmony_trailing_commas() && peek() == Token::RPAREN) {
// allow trailing comma
done = true;
@@ -3265,6 +3321,9 @@ ParserBase<Impl>::ParseLeftHandSideExpression(bool* ok) {
// Explicit calls to the super constructor using super() perform an
// implicit binding assignment to the 'this' variable.
if (is_super_call) {
+ classifier()->RecordAssignmentPatternError(
+ Scanner::Location(pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
ExpressionT this_expr = impl()->ThisExpression(pos);
result =
factory()->NewAssignment(Token::INIT, this_expr, result, pos);
@@ -3418,6 +3477,10 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseMemberExpression(
if (impl()->ParsingDynamicFunctionDeclaration()) {
// We don't want dynamic functions to actually declare their name
// "anonymous". We just want that name in the toString().
+ if (stack_overflow()) {
+ *ok = false;
+ return impl()->EmptyExpression();
+ }
Consume(Token::IDENTIFIER);
DCHECK(scanner()->CurrentMatchesContextual(Token::ANONYMOUS));
} else if (peek_any_identifier()) {
@@ -3506,6 +3569,10 @@ ParserBase<Impl>::ParseNewTargetExpression(bool* ok) {
int pos = position();
ExpectMetaProperty(Token::TARGET, "new.target", pos, CHECK_OK);
+ classifier()->RecordAssignmentPatternError(
+ Scanner::Location(pos, scanner()->location().end_pos),
+ MessageTemplate::kInvalidDestructuringTarget);
+
if (!GetReceiverScope()->is_function_scope()) {
impl()->ReportMessageAt(scanner()->location(),
MessageTemplate::kUnexpectedNewTarget);
@@ -3603,7 +3670,12 @@ void ParserBase<Impl>::ParseFormalParameter(FormalParametersT* parameters,
}
ExpressionT initializer = impl()->EmptyExpression();
- if (!is_rest && Check(Token::ASSIGN)) {
+ if (Check(Token::ASSIGN)) {
+ if (is_rest) {
+ ReportMessage(MessageTemplate::kRestDefaultInitializer);
+ *ok = false;
+ return;
+ }
ExpressionClassifier init_classifier(this);
initializer = ParseAssignmentExpression(true, CHECK_OK_CUSTOM(Void));
impl()->RewriteNonPattern(CHECK_OK_CUSTOM(Void));
@@ -4216,32 +4288,20 @@ ParserBase<Impl>::ParseArrowFunctionLiteral(
// FIXME(marja): Arrow function parameters will be parsed even if the
// body is preparsed; move relevant parts of parameter handling to
// simulate consistent parameter handling.
- Scanner::BookmarkScope bookmark(scanner());
- bookmark.Set();
+
// For arrow functions, we don't need to retrieve data about function
// parameters.
int dummy_num_parameters = -1;
- int dummy_function_length = -1;
DCHECK((kind & FunctionKind::kArrowFunction) != 0);
- LazyParsingResult result = impl()->SkipFunction(
- kind, formal_parameters.scope, &dummy_num_parameters,
- &dummy_function_length, false, true, CHECK_OK);
- formal_parameters.scope->ResetAfterPreparsing(
- ast_value_factory_, result == kLazyParsingAborted);
-
- if (result == kLazyParsingAborted) {
- bookmark.Apply();
- // Trigger eager (re-)parsing, just below this block.
- is_lazy_top_level_function = false;
-
- // This is probably an initialization function. Inform the compiler it
- // should also eager-compile this function, and that we expect it to
- // be used once.
- eager_compile_hint = FunctionLiteral::kShouldEagerCompile;
- should_be_used_once_hint = true;
- }
- }
- if (!is_lazy_top_level_function) {
+ LazyParsingResult result =
+ impl()->SkipFunction(kind, formal_parameters.scope,
+ &dummy_num_parameters, false, false, CHECK_OK);
+ DCHECK_NE(result, kLazyParsingAborted);
+ USE(result);
+ formal_parameters.scope->ResetAfterPreparsing(ast_value_factory_,
+ false);
+
+ } else {
Consume(Token::LBRACE);
body = impl()->NewStatementList(8);
impl()->ParseFunctionBody(body, impl()->EmptyIdentifier(),
@@ -4339,24 +4399,30 @@ template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
IdentifierT name, Scanner::Location class_name_location,
bool name_is_strict_reserved, int class_token_pos, bool* ok) {
+ bool is_anonymous = impl()->IsEmptyIdentifier(name);
+
// All parts of a ClassDeclaration and ClassExpression are strict code.
- if (name_is_strict_reserved) {
- impl()->ReportMessageAt(class_name_location,
- MessageTemplate::kUnexpectedStrictReserved);
- *ok = false;
- return impl()->EmptyExpression();
- }
- if (impl()->IsEvalOrArguments(name)) {
- impl()->ReportMessageAt(class_name_location,
- MessageTemplate::kStrictEvalArguments);
- *ok = false;
- return impl()->EmptyExpression();
+ if (!is_anonymous) {
+ if (name_is_strict_reserved) {
+ impl()->ReportMessageAt(class_name_location,
+ MessageTemplate::kUnexpectedStrictReserved);
+ *ok = false;
+ return impl()->EmptyExpression();
+ }
+ if (impl()->IsEvalOrArguments(name)) {
+ impl()->ReportMessageAt(class_name_location,
+ MessageTemplate::kStrictEvalArguments);
+ *ok = false;
+ return impl()->EmptyExpression();
+ }
}
- BlockState block_state(zone(), &scope_);
+ Scope* block_scope = NewScope(BLOCK_SCOPE);
+ BlockState block_state(&scope_, block_scope);
RaiseLanguageMode(STRICT);
ClassInfo class_info(this);
+ class_info.is_anonymous = is_anonymous;
impl()->DeclareClassVariable(name, &class_info, class_token_pos, CHECK_OK);
scope()->set_start_position(scanner()->location().end_pos);
@@ -4401,7 +4467,10 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseClassLiteral(
}
Expect(Token::RBRACE, CHECK_OK);
- return impl()->RewriteClassLiteral(name, &class_info, class_token_pos, ok);
+ int end_pos = scanner()->location().end_pos;
+ block_scope->set_end_position(end_pos);
+ return impl()->RewriteClassLiteral(block_scope, name, &class_info,
+ class_token_pos, end_pos, ok);
}
template <typename Impl>
@@ -4452,6 +4521,10 @@ ParserBase<Impl>::ParseAsyncFunctionLiteral(bool* ok) {
if (impl()->ParsingDynamicFunctionDeclaration()) {
// We don't want dynamic functions to actually declare their name
// "anonymous". We just want that name in the toString().
+ if (stack_overflow()) {
+ *ok = false;
+ return impl()->EmptyExpression();
+ }
Consume(Token::IDENTIFIER);
DCHECK(scanner()->CurrentMatchesContextual(Token::ANONYMOUS));
} else if (peek_any_identifier()) {
@@ -5460,6 +5533,8 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseTryStatement(
template <typename Impl>
typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
ZoneList<const AstRawString*>* labels, bool* ok) {
+ typename FunctionState::FunctionOrEvalRecordingScope recording_scope(
+ function_state_);
int stmt_pos = peek_position();
ForInfo for_info(this);
bool bound_names_are_lexical = false;
@@ -5469,7 +5544,6 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseForStatement(
Expect(Token::FOR, CHECK_OK);
Expect(Token::LPAREN, CHECK_OK);
scope()->set_start_position(scanner()->location().beg_pos);
- scope()->set_is_hidden();
StatementT init = impl()->NullStatement();
@@ -5527,6 +5601,7 @@ typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseForEachStatementWithDeclarations(
int stmt_pos, ForInfo* for_info, ZoneList<const AstRawString*>* labels,
bool* ok) {
+ scope()->set_is_hidden();
// Just one declaration followed by in/of.
if (for_info->parsing_result.declarations.length() != 1) {
impl()->ReportMessageAt(for_info->parsing_result.bindings_loc,
@@ -5607,6 +5682,7 @@ typename ParserBase<Impl>::StatementT
ParserBase<Impl>::ParseForEachStatementWithoutDeclarations(
int stmt_pos, ExpressionT expression, int lhs_beg_pos, int lhs_end_pos,
ForInfo* for_info, ZoneList<const AstRawString*>* labels, bool* ok) {
+ scope()->set_is_hidden();
// Initializer is reference followed by in/of.
if (!expression->IsArrayLiteral() && !expression->IsObjectLiteral()) {
expression = impl()->CheckAndRewriteReferenceExpression(
@@ -5690,15 +5766,15 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStandardForLoop(
body = ParseStatement(nullptr, CHECK_OK);
}
- if (bound_names_are_lexical && for_info->bound_names.length() > 0) {
- auto result = impl()->DesugarLexicalBindingsInForStatement(
+ scope()->set_end_position(scanner()->location().end_pos);
+ inner_scope->set_end_position(scanner()->location().end_pos);
+ if (bound_names_are_lexical && for_info->bound_names.length() > 0 &&
+ (is_resumable() || function_state_->contains_function_or_eval())) {
+ scope()->set_is_hidden();
+ return impl()->DesugarLexicalBindingsInForStatement(
loop, init, cond, next, body, inner_scope, *for_info, CHECK_OK);
- scope()->set_end_position(scanner()->location().end_pos);
- inner_scope->set_end_position(scanner()->location().end_pos);
- return result;
}
- scope()->set_end_position(scanner()->location().end_pos);
Scope* for_scope = scope()->FinalizeBlockScope();
if (for_scope != nullptr) {
// Rewrite a for statement of the form
@@ -5722,6 +5798,7 @@ typename ParserBase<Impl>::StatementT ParserBase<Impl>::ParseStandardForLoop(
BlockT block = factory()->NewBlock(nullptr, 2, false, kNoSourcePosition);
if (!impl()->IsNullStatement(init)) {
block->statements()->Add(init, zone());
+ init = impl()->NullStatement();
}
block->statements()->Add(loop, zone());
block->set_scope(for_scope);
diff --git a/deps/v8/src/parsing/parser.cc b/deps/v8/src/parsing/parser.cc
index 6a2c2db0d1..a4f40dda53 100644
--- a/deps/v8/src/parsing/parser.cc
+++ b/deps/v8/src/parsing/parser.cc
@@ -4,6 +4,7 @@
#include "src/parsing/parser.h"
+#include <algorithm>
#include <memory>
#include "src/api.h"
@@ -14,6 +15,7 @@
#include "src/bailout-reason.h"
#include "src/base/platform/platform.h"
#include "src/char-predicates-inl.h"
+#include "src/compiler-dispatcher/compiler-dispatcher.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/parsing/duplicate-finder.h"
@@ -109,8 +111,7 @@ int ParseData::FunctionsSize() {
class DiscardableZoneScope {
public:
DiscardableZoneScope(Parser* parser, Zone* temp_zone, bool use_temp_zone)
- : ast_node_factory_scope_(parser->factory(), temp_zone, use_temp_zone),
- fni_(parser->ast_value_factory_, temp_zone),
+ : fni_(parser->ast_value_factory_, temp_zone),
parser_(parser),
prev_fni_(parser->fni_),
prev_zone_(parser->zone_),
@@ -122,6 +123,7 @@ class DiscardableZoneScope {
parser_->temp_zoned_ = true;
parser_->fni_ = &fni_;
parser_->zone_ = temp_zone;
+ parser_->factory()->set_zone(temp_zone);
if (parser_->reusable_preparser_ != nullptr) {
parser_->reusable_preparser_->zone_ = temp_zone;
parser_->reusable_preparser_->factory()->set_zone(temp_zone);
@@ -131,18 +133,17 @@ class DiscardableZoneScope {
void Reset() {
parser_->fni_ = prev_fni_;
parser_->zone_ = prev_zone_;
+ parser_->factory()->set_zone(prev_zone_);
parser_->allow_lazy_ = prev_allow_lazy_;
parser_->temp_zoned_ = prev_temp_zoned_;
if (parser_->reusable_preparser_ != nullptr) {
parser_->reusable_preparser_->zone_ = prev_zone_;
parser_->reusable_preparser_->factory()->set_zone(prev_zone_);
}
- ast_node_factory_scope_.Reset();
}
~DiscardableZoneScope() { Reset(); }
private:
- AstNodeFactory::BodyScope ast_node_factory_scope_;
FuncNameInferrer fni_;
Parser* parser_;
FuncNameInferrer* prev_fni_;
@@ -250,60 +251,56 @@ bool Parser::ShortcutNumericLiteralBinaryExpression(Expression** x,
y->AsLiteral() && y->AsLiteral()->raw_value()->IsNumber()) {
double x_val = (*x)->AsLiteral()->raw_value()->AsNumber();
double y_val = y->AsLiteral()->raw_value()->AsNumber();
- bool x_has_dot = (*x)->AsLiteral()->raw_value()->ContainsDot();
- bool y_has_dot = y->AsLiteral()->raw_value()->ContainsDot();
- bool has_dot = x_has_dot || y_has_dot;
switch (op) {
case Token::ADD:
- *x = factory()->NewNumberLiteral(x_val + y_val, pos, has_dot);
+ *x = factory()->NewNumberLiteral(x_val + y_val, pos);
return true;
case Token::SUB:
- *x = factory()->NewNumberLiteral(x_val - y_val, pos, has_dot);
+ *x = factory()->NewNumberLiteral(x_val - y_val, pos);
return true;
case Token::MUL:
- *x = factory()->NewNumberLiteral(x_val * y_val, pos, has_dot);
+ *x = factory()->NewNumberLiteral(x_val * y_val, pos);
return true;
case Token::DIV:
- *x = factory()->NewNumberLiteral(x_val / y_val, pos, has_dot);
+ *x = factory()->NewNumberLiteral(x_val / y_val, pos);
return true;
case Token::BIT_OR: {
int value = DoubleToInt32(x_val) | DoubleToInt32(y_val);
- *x = factory()->NewNumberLiteral(value, pos, has_dot);
+ *x = factory()->NewNumberLiteral(value, pos);
return true;
}
case Token::BIT_AND: {
int value = DoubleToInt32(x_val) & DoubleToInt32(y_val);
- *x = factory()->NewNumberLiteral(value, pos, has_dot);
+ *x = factory()->NewNumberLiteral(value, pos);
return true;
}
case Token::BIT_XOR: {
int value = DoubleToInt32(x_val) ^ DoubleToInt32(y_val);
- *x = factory()->NewNumberLiteral(value, pos, has_dot);
+ *x = factory()->NewNumberLiteral(value, pos);
return true;
}
case Token::SHL: {
int value = DoubleToInt32(x_val) << (DoubleToInt32(y_val) & 0x1f);
- *x = factory()->NewNumberLiteral(value, pos, has_dot);
+ *x = factory()->NewNumberLiteral(value, pos);
return true;
}
case Token::SHR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
uint32_t value = DoubleToUint32(x_val) >> shift;
- *x = factory()->NewNumberLiteral(value, pos, has_dot);
+ *x = factory()->NewNumberLiteral(value, pos);
return true;
}
case Token::SAR: {
uint32_t shift = DoubleToInt32(y_val) & 0x1f;
int value = ArithmeticShiftRight(DoubleToInt32(x_val), shift);
- *x = factory()->NewNumberLiteral(value, pos, has_dot);
+ *x = factory()->NewNumberLiteral(value, pos);
return true;
}
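+      // Illustrative note, not part of this change: the shift cases mask the
+      // count with 0x1f, matching ECMAScript shift semantics where only the
+      // low five bits of the count are observed, e.g.:
+      //
+      //   uint32_t shift = DoubleToInt32(33) & 0x1f;  // yields 1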
case Token::EXP: {
double value = Pow(x_val, y_val);
int int_value = static_cast<int>(value);
*x = factory()->NewNumberLiteral(
- int_value == value && value != -0.0 ? int_value : value, pos,
- has_dot);
+ int_value == value && value != -0.0 ? int_value : value, pos);
return true;
}
default:
@@ -325,15 +322,13 @@ Expression* Parser::BuildUnaryExpression(Expression* expression,
} else if (literal->IsNumber()) {
// Compute some expressions involving only number literals.
double value = literal->AsNumber();
- bool has_dot = literal->ContainsDot();
switch (op) {
case Token::ADD:
return expression;
case Token::SUB:
- return factory()->NewNumberLiteral(-value, pos, has_dot);
+ return factory()->NewNumberLiteral(-value, pos);
case Token::BIT_NOT:
- return factory()->NewNumberLiteral(~DoubleToInt32(value), pos,
- has_dot);
+ return factory()->NewNumberLiteral(~DoubleToInt32(value), pos);
default:
break;
}
@@ -342,7 +337,7 @@ Expression* Parser::BuildUnaryExpression(Expression* expression,
// Desugar '+foo' => 'foo*1'
if (op == Token::ADD) {
return factory()->NewBinaryOperation(
- Token::MUL, expression, factory()->NewNumberLiteral(1, pos, true), pos);
+ Token::MUL, expression, factory()->NewNumberLiteral(1, pos), pos);
}
// The same idea for '-foo' => 'foo*(-1)'.
if (op == Token::SUB) {
@@ -431,9 +426,8 @@ Literal* Parser::ExpressionFromLiteral(Token::Value token, int pos) {
return factory()->NewSmiLiteral(value, pos);
}
case Token::NUMBER: {
- bool has_dot = scanner()->ContainsDot();
double value = scanner()->DoubleValue();
- return factory()->NewNumberLiteral(value, pos, has_dot);
+ return factory()->NewNumberLiteral(value, pos);
}
default:
DCHECK(false);
@@ -503,7 +497,8 @@ Expression* Parser::NewV8Intrinsic(const AstRawString* name,
Parser::Parser(ParseInfo* info)
: ParserBase<Parser>(info->zone(), &scanner_, info->stack_limit(),
info->extension(), info->ast_value_factory(),
- info->runtime_call_stats(), true),
+ info->runtime_call_stats(),
+ info->preparsed_scope_data(), true),
scanner_(info->unicode_cache()),
reusable_preparser_(nullptr),
mode_(PARSE_EAGERLY), // Lazy mode must be set explicitly.
@@ -513,7 +508,6 @@ Parser::Parser(ParseInfo* info)
total_preparse_skipped_(0),
temp_zoned_(false),
log_(nullptr),
- preparsed_scope_data_(info->preparsed_scope_data()),
parameters_end_pos_(info->parameters_end_pos()) {
// Even though we were passed ParseInfo, we should not store it in
// Parser - this makes sure that Isolate is not accidentally accessed via
@@ -621,25 +615,15 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
source = String::Flatten(source);
FunctionLiteral* result;
+ if (FLAG_use_parse_tasks) {
+ source_ = source;
+ compiler_dispatcher_ = isolate->compiler_dispatcher();
+ main_parse_info_ = info;
+ }
+
{
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(source));
- if (FLAG_use_parse_tasks) {
- // FIXME(wiktorg) make it useful for something
- // TODO(wiktorg) make preparser work also with modules
- if (!info->is_module()) {
- scanner_.Initialize(stream.get());
- // NOTE: Some features will be double counted - once here and one more
- // time while being fully parsed by a parse task.
- PreParser::PreParseResult result =
- reusable_preparser()->PreParseProgram(false, use_counts_);
- if (result == PreParser::kPreParseStackOverflow) {
- set_stack_overflow();
- return nullptr;
- }
- stream->Seek(0);
- }
- }
- scanner_.Initialize(stream.get());
+ scanner_.Initialize(stream.get(), info->is_module());
result = DoParseProgram(info);
}
if (result != NULL) {
@@ -647,6 +631,14 @@ FunctionLiteral* Parser::ParseProgram(Isolate* isolate, ParseInfo* info) {
}
HandleSourceURLComments(isolate, info->script());
+ if (FLAG_use_parse_tasks) {
+ compiler_dispatcher_->FinishAllNow();
+ StitchAst(info, isolate);
+ source_ = Handle<String>();
+ compiler_dispatcher_ = nullptr;
+ main_parse_info_ = nullptr;
+ }
+
if (FLAG_trace_parse && result != nullptr) {
double ms = timer.Elapsed().InMillisecondsF();
if (info->is_eval()) {
@@ -805,8 +797,9 @@ FunctionLiteral* Parser::ParseFunction(Isolate* isolate, ParseInfo* info) {
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
source, shared_info->start_position(), shared_info->end_position()));
Handle<String> name(String::cast(shared_info->name()));
- scanner_.Initialize(stream.get());
- result = DoParseFunction(info, ast_value_factory()->GetString(name));
+ scanner_.Initialize(stream.get(), info->is_module());
+ info->set_function_name(ast_value_factory()->GetString(name));
+ result = DoParseFunction(info);
if (result != nullptr) {
Handle<String> inferred_name(shared_info->inferred_name());
result->set_inferred_name(inferred_name);
@@ -835,8 +828,25 @@ static FunctionLiteral::FunctionType ComputeFunctionType(ParseInfo* info) {
return FunctionLiteral::kAnonymousExpression;
}
-FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
- const AstRawString* raw_name) {
+FunctionLiteral* Parser::DoParseFunction(ParseInfo* info) {
+ const AstRawString* raw_name = info->function_name();
+ FunctionNameValidity function_name_validity = kSkipFunctionNameCheck;
+ if (!raw_name) {
+ bool ok = true;
+ if (peek() == Token::LPAREN) {
+ const AstRawString* variable_name;
+ impl()->GetDefaultStrings(&raw_name, &variable_name);
+ } else {
+ bool is_strict_reserved = true;
+ raw_name = ParseIdentifierOrStrictReservedWord(info->function_kind(),
+ &is_strict_reserved, &ok);
+ if (!ok) return nullptr;
+ function_name_validity = is_strict_reserved
+ ? kFunctionNameIsStrictReserved
+ : kFunctionNameValidityUnknown;
+ }
+ }
+
DCHECK_NOT_NULL(raw_name);
DCHECK_NULL(scope_);
DCHECK_NULL(target_stack_);
@@ -955,7 +965,7 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
info->start_position(), info->end_position());
} else {
result = ParseFunctionLiteral(
- raw_name, Scanner::Location::invalid(), kSkipFunctionNameCheck, kind,
+ raw_name, Scanner::Location::invalid(), function_name_validity, kind,
kNoSourcePosition, function_type, info->language_mode(), &ok);
}
// Make sure the results agree.
@@ -966,8 +976,6 @@ FunctionLiteral* Parser::DoParseFunction(ParseInfo* info,
DCHECK_NULL(target_stack_);
DCHECK_IMPLIES(result,
info->function_literal_id() == result->function_literal_id());
- DCHECK_IMPLIES(!info->scope_info_is_empty() && result,
- info->calls_eval() == result->scope()->calls_eval());
return result;
}
@@ -2605,11 +2613,17 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
DCHECK_IMPLIES(parse_lazily(), allow_lazy_);
DCHECK_IMPLIES(parse_lazily(), extension_ == nullptr);
- bool can_preparse = parse_lazily() &&
- eager_compile_hint == FunctionLiteral::kShouldLazyCompile;
-
- bool is_lazy_top_level_function =
- can_preparse && impl()->AllowsLazyParsingWithoutUnresolvedVariables();
+ const bool source_is_external =
+ !source_.is_null() && (source_->IsExternalTwoByteString() ||
+ source_->IsExternalOneByteString());
+ const bool is_lazy =
+ eager_compile_hint == FunctionLiteral::kShouldLazyCompile;
+ const bool is_top_level =
+ impl()->AllowsLazyParsingWithoutUnresolvedVariables();
+ const bool is_lazy_top_level_function = is_lazy && is_top_level;
+ const bool is_lazy_inner_function = is_lazy && !is_top_level;
+ const bool is_eager_top_level_function = !is_lazy && is_top_level;
+ const bool is_declaration = function_type == FunctionLiteral::kDeclaration;
RuntimeCallTimerScope runtime_timer(
runtime_call_stats_,
@@ -2633,22 +2647,19 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// Inner functions will be parsed using a temporary Zone. After parsing, we
// will migrate unresolved variable into a Scope in the main Zone.
- // TODO(marja): Refactor parsing modes: simplify this.
- bool use_temp_zone =
- (FLAG_aggressive_lazy_inner_functions
- ? can_preparse
- : (is_lazy_top_level_function ||
- (parse_lazily() &&
- function_type == FunctionLiteral::kDeclaration &&
- eager_compile_hint == FunctionLiteral::kShouldLazyCompile)));
-
- DCHECK_IMPLIES(
- (is_lazy_top_level_function ||
- (parse_lazily() && function_type == FunctionLiteral::kDeclaration &&
- eager_compile_hint == FunctionLiteral::kShouldLazyCompile)),
- can_preparse);
- bool is_lazy_inner_function =
- use_temp_zone && FLAG_lazy_inner_functions && !is_lazy_top_level_function;
+
+ const bool should_preparse_inner =
+ parse_lazily() && FLAG_lazy_inner_functions && is_lazy_inner_function &&
+ (is_declaration || FLAG_aggressive_lazy_inner_functions);
+
+ bool should_use_parse_task =
+ FLAG_use_parse_tasks && parse_lazily() && compiler_dispatcher_ &&
+ is_eager_top_level_function && source_is_external;
+
+  // This may be modified later to reflect the preparsing decision taken.
+ bool should_preparse = (parse_lazily() && (is_lazy_top_level_function ||
+ should_use_parse_task)) ||
+ should_preparse_inner;
ZoneList<Statement*>* body = nullptr;
int expected_property_count = -1;
@@ -2658,14 +2669,40 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
bool has_duplicate_parameters = false;
int function_literal_id = GetNextFunctionLiteralId();
+ Expect(Token::LPAREN, CHECK_OK);
+
+ if (should_use_parse_task) {
+ int start_pos = scanner()->location().beg_pos;
+ if (function_name_location.IsValid()) {
+ start_pos = function_name_location.beg_pos;
+ }
+ // Warning!
+ // Only sets fields in compiler_hints that are currently used.
+ int compiler_hints = SharedFunctionInfo::FunctionKindBits::encode(kind);
+ if (function_type == FunctionLiteral::kDeclaration) {
+ compiler_hints |= 1 << SharedFunctionInfo::kIsDeclaration;
+ }
+ should_use_parse_task = compiler_dispatcher_->Enqueue(
+ source_, start_pos, source_->length(), language_mode,
+ function_literal_id, allow_natives(), parsing_module_,
+ function_type == FunctionLiteral::kNamedExpression, compiler_hints,
+ main_parse_info_, nullptr);
+ if (V8_UNLIKELY(FLAG_trace_parse_tasks)) {
+ PrintF("Spining off task for function at %d: %s\n", start_pos,
+ should_use_parse_task ? "SUCCESS" : "FAILED");
+ }
+ if (!should_use_parse_task) {
+ should_preparse = false;
+ }
+ }
+
Zone* outer_zone = zone();
DeclarationScope* scope;
{
// Temporary zones can nest. When we migrate free variables (see below), we
// need to recreate them in the previous Zone.
- AstNodeFactory previous_zone_ast_node_factory(ast_value_factory());
- previous_zone_ast_node_factory.set_zone(zone());
+ AstNodeFactory previous_zone_ast_node_factory(ast_value_factory(), zone());
// Open a new zone scope, which sets our AstNodeFactory to allocate in the
// new temporary zone if the preconditions are satisfied, and ensures that
@@ -2673,7 +2710,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// to do scope analysis correctly after full parsing, we migrate needed
// information when the function is parsed.
Zone temp_zone(zone()->allocator(), ZONE_NAME);
- DiscardableZoneScope zone_scope(this, &temp_zone, use_temp_zone);
+ DiscardableZoneScope zone_scope(this, &temp_zone, should_preparse);
// This Scope lives in the main zone. We'll migrate data into that zone
// later.
@@ -2681,10 +2718,8 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
SetLanguageMode(scope, language_mode);
#ifdef DEBUG
scope->SetScopeName(function_name);
- if (use_temp_zone) scope->set_needs_migration();
+ if (should_preparse) scope->set_needs_migration();
#endif
-
- Expect(Token::LPAREN, CHECK_OK);
scope->set_start_position(scanner()->location().beg_pos);
// Eager or lazy parse? If is_lazy_top_level_function, we'll parse
@@ -2692,19 +2727,19 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
// abort lazy parsing if it suspects that wasn't a good idea. If so (in
// which case the parser is expected to have backtracked), or if we didn't
// try to lazy parse in the first place, we'll have to parse eagerly.
- if (is_lazy_top_level_function || is_lazy_inner_function) {
+ if (should_preparse) {
+ DCHECK(parse_lazily());
+ DCHECK(is_lazy_top_level_function || is_lazy_inner_function ||
+ should_use_parse_task);
Scanner::BookmarkScope bookmark(scanner());
bookmark.Set();
- LazyParsingResult result = SkipFunction(
- kind, scope, &num_parameters, &function_length,
- is_lazy_inner_function, is_lazy_top_level_function, CHECK_OK);
+ LazyParsingResult result =
+ SkipFunction(kind, scope, &num_parameters, is_lazy_inner_function,
+ is_lazy_top_level_function, CHECK_OK);
if (result == kLazyParsingAborted) {
DCHECK(is_lazy_top_level_function);
bookmark.Apply();
- // Trigger eager (re-)parsing, just below this block.
- is_lazy_top_level_function = false;
-
// This is probably an initialization function. Inform the compiler it
// should also eager-compile this function, and that we expect it to be
// used once.
@@ -2712,48 +2747,45 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
should_be_used_once_hint = true;
scope->ResetAfterPreparsing(ast_value_factory(), true);
zone_scope.Reset();
- use_temp_zone = false;
+ // Trigger eager (re-)parsing, just below this block.
+ should_preparse = false;
+ should_use_parse_task = false;
}
}
- if (!is_lazy_top_level_function && !is_lazy_inner_function) {
+ if (should_preparse) {
+ scope->AnalyzePartially(&previous_zone_ast_node_factory,
+ preparsed_scope_data_);
+ } else {
body = ParseFunction(function_name, pos, kind, function_type, scope,
&num_parameters, &function_length,
&has_duplicate_parameters, &expected_property_count,
CHECK_OK);
}
- DCHECK(use_temp_zone || !is_lazy_top_level_function);
- if (use_temp_zone) {
- // If the preconditions are correct the function body should never be
- // accessed, but do this anyway for better behaviour if they're wrong.
- body = nullptr;
- scope->AnalyzePartially(&previous_zone_ast_node_factory,
- preparsed_scope_data_);
- }
-
- DCHECK_IMPLIES(use_temp_zone, temp_zoned_);
- if (FLAG_trace_preparse) {
+ DCHECK_EQ(should_preparse, temp_zoned_);
+ if (V8_UNLIKELY(FLAG_trace_preparse)) {
PrintF(" [%s]: %i-%i %.*s\n",
- is_lazy_top_level_function
- ? "Preparse no-resolution"
- : (temp_zoned_ ? "Preparse resolution" : "Full parse"),
+ should_preparse ? (is_top_level ? "Preparse no-resolution"
+ : "Preparse resolution")
+ : "Full parse",
scope->start_position(), scope->end_position(),
function_name->byte_length(), function_name->raw_data());
}
if (V8_UNLIKELY(FLAG_runtime_stats)) {
- if (is_lazy_top_level_function) {
- RuntimeCallStats::CorrectCurrentCounterId(
- runtime_call_stats_,
- parsing_on_main_thread_
- ? &RuntimeCallStats::PreParseNoVariableResolution
- : &RuntimeCallStats::PreParseBackgroundNoVariableResolution);
- } else if (temp_zoned_) {
- RuntimeCallStats::CorrectCurrentCounterId(
- runtime_call_stats_,
+ if (should_preparse) {
+ RuntimeCallStats::CounterId counter_id =
parsing_on_main_thread_
? &RuntimeCallStats::PreParseWithVariableResolution
- : &RuntimeCallStats::PreParseBackgroundWithVariableResolution);
+ : &RuntimeCallStats::PreParseBackgroundWithVariableResolution;
+ if (is_top_level) {
+ counter_id =
+ parsing_on_main_thread_
+ ? &RuntimeCallStats::PreParseNoVariableResolution
+ : &RuntimeCallStats::PreParseBackgroundNoVariableResolution;
+ }
+ RuntimeCallStats::CorrectCurrentCounterId(runtime_call_stats_,
+ counter_id);
}
}
@@ -2779,6 +2811,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
function_name, scope, body, expected_property_count, num_parameters,
function_length, duplicate_parameters, function_type, eager_compile_hint,
pos, true, function_literal_id);
+ if (should_use_parse_task) {
+ literals_to_stitch_.emplace_back(function_literal);
+ }
function_literal->set_function_token_position(function_token_pos);
if (should_be_used_once_hint)
function_literal->set_should_be_used_once_hint();
@@ -2790,9 +2825,13 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
return function_literal;
}
-Parser::LazyParsingResult Parser::SkipFunction(
- FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
- int* function_length, bool is_inner_function, bool may_abort, bool* ok) {
+Parser::LazyParsingResult Parser::SkipFunction(FunctionKind kind,
+ DeclarationScope* function_scope,
+ int* num_parameters,
+ bool is_inner_function,
+ bool may_abort, bool* ok) {
+ FunctionState function_state(&function_state_, &scope_, function_scope);
+
DCHECK_NE(kNoSourcePosition, function_scope->start_position());
DCHECK_EQ(kNoSourcePosition, parameters_end_pos_);
if (produce_cached_parse_data()) CHECK(log_);
@@ -2817,52 +2856,21 @@ Parser::LazyParsingResult Parser::SkipFunction(
scanner()->SeekForward(entry.end_pos() - 1);
Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
*num_parameters = entry.num_parameters();
- *function_length = entry.function_length();
SetLanguageMode(function_scope, entry.language_mode());
if (entry.uses_super_property())
function_scope->RecordSuperPropertyUsage();
- if (entry.calls_eval()) function_scope->RecordEvalCall();
SkipFunctionLiterals(entry.num_inner_functions());
return kLazyParsingComplete;
}
cached_parse_data_->Reject();
}
- if (FLAG_use_parse_tasks && !is_inner_function &&
- reusable_preparser()->preparse_data()) {
- // All top-level functions are already preparsed and parser tasks for eager
- // functions are already created. Use data gathered during the preparse step
- // to skip the function.
- PreParseData::FunctionData data =
- reusable_preparser()->preparse_data()->GetFunctionData(
- function_scope->start_position());
- if (data.is_valid()) {
- if (FLAG_trace_parse_tasks) {
- PrintF("Skipping top level func @ %d : %d using preparse data\n",
- function_scope->start_position(), data.end);
- }
- function_scope->set_end_position(data.end);
- scanner()->SeekForward(data.end - 1);
- Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
- *num_parameters = data.num_parameters;
- *function_length = data.function_length;
- SetLanguageMode(function_scope, data.language_mode);
- if (data.uses_super_property) {
- function_scope->RecordSuperPropertyUsage();
- }
- if (data.calls_eval) {
- function_scope->RecordEvalCall();
- }
- SkipFunctionLiterals(data.num_inner_functions);
- return kLazyParsingComplete;
- }
- }
-
// FIXME(marja): There are 3 ways to skip functions now. Unify them.
if (preparsed_scope_data_->Consuming()) {
- DCHECK(FLAG_preparser_scope_analysis);
+ DCHECK(FLAG_experimental_preparser_scope_analysis);
const PreParseData::FunctionData& data =
- preparsed_scope_data_->FindFunction(function_scope->start_position());
+ preparsed_scope_data_->FindSkippableFunction(
+ function_scope->start_position());
if (data.is_valid()) {
function_scope->set_is_skipped_function(true);
function_scope->outer_scope()->SetMustUsePreParsedScopeData();
@@ -2871,14 +2879,10 @@ Parser::LazyParsingResult Parser::SkipFunction(
scanner()->SeekForward(data.end - 1);
Expect(Token::RBRACE, CHECK_OK_VALUE(kLazyParsingComplete));
*num_parameters = data.num_parameters;
- *function_length = data.function_length;
SetLanguageMode(function_scope, data.language_mode);
if (data.uses_super_property) {
function_scope->RecordSuperPropertyUsage();
}
- if (data.calls_eval) {
- function_scope->RecordEvalCall();
- }
SkipFunctionLiterals(data.num_inner_functions);
return kLazyParsingComplete;
}
@@ -2914,15 +2918,13 @@ Parser::LazyParsingResult Parser::SkipFunction(
total_preparse_skipped_ +=
function_scope->end_position() - function_scope->start_position();
*num_parameters = logger->num_parameters();
- *function_length = logger->function_length();
SkipFunctionLiterals(logger->num_inner_functions());
if (!is_inner_function && produce_cached_parse_data()) {
DCHECK(log_);
- log_->LogFunction(
- function_scope->start_position(), function_scope->end_position(),
- *num_parameters, *function_length, language_mode(),
- function_scope->uses_super_property(), function_scope->calls_eval(),
- logger->num_inner_functions());
+ log_->LogFunction(function_scope->start_position(),
+ function_scope->end_position(), *num_parameters,
+ language_mode(), function_scope->uses_super_property(),
+ logger->num_inner_functions());
}
return kLazyParsingComplete;
}
@@ -3128,22 +3130,6 @@ Block* Parser::BuildRejectPromiseOnException(Block* inner_block) {
return result;
}
-Assignment* Parser::BuildCreateJSGeneratorObject(int pos, FunctionKind kind) {
- // .generator = %CreateJSGeneratorObject(...);
- DCHECK_NOT_NULL(function_state_->generator_object_variable());
- ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
- args->Add(factory()->NewThisFunction(pos), zone());
- args->Add(IsArrowFunction(kind) ? GetLiteralUndefined(pos)
- : ThisExpression(kNoSourcePosition),
- zone());
- Expression* allocation =
- factory()->NewCallRuntime(Runtime::kCreateJSGeneratorObject, args, pos);
- VariableProxy* proxy =
- factory()->NewVariableProxy(function_state_->generator_object_variable());
- return factory()->NewAssignment(Token::INIT, proxy, allocation,
- kNoSourcePosition);
-}
-
Expression* Parser::BuildResolvePromise(Expression* value, int pos) {
// %ResolvePromise(.promise, value), .promise
ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(2, zone());
@@ -3193,13 +3179,17 @@ Variable* Parser::AsyncGeneratorAwaitVariable() {
}
Expression* Parser::BuildInitialYield(int pos, FunctionKind kind) {
- Assignment* assignment = BuildCreateJSGeneratorObject(pos, kind);
- VariableProxy* generator =
+ // We access the generator object twice: once for the {generator}
+ // member of the Suspend AST node, and once for the result of
+ // the initial yield.
+ Expression* yield_result =
+ factory()->NewVariableProxy(function_state_->generator_object_variable());
+ Expression* generator_object =
factory()->NewVariableProxy(function_state_->generator_object_variable());
// The position of the yield is important for reporting the exception
// caused by calling the .throw method on a generator suspended at the
// initial yield (i.e. right after generator instantiation).
- return BuildSuspend(generator, assignment, scope()->start_position(),
+ return BuildSuspend(generator_object, yield_result, scope()->start_position(),
Suspend::kOnExceptionThrow, SuspendFlags::kYield);
}
@@ -3317,7 +3307,7 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
class_info->properties->Add(property, zone());
}
-// This method rewrites a class literal into a do-expression.
+// This method generates a ClassLiteral AST node.
// It uses the following fields of class_info:
// - constructor (if missing, it updates it with a default constructor)
// - proxy
@@ -3325,13 +3315,13 @@ void Parser::DeclareClassProperty(const AstRawString* class_name,
// - properties
// - has_name_static_property
// - has_static_computed_names
-Expression* Parser::RewriteClassLiteral(const AstRawString* name,
+Expression* Parser::RewriteClassLiteral(Scope* block_scope,
+ const AstRawString* name,
ClassInfo* class_info, int pos,
- bool* ok) {
- int end_pos = scanner()->location().end_pos;
- Block* do_block = factory()->NewBlock(nullptr, 1, false, pos);
- Variable* result_var = NewTemporary(ast_value_factory()->empty_string());
- DoExpression* do_expr = factory()->NewDoExpression(do_block, result_var, pos);
+ int end_pos, bool* ok) {
+ DCHECK_NOT_NULL(block_scope);
+ DCHECK_EQ(block_scope->scope_type(), BLOCK_SCOPE);
+ DCHECK_EQ(block_scope->language_mode(), STRICT);
bool has_extends = class_info->extends != nullptr;
bool has_default_constructor = class_info->constructor == nullptr;
@@ -3340,31 +3330,20 @@ Expression* Parser::RewriteClassLiteral(const AstRawString* name,
DefaultConstructor(name, has_extends, pos, end_pos);
}
- scope()->set_end_position(end_pos);
-
if (name != nullptr) {
DCHECK_NOT_NULL(class_info->proxy);
class_info->proxy->var()->set_initializer_position(end_pos);
}
ClassLiteral* class_literal = factory()->NewClassLiteral(
- class_info->proxy, class_info->extends, class_info->constructor,
- class_info->properties, pos, end_pos,
+ block_scope, class_info->proxy, class_info->extends,
+ class_info->constructor, class_info->properties, pos, end_pos,
class_info->has_name_static_property,
- class_info->has_static_computed_names);
+ class_info->has_static_computed_names, class_info->is_anonymous);
- do_block->statements()->Add(
- factory()->NewExpressionStatement(
- factory()->NewAssignment(Token::ASSIGN,
- factory()->NewVariableProxy(result_var),
- class_literal, kNoSourcePosition),
- pos),
- zone());
- do_block->set_scope(scope()->FinalizeBlockScope());
- do_expr->set_represented_function(class_info->constructor);
AddFunctionForNameInference(class_info->constructor);
- return do_expr;
+ return class_literal;
}
Literal* Parser::GetLiteralUndefined(int position) {
@@ -3503,13 +3482,6 @@ void Parser::UpdateStatistics(Isolate* isolate, Handle<Script> script) {
}
isolate->counters()->total_preparse_skipped()->Increment(
total_preparse_skipped_);
- if (!parsing_on_main_thread_ &&
- FLAG_runtime_stats ==
- v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE) {
- // Copy over the counters from the background thread to the main counters on
- // the isolate.
- isolate->counters()->runtime_call_stats()->Add(runtime_call_stats_);
- }
}
void Parser::ParseOnBackground(ParseInfo* info) {
@@ -3526,10 +3498,6 @@ void Parser::ParseOnBackground(ParseInfo* info) {
compile_options_ = ScriptCompiler::kNoCompileOptions;
}
}
- if (FLAG_runtime_stats) {
- // Create separate runtime stats for background parsing.
- runtime_call_stats_ = new (zone()) RuntimeCallStats();
- }
std::unique_ptr<Utf16CharacterStream> stream;
Utf16CharacterStream* stream_ptr;
@@ -3543,7 +3511,7 @@ void Parser::ParseOnBackground(ParseInfo* info) {
runtime_call_stats_));
stream_ptr = stream.get();
}
- scanner_.Initialize(stream_ptr);
+ scanner_.Initialize(stream_ptr, info->is_module());
DCHECK(info->maybe_outer_scope_info().is_null());
DCHECK(original_scope_);
@@ -3558,12 +3526,7 @@ void Parser::ParseOnBackground(ParseInfo* info) {
fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
result = DoParseProgram(info);
} else {
- const AstRawString* function_name = info->function_name();
- if (!function_name) {
- // FIXME(wiktorg) solve fni in parse tasks
- function_name = ast_value_factory()->empty_string();
- }
- result = DoParseFunction(info, function_name);
+ result = DoParseFunction(info);
}
info->set_literal(result);
@@ -3887,9 +3850,6 @@ void Parser::PrepareAsyncFunctionBody(ZoneList<Statement*>* body,
if (function_state_->generator_object_variable() == nullptr) {
PrepareGeneratorVariables();
}
- body->Add(factory()->NewExpressionStatement(
- BuildCreateJSGeneratorObject(pos, kind), kNoSourcePosition),
- zone());
}
// This method completes the desugaring of the body of async_function.
@@ -4251,7 +4211,7 @@ void Parser::SetFunctionNameFromPropertyName(ObjectLiteralProperty* property,
// Ignore "__proto__" as a name when it's being used to set the [[Prototype]]
// of an object literal.
- if (property->kind() == ObjectLiteralProperty::PROTOTYPE) return;
+ if (property->IsPrototype()) return;
Expression* value = property->value();
@@ -4270,12 +4230,11 @@ void Parser::SetFunctionName(Expression* value, const AstRawString* name) {
DCHECK_NOT_NULL(name);
if (!value->IsAnonymousFunctionDefinition()) return;
auto function = value->AsFunctionLiteral();
+ if (value->IsClassLiteral()) {
+ function = value->AsClassLiteral()->constructor();
+ }
if (function != nullptr) {
function->set_raw_name(ast_value_factory()->NewConsString(name));
- } else {
- DCHECK(value->IsDoExpression());
- value->AsDoExpression()->represented_function()->set_raw_name(
- ast_value_factory()->NewConsString(name));
}
}
@@ -4704,10 +4663,26 @@ Expression* Parser::RewriteYieldStar(Expression* generator,
// while (true) { ... }
// Already defined earlier: WhileStatement* loop = ...
{
- Block* loop_body = factory()->NewBlock(nullptr, 4, false, nopos);
+ Block* loop_body = factory()->NewBlock(nullptr, 5, false, nopos);
loop_body->statements()->Add(switch_mode, zone());
loop_body->statements()->Add(if_done, zone());
loop_body->statements()->Add(set_mode_return, zone());
+
+ if (is_async_generator()) {
+ // AsyncGeneratorYield does not yield the original iterator result,
+ // unlike sync generators. Do `output = output.value`
+ VariableProxy* output_proxy = factory()->NewVariableProxy(var_output);
+ Expression* literal = factory()->NewStringLiteral(
+ ast_value_factory()->value_string(), nopos);
+ Assignment* assign = factory()->NewAssignment(
+ Token::ASSIGN, output_proxy,
+ factory()->NewProperty(factory()->NewVariableProxy(var_output),
+ literal, nopos),
+ nopos);
+ loop_body->statements()->Add(
+ factory()->NewExpressionStatement(assign, nopos), zone());
+ }
+
loop_body->statements()->Add(try_finally, zone());
loop->Initialize(factory()->NewBooleanLiteral(true, nopos), loop_body);
@@ -5191,6 +5166,41 @@ Statement* Parser::FinalizeForOfStatement(ForOfStatement* loop,
return final_loop;
}
+void Parser::StitchAst(ParseInfo* top_level_parse_info, Isolate* isolate) {
+ if (literals_to_stitch_.empty()) return;
+ std::map<int, ParseInfo*> child_infos = top_level_parse_info->child_infos();
+ DCHECK(std::is_sorted(literals_to_stitch_.begin(), literals_to_stitch_.end(),
+ [](FunctionLiteral* a, FunctionLiteral* b) {
+ return a->start_position() < b->start_position();
+ }));
+ auto it = literals_to_stitch_.begin();
+ for (auto& child_info : child_infos) {
+ ParseInfo* result = child_info.second;
+ // If the parse task failed, the function will be treated as a lazy
+ // function and reparsed before it gets called.
+ if (!result) continue;
+ result->UpdateStatisticsAfterBackgroundParse(isolate);
+ if (!result->literal()) continue;
+ while ((*it)->start_position() != child_info.first) {
+ if (++it == literals_to_stitch_.end()) {
+ return;
+ }
+ }
+ FunctionLiteral* literal = *it;
+ // FIXME(wiktorg) better handling of default params for arrow functions
+ Scope* outer_scope = literal->scope()->outer_scope();
+ if (outer_scope->is_declaration_scope() &&
+ outer_scope->AsDeclarationScope()->was_lazily_parsed()) {
+ continue;
+ }
+ // TODO(wiktorg) in the future internalize somewhere else (stitching may be
+ // done on streamer thread)
+ result->ast_value_factory()->Internalize(isolate);
+ literal->ReplaceBodyAndScope(result->literal());
+ literal->SetShouldEagerCompile();
+ }
+}
+
#undef CHECK_OK
#undef CHECK_OK_VOID
#undef CHECK_FAILED
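
The StitchAst hunk above is a linear merge: child ParseInfos arrive in a std::map ordered by start position, and literals_to_stitch_ is asserted to be sorted the same way, so a single forward iterator visits each literal at most once. A stand-alone sketch of that merge with toy types (Literal and the bool "result" stand in for V8's FunctionLiteral and ParseInfo):

    #include <cassert>
    #include <map>
    #include <vector>

    struct Literal {
      int start_position;
      bool stitched = false;
    };

    // Walk the sorted results once, advancing through the equally sorted
    // literals; a failed child task (false) leaves its literal lazy.
    void Stitch(std::vector<Literal>& literals,
                const std::map<int, bool>& child_results) {
      auto it = literals.begin();
      for (const auto& child : child_results) {
        if (!child.second) continue;
        while (it != literals.end() && it->start_position != child.first) ++it;
        if (it == literals.end()) return;
        it->stitched = true;
      }
    }

    int main() {
      std::vector<Literal> literals = {{10}, {25}, {40}};
      std::map<int, bool> results = {{10, true}, {25, false}, {40, true}};
      Stitch(literals, results);
      assert(literals[0].stitched && !literals[1].stitched &&
             literals[2].stitched);
    }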
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 8a970608e7..c51c0eff01 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -35,7 +35,6 @@ class FunctionEntry BASE_EMBEDDED {
kStartPositionIndex,
kEndPositionIndex,
kNumParametersIndex,
- kFunctionLengthIndex,
kFlagsIndex,
kNumInnerFunctionsIndex,
kSize
@@ -49,29 +48,22 @@ class FunctionEntry BASE_EMBEDDED {
class LanguageModeField : public BitField<LanguageMode, 0, 1> {};
class UsesSuperPropertyField
: public BitField<bool, LanguageModeField::kNext, 1> {};
- class CallsEvalField
- : public BitField<bool, UsesSuperPropertyField::kNext, 1> {};
static uint32_t EncodeFlags(LanguageMode language_mode,
- bool uses_super_property, bool calls_eval) {
+ bool uses_super_property) {
return LanguageModeField::encode(language_mode) |
- UsesSuperPropertyField::encode(uses_super_property) |
- CallsEvalField::encode(calls_eval);
+ UsesSuperPropertyField::encode(uses_super_property);
}
int start_pos() const { return backing_[kStartPositionIndex]; }
int end_pos() const { return backing_[kEndPositionIndex]; }
int num_parameters() const { return backing_[kNumParametersIndex]; }
- int function_length() const { return backing_[kFunctionLengthIndex]; }
LanguageMode language_mode() const {
return LanguageModeField::decode(backing_[kFlagsIndex]);
}
bool uses_super_property() const {
return UsesSuperPropertyField::decode(backing_[kFlagsIndex]);
}
- bool calls_eval() const {
- return CallsEvalField::decode(backing_[kFlagsIndex]);
- }
int num_inner_functions() const { return backing_[kNumInnerFunctionsIndex]; }
bool is_valid() const { return !backing_.is_empty(); }
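
EncodeFlags above now packs just two per-function flags into one uint32_t via V8's BitField templates; dropping CallsEvalField (and the separate function_length slot) is what shrinks each preparse-data entry. A minimal, self-contained sketch of the packing technique; the BitField here is a simplified stand-in for V8's template:

    #include <cassert>
    #include <cstdint>

    template <typename T, int kShift, int kBits>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kBits) - 1u) << kShift;
      static constexpr int kNext = kShift + kBits;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << kShift) & kMask;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed & kMask) >> kShift);
      }
    };

    enum LanguageMode : uint32_t { SLOPPY = 0, STRICT = 1 };
    using LanguageModeField = BitField<LanguageMode, 0, 1>;
    using UsesSuperPropertyField = BitField<bool, LanguageModeField::kNext, 1>;

    int main() {
      uint32_t flags = LanguageModeField::encode(STRICT) |
                       UsesSuperPropertyField::encode(true);
      assert(LanguageModeField::decode(flags) == STRICT);
      assert(UsesSuperPropertyField::decode(flags));
    }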
@@ -274,14 +266,15 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
FunctionLiteral* ParseProgram(Isolate* isolate, ParseInfo* info);
FunctionLiteral* ParseFunction(Isolate* isolate, ParseInfo* info);
- FunctionLiteral* DoParseFunction(ParseInfo* info,
- const AstRawString* raw_name);
+ FunctionLiteral* DoParseFunction(ParseInfo* info);
// Called by ParseProgram after setting up the scanner.
FunctionLiteral* DoParseProgram(ParseInfo* info);
void SetCachedData(ParseInfo* info);
+ void StitchAst(ParseInfo* top_level_parse_info, Isolate* isolate);
+
ScriptCompiler::CompileOptions compile_options() const {
return compile_options_;
}
@@ -297,7 +290,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
reusable_preparser_ =
new PreParser(zone(), &scanner_, stack_limit_, ast_value_factory(),
&pending_error_handler_, runtime_call_stats_,
- parsing_on_main_thread_);
+ preparsed_scope_data_, parsing_on_main_thread_);
#define SET_ALLOW(name) reusable_preparser_->set_allow_##name(allow_##name());
SET_ALLOW(natives);
SET_ALLOW(tailcalls);
@@ -380,9 +373,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
ClassLiteralProperty::Kind kind,
bool is_static, bool is_constructor,
ClassInfo* class_info, bool* ok);
- V8_INLINE Expression* RewriteClassLiteral(const AstRawString* name,
+ V8_INLINE Expression* RewriteClassLiteral(Scope* block_scope,
+ const AstRawString* name,
ClassInfo* class_info, int pos,
- bool* ok);
+ int end_pos, bool* ok);
V8_INLINE Statement* DeclareNative(const AstRawString* name, int pos,
bool* ok);
@@ -561,9 +555,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
// in order to force the function to be eagerly parsed, after all.
LazyParsingResult SkipFunction(FunctionKind kind,
DeclarationScope* function_scope,
- int* num_parameters, int* function_length,
- bool is_inner_function, bool may_abort,
- bool* ok);
+ int* num_parameters, bool is_inner_function,
+ bool may_abort, bool* ok);
Block* BuildParameterInitializationBlock(
const ParserFormalParameters& parameters, bool* ok);
@@ -733,7 +726,8 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
V8_INLINE static bool IsIdentifier(Expression* expression) {
DCHECK_NOT_NULL(expression);
VariableProxy* operand = expression->AsVariableProxy();
- return operand != nullptr && !operand->is_this();
+ return operand != nullptr && !operand->is_this() &&
+ !operand->is_new_target();
}
V8_INLINE static const AstRawString* AsIdentifier(Expression* expression) {
@@ -759,7 +753,7 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
V8_INLINE static bool IsBoilerplateProperty(
ObjectLiteral::Property* property) {
- return ObjectLiteral::IsBoilerplateProperty(property);
+ return !property->IsPrototype();
}
V8_INLINE bool IsNative(Expression* expr) const {
@@ -1161,6 +1155,11 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
PreParser* reusable_preparser_;
Mode mode_;
+ std::vector<FunctionLiteral*> literals_to_stitch_;
+ Handle<String> source_;
+ CompilerDispatcher* compiler_dispatcher_ = nullptr;
+ ParseInfo* main_parse_info_ = nullptr;
+
friend class ParserTarget;
friend class ParserTargetScope;
ParserTarget* target_stack_; // for break, continue statements
@@ -1178,8 +1177,6 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
bool temp_zoned_;
ParserLogger* log_;
- PreParsedScopeData* preparsed_scope_data_;
-
// If not kNoSourcePosition, indicates that the first function literal
// encountered is a dynamic function, see CreateDynamicFunction(). This field
// indicates the correct position of the ')' that closes the parameter list.
diff --git a/deps/v8/src/parsing/preparse-data-format.h b/deps/v8/src/parsing/preparse-data-format.h
index e579c66af9..2f317ce75f 100644
--- a/deps/v8/src/parsing/preparse-data-format.h
+++ b/deps/v8/src/parsing/preparse-data-format.h
@@ -14,7 +14,7 @@ struct PreparseDataConstants {
public:
// Layout and constants of the preparse data exchange format.
static const unsigned kMagicNumber = 0xBadDead;
- static const unsigned kCurrentVersion = 16;
+ static const unsigned kCurrentVersion = 17;
static const int kMagicOffset = 0;
static const int kVersionOffset = 1;
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index e9f98b7356..f11eb7b21e 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -14,15 +14,14 @@ namespace v8 {
namespace internal {
void ParserLogger::LogFunction(int start, int end, int num_parameters,
- int function_length, LanguageMode language_mode,
- bool uses_super_property, bool calls_eval,
+ LanguageMode language_mode,
+ bool uses_super_property,
int num_inner_functions) {
function_store_.Add(start);
function_store_.Add(end);
function_store_.Add(num_parameters);
- function_store_.Add(function_length);
- function_store_.Add(FunctionEntry::EncodeFlags(
- language_mode, uses_super_property, calls_eval));
+ function_store_.Add(
+ FunctionEntry::EncodeFlags(language_mode, uses_super_property));
function_store_.Add(num_inner_functions);
}
diff --git a/deps/v8/src/parsing/preparse-data.h b/deps/v8/src/parsing/preparse-data.h
index 954d89ef7e..b5db652c9c 100644
--- a/deps/v8/src/parsing/preparse-data.h
+++ b/deps/v8/src/parsing/preparse-data.h
@@ -53,14 +53,11 @@ class PreParserLogger final {
PreParserLogger()
: end_(-1),
num_parameters_(-1),
- function_length_(-1),
num_inner_functions_(-1) {}
- void LogFunction(int end, int num_parameters, int function_length,
- int num_inner_functions) {
+ void LogFunction(int end, int num_parameters, int num_inner_functions) {
end_ = end;
num_parameters_ = num_parameters;
- function_length_ = function_length;
num_inner_functions_ = num_inner_functions;
}
@@ -68,16 +65,12 @@ class PreParserLogger final {
int num_parameters() const {
return num_parameters_;
}
- int function_length() const {
- return function_length_;
- }
int num_inner_functions() const { return num_inner_functions_; }
private:
int end_;
// For function entries.
int num_parameters_;
- int function_length_;
int num_inner_functions_;
};
@@ -85,9 +78,9 @@ class ParserLogger final {
public:
ParserLogger();
- void LogFunction(int start, int end, int num_parameters, int function_length,
+ void LogFunction(int start, int end, int num_parameters,
LanguageMode language_mode, bool uses_super_property,
- bool calls_eval, int num_inner_functions);
+ int num_inner_functions);
ScriptData* GetScriptData();
@@ -105,26 +98,24 @@ class PreParseData final {
struct FunctionData {
int end;
int num_parameters;
- int function_length;
int num_inner_functions;
LanguageMode language_mode;
bool uses_super_property : 1;
- bool calls_eval : 1;
- FunctionData() : end(-1) {}
+ FunctionData() : end(kNoSourcePosition) {}
- FunctionData(int end, int num_parameters, int function_length,
- int num_inner_functions, LanguageMode language_mode,
- bool uses_super_property, bool calls_eval)
+ FunctionData(int end, int num_parameters, int num_inner_functions,
+ LanguageMode language_mode, bool uses_super_property)
: end(end),
num_parameters(num_parameters),
- function_length(function_length),
num_inner_functions(num_inner_functions),
language_mode(language_mode),
- uses_super_property(uses_super_property),
- calls_eval(calls_eval) {}
+ uses_super_property(uses_super_property) {}
- bool is_valid() const { return end > 0; }
+ bool is_valid() const {
+ DCHECK_IMPLIES(end < 0, end == kNoSourcePosition);
+ return end != kNoSourcePosition;
+ }
};
FunctionData GetFunctionData(int start) const;
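
The FunctionData hunk above swaps the bare -1 default and the "end > 0" test for the named kNoSourcePosition sentinel plus an assert that no other negative value sneaks in. A compilable sketch of the pattern, with the struct reduced to the one relevant field:

    #include <cassert>

    constexpr int kNoSourcePosition = -1;

    struct FunctionData {
      int end = kNoSourcePosition;
      bool is_valid() const {
        // Mirrors DCHECK_IMPLIES(end < 0, end == kNoSourcePosition).
        assert(end >= 0 || end == kNoSourcePosition);
        return end != kNoSourcePosition;
      }
    };

    int main() {
      FunctionData missing;  // default-constructed: invalid
      FunctionData known;
      known.end = 42;
      assert(!missing.is_valid());
      assert(known.is_valid());
    }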
diff --git a/deps/v8/src/parsing/preparsed-scope-data.cc b/deps/v8/src/parsing/preparsed-scope-data.cc
index c8deb06bc7..c8ea3de22a 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.cc
+++ b/deps/v8/src/parsing/preparsed-scope-data.cc
@@ -20,7 +20,7 @@ class VariableMaybeAssignedField
class VariableContextAllocatedField
: public BitField16<bool, VariableMaybeAssignedField::kNext, 1> {};
-const int kFunctionDataSize = 9;
+const int kFunctionDataSize = 8;
} // namespace
@@ -51,21 +51,20 @@ const int kFunctionDataSize = 9;
void PreParsedScopeData::SaveData(Scope* scope) {
DCHECK(!has_data_);
+ DCHECK_NE(scope->end_position(), kNoSourcePosition);
- if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
+ // We're not trying to save data for default constructors because the
+ // PreParser doesn't construct them.
+ DCHECK_IMPLIES(scope->scope_type() == ScopeType::FUNCTION_SCOPE,
+ (scope->AsDeclarationScope()->function_kind() &
+ kDefaultConstructor) == 0);
+
+ if (scope->scope_type() == ScopeType::FUNCTION_SCOPE &&
+ !scope->AsDeclarationScope()->is_arrow_scope()) {
// This cast is OK since we're not going to have more than 2^32 elements in
// the data. FIXME(marja): Implement limits for the data size.
function_data_positions_[scope->start_position()] =
static_cast<uint32_t>(backing_store_.size());
- // FIXME(marja): Fill in the missing fields: function_length +
- // num_inner_functions.
- function_index_.AddFunctionData(
- scope->start_position(),
- PreParseData::FunctionData(
- scope->end_position(), scope->num_parameters(), -1, -1,
- scope->language_mode(),
- scope->AsDeclarationScope()->uses_super_property(),
- scope->calls_eval()));
}
if (!ScopeNeedsData(scope)) {
@@ -80,7 +79,7 @@ void PreParsedScopeData::SaveData(Scope* scope) {
// index is needed for skipping over data for a function scope when we skip
// parsing of the corresponding function.
size_t data_end_index = backing_store_.size();
- backing_store_.push_back(-1);
+ backing_store_.push_back(0);
if (!scope->is_hidden()) {
for (Variable* var : *scope->locals()) {
@@ -92,11 +91,24 @@ void PreParsedScopeData::SaveData(Scope* scope) {
SaveDataForInnerScopes(scope);
- backing_store_[data_end_index] = backing_store_.size();
+ // FIXME(marja): see above.
+ backing_store_[data_end_index] = static_cast<uint32_t>(backing_store_.size());
+}
+
+void PreParsedScopeData::AddSkippableFunction(
+ int start_position, const PreParseData::FunctionData& function_data) {
+ AddFunction(start_position, function_data);
+ skippable_functions_.insert(start_position);
+}
+
+void PreParsedScopeData::AddFunction(
+ int start_position, const PreParseData::FunctionData& function_data) {
+ DCHECK(function_data.is_valid());
+ function_index_.AddFunctionData(start_position, function_data);
}
void PreParsedScopeData::RestoreData(DeclarationScope* scope) const {
- int index = -1;
+ uint32_t index = 0;
DCHECK_EQ(scope->scope_type(), ScopeType::FUNCTION_SCOPE);
@@ -107,7 +119,7 @@ void PreParsedScopeData::RestoreData(DeclarationScope* scope) const {
RestoreData(scope, &index);
}
-void PreParsedScopeData::RestoreData(Scope* scope, int* index_ptr) const {
+void PreParsedScopeData::RestoreData(Scope* scope, uint32_t* index_ptr) const {
// It's possible that scope is not present in the data at all (since PreParser
// doesn't create the corresponding scope). In this case, the Scope won't
// contain any variables for which we need the data.
@@ -115,22 +127,21 @@ void PreParsedScopeData::RestoreData(Scope* scope, int* index_ptr) const {
return;
}
- int& index = *index_ptr;
+ uint32_t& index = *index_ptr;
#ifdef DEBUG
// Data integrity check.
- if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
- // FIXME(marja): Compare the missing fields too (function length,
- // num_inner_functions).
+ if (scope->scope_type() == ScopeType::FUNCTION_SCOPE &&
+ !scope->AsDeclarationScope()->is_arrow_scope()) {
const PreParseData::FunctionData& data =
- FindFunction(scope->start_position());
+ function_index_.GetFunctionData(scope->start_position());
+ DCHECK(data.is_valid());
DCHECK_EQ(data.end, scope->end_position());
// FIXME(marja): unify num_parameters too and DCHECK here.
DCHECK_EQ(data.language_mode, scope->language_mode());
DCHECK_EQ(data.uses_super_property,
scope->AsDeclarationScope()->uses_super_property());
- DCHECK_EQ(data.calls_eval, scope->calls_eval());
- int index_from_data = -1;
+ uint32_t index_from_data = 0;
FindFunctionData(scope->start_position(), &index_from_data);
DCHECK_EQ(index_from_data, index);
}
@@ -140,16 +151,19 @@ void PreParsedScopeData::RestoreData(Scope* scope, int* index_ptr) const {
// This scope is a function scope representing a function we want to
// skip. So just skip over its data.
DCHECK(!scope->must_use_preparsed_scope_data());
+ // Check that we're moving forward (not backward) in the data.
+ DCHECK_GT(backing_store_[index + 2], index);
index = backing_store_[index + 2];
return;
}
+ DCHECK_GE(backing_store_.size(), index + 3);
DCHECK_EQ(backing_store_[index++], scope->scope_type());
if (backing_store_[index++]) {
scope->RecordEvalCall();
}
- int data_end_index = backing_store_[index++];
+ uint32_t data_end_index = backing_store_[index++];
USE(data_end_index);
if (!scope->is_hidden()) {
@@ -165,13 +179,14 @@ void PreParsedScopeData::RestoreData(Scope* scope, int* index_ptr) const {
DCHECK_EQ(data_end_index, index);
}
-FixedUint32Array* PreParsedScopeData::Serialize(Isolate* isolate) const {
+Handle<PodArray<uint32_t>> PreParsedScopeData::Serialize(
+ Isolate* isolate) const {
// FIXME(marja): save space by using a byte array and converting
// function_index_ to bytes.
- Handle<JSTypedArray> js_array = isolate->factory()->NewJSTypedArray(
- UINT32_ELEMENTS,
- function_index_.size() * kFunctionDataSize + backing_store_.size() + 1);
- FixedUint32Array* array = FixedUint32Array::cast(js_array->elements());
+ size_t length =
+ function_index_.size() * kFunctionDataSize + backing_store_.size() + 1;
+ Handle<PodArray<uint32_t>> array =
+ PodArray<uint32_t>::New(isolate, static_cast<int>(length), TENURED);
array->set(0, static_cast<uint32_t>(function_index_.size()));
int i = 1;
@@ -183,51 +198,56 @@ FixedUint32Array* PreParsedScopeData::Serialize(Isolate* isolate) const {
array->set(i++, it->second); // position in data
array->set(i++, function_data.end);
array->set(i++, function_data.num_parameters);
- array->set(i++, function_data.function_length);
array->set(i++, function_data.num_inner_functions);
array->set(i++, function_data.language_mode);
array->set(i++, function_data.uses_super_property);
- array->set(i++, function_data.calls_eval);
+ array->set(i++, skippable_functions_.find(item.first) !=
+ skippable_functions_.end());
}
for (size_t j = 0; j < backing_store_.size(); ++j) {
array->set(i++, static_cast<uint32_t>(backing_store_[j]));
}
+ DCHECK_EQ(array->length(), length);
return array;
}
-void PreParsedScopeData::Deserialize(Handle<FixedUint32Array> array) {
+void PreParsedScopeData::Deserialize(PodArray<uint32_t>* array) {
has_data_ = true;
- DCHECK(!array.is_null());
+ DCHECK_NOT_NULL(array);
if (array->length() == 0) {
return;
}
- int function_count = array->get_scalar(0);
+ int function_count = array->get(0);
CHECK(array->length() > function_count * kFunctionDataSize);
if (function_count == 0) {
return;
}
int i = 1;
for (; i < function_count * kFunctionDataSize + 1; i += kFunctionDataSize) {
- int start = array->get_scalar(i);
- function_data_positions_[start] = array->get_scalar(i + 1);
+ int start = array->get(i);
+ function_data_positions_[start] = array->get(i + 1);
function_index_.AddFunctionData(
start, PreParseData::FunctionData(
- array->get_scalar(i + 2), array->get_scalar(i + 3),
- array->get_scalar(i + 4), array->get_scalar(i + 5),
- LanguageMode(array->get_scalar(i + 6)),
- array->get_scalar(i + 7), array->get_scalar(i + 8)));
+ array->get(i + 2), array->get(i + 3), array->get(i + 4),
+ LanguageMode(array->get(i + 5)), array->get(i + 6)));
+ if (array->get(i + 7)) {
+ skippable_functions_.insert(start);
+ }
}
CHECK_EQ(function_index_.size(), function_count);
backing_store_.reserve(array->length() - i);
for (; i < array->length(); ++i) {
- backing_store_.push_back(array->get_scalar(i));
+ backing_store_.push_back(array->get(i));
}
}
-PreParseData::FunctionData PreParsedScopeData::FindFunction(
+PreParseData::FunctionData PreParsedScopeData::FindSkippableFunction(
int start_pos) const {
+ if (skippable_functions_.find(start_pos) == skippable_functions_.end()) {
+ return PreParseData::FunctionData();
+ }
return function_index_.GetFunctionData(start_pos);
}
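
Serialize/Deserialize above flatten everything into a single uint32_t array: a function count, then one fixed-width record per function (kFunctionDataSize is now 8, with a skippable flag replacing the removed function_length and calls_eval slots), then the raw backing store. A toy round-trip with an abbreviated three-word record shows the shape; the layout here is illustrative, not V8's:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    constexpr size_t kRecordSize = 3;  // start, end, skippable (toy layout)

    struct Record { uint32_t start, end, skippable; };

    std::vector<uint32_t> Serialize(const std::vector<Record>& records,
                                    const std::vector<uint32_t>& backing) {
      std::vector<uint32_t> out;
      out.push_back(static_cast<uint32_t>(records.size()));
      for (const Record& r : records) {
        out.push_back(r.start);
        out.push_back(r.end);
        out.push_back(r.skippable);
      }
      out.insert(out.end(), backing.begin(), backing.end());
      return out;
    }

    void Deserialize(const std::vector<uint32_t>& in,
                     std::vector<Record>* records,
                     std::vector<uint32_t>* backing) {
      size_t count = in[0];
      size_t i = 1;
      for (; i < 1 + count * kRecordSize; i += kRecordSize) {
        records->push_back({in[i], in[i + 1], in[i + 2]});
      }
      backing->assign(in.begin() + i, in.end());
    }

    int main() {
      std::vector<Record> records = {{10, 42, 1}};
      std::vector<uint32_t> backing = {7, 8, 9};
      std::vector<Record> r2;
      std::vector<uint32_t> b2;
      Deserialize(Serialize(records, backing), &r2, &b2);
      assert(r2.size() == 1 && r2[0].end == 42 && b2 == backing);
    }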
@@ -252,15 +272,17 @@ void PreParsedScopeData::SaveDataForVariable(Variable* var) {
}
void PreParsedScopeData::RestoreDataForVariable(Variable* var,
- int* index_ptr) const {
- int& index = *index_ptr;
+ uint32_t* index_ptr) const {
+ uint32_t& index = *index_ptr;
#ifdef DEBUG
const AstRawString* name = var->raw_name();
- DCHECK_EQ(backing_store_[index++], name->length());
+ DCHECK_GT(backing_store_.size(), index + name->length());
+ DCHECK_EQ(backing_store_[index++], static_cast<uint32_t>(name->length()));
for (int i = 0; i < name->length(); ++i) {
DCHECK_EQ(backing_store_[index++], name->raw_data()[i]);
}
#endif
+ DCHECK_GT(backing_store_.size(), index);
byte variable_data = backing_store_[index++];
if (VariableIsUsedField::decode(variable_data)) {
var->set_is_used();
@@ -288,7 +310,7 @@ void PreParsedScopeData::SaveDataForInnerScopes(Scope* scope) {
}
void PreParsedScopeData::RestoreDataForInnerScopes(Scope* scope,
- int* index_ptr) const {
+ uint32_t* index_ptr) const {
std::vector<Scope*> scopes;
for (Scope* inner = scope->inner_scope(); inner != nullptr;
inner = inner->sibling()) {
@@ -299,7 +321,8 @@ void PreParsedScopeData::RestoreDataForInnerScopes(Scope* scope,
}
}
-bool PreParsedScopeData::FindFunctionData(int start_pos, int* index) const {
+bool PreParsedScopeData::FindFunctionData(int start_pos,
+ uint32_t* index) const {
auto it = function_data_positions_.find(start_pos);
if (it == function_data_positions_.end()) {
return false;
@@ -310,7 +333,9 @@ bool PreParsedScopeData::FindFunctionData(int start_pos, int* index) const {
bool PreParsedScopeData::ScopeNeedsData(Scope* scope) {
if (scope->scope_type() == ScopeType::FUNCTION_SCOPE) {
- return true;
+ // Default constructors don't need data (they cannot contain inner functions
+ // defined by the user). Other functions do.
+ return !IsDefaultConstructor(scope->AsDeclarationScope()->function_kind());
}
if (!scope->is_hidden()) {
for (Variable* var : *scope->locals()) {
diff --git a/deps/v8/src/parsing/preparsed-scope-data.h b/deps/v8/src/parsing/preparsed-scope-data.h
index fb8acab696..5d4fc3a3a0 100644
--- a/deps/v8/src/parsing/preparsed-scope-data.h
+++ b/deps/v8/src/parsing/preparsed-scope-data.h
@@ -5,6 +5,7 @@
#ifndef V8_PARSING_PREPARSED_SCOPE_DATA_H_
#define V8_PARSING_PREPARSED_SCOPE_DATA_H_
+#include <set>
#include <unordered_map>
#include <vector>
@@ -65,40 +66,60 @@ class PreParsedScopeData {
// subscopes') variables.
void SaveData(Scope* scope);
+ // Save data for a function we might skip later. The data is used later for
+ // creating a FunctionLiteral.
+ void AddSkippableFunction(int start_position,
+ const PreParseData::FunctionData& function_data);
+
+ // Save variable allocation data for a function which contains skippable
+ // functions.
+ void AddFunction(int start_position,
+ const PreParseData::FunctionData& function_data);
+
+ // FIXME(marja): We need different kinds of data for the two types of
+ // functions. For a skippable function we need the end position + the data
+ // needed for creating a FunctionLiteral. For a function which contains
+ // skippable functions, we need the data affecting context allocation status
+ // of the variables (but e.g., no end position). Currently we just save the
+ // same data for both. Here we can save less data.
+
// Restores the information needed for allocating the Scopes's (and its
// subscopes') variables.
- void RestoreData(Scope* scope, int* index_ptr) const;
+ void RestoreData(Scope* scope, uint32_t* index_ptr) const;
void RestoreData(DeclarationScope* scope) const;
- FixedUint32Array* Serialize(Isolate* isolate) const;
- void Deserialize(Handle<FixedUint32Array> array);
+ Handle<PodArray<uint32_t>> Serialize(Isolate* isolate) const;
+ void Deserialize(PodArray<uint32_t>* array);
bool Consuming() const { return has_data_; }
bool Producing() const { return !has_data_; }
- PreParseData::FunctionData FindFunction(int start_pos) const;
+ PreParseData::FunctionData FindSkippableFunction(int start_pos) const;
private:
friend class ScopeTestHelper;
void SaveDataForVariable(Variable* var);
- void RestoreDataForVariable(Variable* var, int* index_ptr) const;
+ void RestoreDataForVariable(Variable* var, uint32_t* index_ptr) const;
void SaveDataForInnerScopes(Scope* scope);
- void RestoreDataForInnerScopes(Scope* scope, int* index_ptr) const;
- bool FindFunctionData(int start_pos, int* index) const;
+ void RestoreDataForInnerScopes(Scope* scope, uint32_t* index_ptr) const;
+ bool FindFunctionData(int start_pos, uint32_t* index) const;
static bool ScopeNeedsData(Scope* scope);
static bool IsSkippedFunctionScope(Scope* scope);
// TODO(marja): Make the backing store more efficient once we know exactly
// what data is needed.
- std::vector<byte> backing_store_;
+ std::vector<uint32_t> backing_store_;
- // Start pos -> FunctionData.
+ // Start pos -> FunctionData. Used for creating FunctionLiterals for skipped
+ // functions (when they're actually skipped).
PreParseData function_index_;
// Start pos -> position in backing_store_.
std::unordered_map<uint32_t, uint32_t> function_data_positions_;
+ // Start positions of skippable functions.
+ std::set<uint32_t> skippable_functions_;
bool has_data_ = false;
diff --git a/deps/v8/src/parsing/preparser.cc b/deps/v8/src/parsing/preparser.cc
index 95cf8caefd..c408af88c9 100644
--- a/deps/v8/src/parsing/preparser.cc
+++ b/deps/v8/src/parsing/preparser.cc
@@ -14,6 +14,7 @@
#include "src/parsing/parser-base.h"
#include "src/parsing/preparse-data-format.h"
#include "src/parsing/preparse-data.h"
+#include "src/parsing/preparsed-scope-data.h"
#include "src/parsing/preparser.h"
#include "src/unicode.h"
#include "src/utils.h"
@@ -100,10 +101,8 @@ PreParserIdentifier PreParser::GetSymbol() const {
return symbol;
}
-PreParser::PreParseResult PreParser::PreParseProgram(bool is_module,
- int* use_counts) {
+PreParser::PreParseResult PreParser::PreParseProgram(bool is_module) {
DCHECK_NULL(scope_);
- use_counts_ = use_counts;
DeclarationScope* scope = NewScriptScope();
#ifdef DEBUG
scope->set_is_being_lazily_parsed(true);
@@ -122,7 +121,6 @@ PreParser::PreParseResult PreParser::PreParseProgram(bool is_module,
PreParserStatementList body;
ParseStatementList(body, Token::EOS, &ok);
original_scope_ = nullptr;
- use_counts_ = nullptr;
if (stack_overflow()) return kPreParseStackOverflow;
if (!ok) {
ReportUnexpectedToken(scanner()->current_token());
@@ -213,6 +211,18 @@ PreParser::PreParseResult PreParser::PreParseFunction(
// masks the arguments object. Declare arguments before declaring the
// function var since the arguments object masks 'function arguments'.
function_scope->DeclareArguments(ast_value_factory());
+
+ if (FLAG_experimental_preparser_scope_analysis &&
+ preparsed_scope_data_ != nullptr) {
+ // We're not going to skip this function, but it might contain skippable
+ // functions inside it.
+ preparsed_scope_data_->AddFunction(
+ scope()->start_position(),
+ PreParseData::FunctionData(
+ scanner()->peek_location().end_pos, scope()->num_parameters(),
+ GetLastFunctionLiteralId(), scope()->language_mode(),
+ scope()->AsDeclarationScope()->uses_super_property()));
+ }
}
use_counts_ = nullptr;
@@ -276,9 +286,6 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
runtime_call_stats_,
counters[track_unresolved_variables_][parsing_on_main_thread_]);
- bool is_top_level =
- scope()->AllowsLazyParsingWithoutUnresolvedVariables(original_scope_);
-
DeclarationScope* function_scope = NewFunctionScope(kind);
function_scope->SetLanguageMode(language_mode);
FunctionState function_state(&function_state_, &scope_, function_scope);
@@ -326,24 +333,15 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
}
- if (FLAG_use_parse_tasks && is_top_level && preparse_data_) {
- preparse_data_->AddFunctionData(
+ if (FLAG_experimental_preparser_scope_analysis &&
+ track_unresolved_variables_ && preparsed_scope_data_ != nullptr) {
+ preparsed_scope_data_->AddSkippableFunction(
start_position,
PreParseData::FunctionData(
- end_position, formals.num_parameters(), formals.function_length,
- GetLastFunctionLiteralId() - func_id, language_mode,
- function_scope->uses_super_property(),
- function_scope->calls_eval()));
- // TODO(wiktorg) spin-off a parse task
- if (FLAG_trace_parse_tasks) {
- PrintF("Saved function at %d to %d with:\n", start_position,
- end_position);
- PrintF("\t- %d params\n", formals.num_parameters());
- PrintF("\t- %d function length\n", formals.function_length);
- PrintF("\t- %d inner-funcs\n", GetLastFunctionLiteralId() - func_id);
- }
+ end_position, scope()->num_parameters(),
+ GetLastFunctionLiteralId() - func_id, scope()->language_mode(),
+ scope()->AsDeclarationScope()->uses_super_property()));
}
-
if (FLAG_trace_preparse) {
PrintF(" [%s]: %i-%i\n",
track_unresolved_variables_ ? "Preparse resolution"
@@ -366,7 +364,7 @@ PreParser::LazyParsingResult PreParser::ParseStatementListAndLogFunction(
int body_end = scanner()->peek_location().end_pos;
DCHECK_EQ(this->scope()->is_function_scope(), formals->is_simple);
log_.LogFunction(body_end, formals->num_parameters(),
- formals->function_length, GetLastFunctionLiteralId());
+ GetLastFunctionLiteralId());
return kLazyParsingComplete;
}
@@ -374,13 +372,9 @@ PreParserExpression PreParser::ExpressionFromIdentifier(
PreParserIdentifier name, int start_position, InferName infer) {
VariableProxy* proxy = nullptr;
if (track_unresolved_variables_) {
- AstNodeFactory factory(ast_value_factory());
- // Setting the Zone is necessary because zone_ might be the temp Zone, and
- // AstValueFactory doesn't know about it.
- factory.set_zone(zone());
DCHECK_NOT_NULL(name.string_);
- proxy = scope()->NewUnresolved(&factory, name.string_, start_position,
- NORMAL_VARIABLE);
+ proxy = scope()->NewUnresolved(factory()->ast_node_factory(), name.string_,
+ start_position, NORMAL_VARIABLE);
}
return PreParserExpression::FromIdentifier(name, proxy, zone());
}
@@ -397,7 +391,7 @@ void PreParser::DeclareAndInitializeVariables(
declaration_descriptor->scope->RemoveUnresolved(variable);
Variable* var = scope()->DeclareVariableName(
variable->raw_name(), declaration_descriptor->mode);
- if (FLAG_preparser_scope_analysis) {
+ if (FLAG_experimental_preparser_scope_analysis) {
MarkLoopVariableAsAssigned(declaration_descriptor->scope, var);
// This is only necessary if there is an initializer, but we don't have
// that information here. Consequently, the preparser sometimes says
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 637efa2655..3bb85f0b20 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -178,6 +178,11 @@ class PreParserExpression {
variables);
}
+ static PreParserExpression NewTargetExpression() {
+ return PreParserExpression(TypeField::encode(kExpression) |
+ ExpressionTypeField::encode(kNewTarget));
+ }
+
static PreParserExpression ObjectLiteral(
ZoneList<VariableProxy*>* variables) {
return PreParserExpression(TypeField::encode(kObjectLiteralExpression),
@@ -358,7 +363,8 @@ class PreParserExpression {
kCallEvalExpression,
kSuperCallReference,
kNoTemplateTagExpression,
- kAssignment
+ kAssignment,
+ kNewTarget
};
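
kNewTarget is this enum's ninth value, which is why the following hunk widens ExpressionTypeField from 3 to 4 bits: three bits only cover eight enumerators. A sketch of the arithmetic (the enumerators before kCallEvalExpression are assumed from the surrounding source):

    #include <cstdint>

    enum ExpressionType : uint32_t {
      kThisExpression, kThisPropertyExpression, kPropertyExpression,
      kCallExpression, kCallEvalExpression, kSuperCallReference,
      kNoTemplateTagExpression, kAssignment,
      kNewTarget,  // ninth value: no longer fits in three bits
      kCount
    };

    // Smallest bit width that can hold the values 0 .. n-1.
    constexpr int BitsFor(uint32_t n) {
      int bits = 0;
      while ((1u << bits) < n) ++bits;
      return bits;
    }

    static_assert(BitsFor(kCount) == 4, "nine enumerators need four bits");

    int main() {}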
explicit PreParserExpression(uint32_t expression_code,
@@ -387,7 +393,7 @@ class PreParserExpression {
// The rest of the bits are interpreted depending on the value
// of the Type field, so they can share the storage.
- typedef BitField<ExpressionType, TypeField::kNext, 3> ExpressionTypeField;
+ typedef BitField<ExpressionType, TypeField::kNext, 4> ExpressionTypeField;
typedef BitField<bool, TypeField::kNext, 1> IsUseStrictField;
typedef BitField<bool, IsUseStrictField::kNext, 1> IsUseAsmField;
typedef BitField<PreParserIdentifier::Type, TypeField::kNext, 10>
@@ -538,11 +544,15 @@ class PreParserStatement {
class PreParserFactory {
public:
- explicit PreParserFactory(AstValueFactory* ast_value_factory)
- : ast_value_factory_(ast_value_factory),
- zone_(ast_value_factory->zone()) {}
+ explicit PreParserFactory(AstValueFactory* ast_value_factory, Zone* zone)
+ : ast_node_factory_(ast_value_factory, zone), zone_(zone) {}
+
+ void set_zone(Zone* zone) {
+ ast_node_factory_.set_zone(zone);
+ zone_ = zone;
+ }
- void set_zone(Zone* zone) { zone_ = zone; }
+ AstNodeFactory* ast_node_factory() { return &ast_node_factory_; }
PreParserExpression NewStringLiteral(PreParserIdentifier identifier,
int pos) {
@@ -551,10 +561,8 @@ class PreParserFactory {
PreParserExpression expression = PreParserExpression::Default();
if (identifier.string_ != nullptr) {
DCHECK(FLAG_lazy_inner_functions);
- AstNodeFactory factory(ast_value_factory_);
- factory.set_zone(zone_);
- VariableProxy* variable =
- factory.NewVariableProxy(identifier.string_, NORMAL_VARIABLE);
+ VariableProxy* variable = ast_node_factory_.NewVariableProxy(
+ identifier.string_, NORMAL_VARIABLE);
expression.AddVariable(variable, zone_);
}
return expression;
@@ -789,7 +797,9 @@ class PreParserFactory {
}
private:
- AstValueFactory* ast_value_factory_;
+ // For creating VariableProxy objects (if
+ // PreParser::track_unresolved_variables_ is used).
+ AstNodeFactory ast_node_factory_;
Zone* zone_;
};
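
PreParserFactory now owns an AstNodeFactory by value instead of letting each call site construct a temporary one and re-point its zone, and set_zone() keeps the embedded factory in sync when the parser switches to a temporary zone. The shape of that refactor, with toy types:

    struct Zone {};

    class AstNodeFactory {
     public:
      explicit AstNodeFactory(Zone* zone) : zone_(zone) {}
      void set_zone(Zone* zone) { zone_ = zone; }
      Zone* zone() const { return zone_; }
     private:
      Zone* zone_;
    };

    class PreParserFactory {
     public:
      explicit PreParserFactory(Zone* zone)
          : ast_node_factory_(zone), zone_(zone) {}
      // Keep the embedded factory allocating from the active zone.
      void set_zone(Zone* zone) {
        ast_node_factory_.set_zone(zone);
        zone_ = zone;
      }
      AstNodeFactory* ast_node_factory() { return &ast_node_factory_; }
     private:
      AstNodeFactory ast_node_factory_;  // embedded, reused across calls
      Zone* zone_;
    };

    int main() {
      Zone main_zone, temp_zone;
      PreParserFactory factory(&main_zone);
      factory.set_zone(&temp_zone);  // e.g. entering a temp-zoned parse
      return factory.ast_node_factory()->zone() == &temp_zone ? 0 : 1;
    }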
@@ -889,12 +899,12 @@ class PreParser : public ParserBase<PreParser> {
AstValueFactory* ast_value_factory,
PendingCompilationErrorHandler* pending_error_handler,
RuntimeCallStats* runtime_call_stats,
+ PreParsedScopeData* preparsed_scope_data = nullptr,
bool parsing_on_main_thread = true)
: ParserBase<PreParser>(zone, scanner, stack_limit, nullptr,
ast_value_factory, runtime_call_stats,
- parsing_on_main_thread),
+ preparsed_scope_data, parsing_on_main_thread),
use_counts_(nullptr),
- preparse_data_(FLAG_use_parse_tasks ? new PreParseData() : nullptr),
track_unresolved_variables_(false),
pending_error_handler_(pending_error_handler) {}
@@ -906,8 +916,7 @@ class PreParser : public ParserBase<PreParser> {
// success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and false if a stack-overflow happened
// during parsing.
- PreParseResult PreParseProgram(bool is_module = false,
- int* use_counts = nullptr);
+ PreParseResult PreParseProgram(bool is_module = false);
// Parses a single function literal, from the opening parentheses before
// parameters to the closing brace after the body.
@@ -923,8 +932,6 @@ class PreParser : public ParserBase<PreParser> {
bool track_unresolved_variables,
bool may_abort, int* use_counts);
- const PreParseData* preparse_data() const { return preparse_data_.get(); }
-
private:
// These types form an algebra over syntactic categories that is just
// rich enough to let us recognize and propagate the constructs that
@@ -941,9 +948,11 @@ class PreParser : public ParserBase<PreParser> {
bool AllowsLazyParsingWithoutUnresolvedVariables() const { return false; }
bool parse_lazily() const { return false; }
- V8_INLINE LazyParsingResult SkipFunction(
- FunctionKind kind, DeclarationScope* function_scope, int* num_parameters,
- int* function_length, bool is_inner_function, bool may_abort, bool* ok) {
+ V8_INLINE LazyParsingResult SkipFunction(FunctionKind kind,
+ DeclarationScope* function_scope,
+ int* num_parameters,
+ bool is_inner_function,
+ bool may_abort, bool* ok) {
UNREACHABLE();
return kLazyParsingComplete;
}
@@ -1122,16 +1131,23 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE void DeclareClassVariable(PreParserIdentifier name,
ClassInfo* class_info,
- int class_token_pos, bool* ok) {}
+ int class_token_pos, bool* ok) {
+ if (name.string_ != nullptr) {
+ DCHECK(track_unresolved_variables_);
+ scope()->DeclareVariableName(name.string_, CONST);
+ }
+ }
V8_INLINE void DeclareClassProperty(PreParserIdentifier class_name,
PreParserExpression property,
ClassLiteralProperty::Kind kind,
bool is_static, bool is_constructor,
ClassInfo* class_info, bool* ok) {
}
- V8_INLINE PreParserExpression RewriteClassLiteral(PreParserIdentifier name,
+ V8_INLINE PreParserExpression RewriteClassLiteral(Scope* scope,
+ PreParserIdentifier name,
ClassInfo* class_info,
- int pos, bool* ok) {
+ int pos, int end_pos,
+ bool* ok) {
bool has_default_constructor = !class_info->has_seen_constructor;
// Account for the default constructor.
if (has_default_constructor) GetNextFunctionLiteralId();
@@ -1494,12 +1510,9 @@ class PreParser : public ParserBase<PreParser> {
V8_INLINE PreParserExpression ThisExpression(int pos = kNoSourcePosition) {
ZoneList<VariableProxy*>* variables = nullptr;
if (track_unresolved_variables_) {
- AstNodeFactory factory(ast_value_factory());
- // Setting the Zone is necessary because zone_ might be the temp Zone, and
- // AstValueFactory doesn't know about it.
- factory.set_zone(zone());
VariableProxy* proxy = scope()->NewUnresolved(
- &factory, ast_value_factory()->this_string(), pos, THIS_VARIABLE);
+ factory()->ast_node_factory(), ast_value_factory()->this_string(),
+ pos, THIS_VARIABLE);
variables = new (zone()) ZoneList<VariableProxy*>(1, zone());
variables->Add(proxy, zone());
@@ -1508,15 +1521,34 @@ class PreParser : public ParserBase<PreParser> {
}
V8_INLINE PreParserExpression NewSuperPropertyReference(int pos) {
+ if (track_unresolved_variables_) {
+ scope()->NewUnresolved(factory()->ast_node_factory(),
+ ast_value_factory()->this_function_string(), pos,
+ NORMAL_VARIABLE);
+ scope()->NewUnresolved(factory()->ast_node_factory(),
+ ast_value_factory()->this_string(), pos,
+ THIS_VARIABLE);
+ }
return PreParserExpression::Default();
}
V8_INLINE PreParserExpression NewSuperCallReference(int pos) {
+ if (track_unresolved_variables_) {
+ scope()->NewUnresolved(factory()->ast_node_factory(),
+ ast_value_factory()->this_function_string(), pos,
+ NORMAL_VARIABLE);
+ scope()->NewUnresolved(factory()->ast_node_factory(),
+ ast_value_factory()->new_target_string(), pos,
+ NORMAL_VARIABLE);
+ scope()->NewUnresolved(factory()->ast_node_factory(),
+ ast_value_factory()->this_string(), pos,
+ THIS_VARIABLE);
+ }
return PreParserExpression::SuperCallReference();
}
V8_INLINE PreParserExpression NewTargetExpression(int pos) {
- return PreParserExpression::Default();
+ return PreParserExpression::NewTargetExpression();
}
V8_INLINE PreParserExpression FunctionSentExpression(int pos) {
@@ -1660,7 +1692,6 @@ class PreParser : public ParserBase<PreParser> {
// Preparser's private field members.
int* use_counts_;
- std::unique_ptr<PreParseData> preparse_data_;
bool track_unresolved_variables_;
PreParserLogger log_;
PendingCompilationErrorHandler* pending_error_handler_;
diff --git a/deps/v8/src/parsing/rewriter.cc b/deps/v8/src/parsing/rewriter.cc
index 87359f79ac..00eb29550a 100644
--- a/deps/v8/src/parsing/rewriter.cc
+++ b/deps/v8/src/parsing/rewriter.cc
@@ -24,7 +24,7 @@ class Processor final : public AstVisitor<Processor> {
breakable_(false),
zone_(ast_value_factory->zone()),
closure_scope_(closure_scope),
- factory_(ast_value_factory) {
+ factory_(ast_value_factory, ast_value_factory->zone()) {
DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
InitializeAstVisitor(stack_limit);
}
@@ -38,7 +38,7 @@ class Processor final : public AstVisitor<Processor> {
breakable_(false),
zone_(ast_value_factory->zone()),
closure_scope_(closure_scope),
- factory_(ast_value_factory) {
+ factory_(ast_value_factory, zone_) {
DCHECK_EQ(closure_scope, closure_scope->GetClosureScope());
InitializeAstVisitor(parser->stack_limit());
}
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index d3162dfbb2..e22308e8d5 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -387,8 +387,10 @@ void Utf8ExternalStreamingStream::SearchPosition(size_t position) {
// checking whether the # bytes in a chunk are equal to the # chars, and if
// so avoid the expensive SkipToPosition.)
bool ascii_only_chunk =
+ chunks_[chunk_no].start.incomplete_char ==
+ unibrow::Utf8::Utf8IncrementalBuffer(0) &&
(chunks_[chunk_no + 1].start.bytes - chunks_[chunk_no].start.bytes) ==
- (chunks_[chunk_no + 1].start.chars - chunks_[chunk_no].start.chars);
+ (chunks_[chunk_no + 1].start.chars - chunks_[chunk_no].start.chars);
if (ascii_only_chunk) {
size_t skip = position - chunks_[chunk_no].start.chars;
current_ = {chunk_no,
@@ -816,16 +818,20 @@ Utf16CharacterStream* ScannerStream::For(Handle<String> data) {
Utf16CharacterStream* ScannerStream::For(Handle<String> data, int start_pos,
int end_pos) {
DCHECK(start_pos >= 0);
+ DCHECK(start_pos <= end_pos);
DCHECK(end_pos <= data->length());
if (data->IsExternalOneByteString()) {
return new ExternalOneByteStringUtf16CharacterStream(
- Handle<ExternalOneByteString>::cast(data), start_pos, end_pos);
+ Handle<ExternalOneByteString>::cast(data),
+ static_cast<size_t>(start_pos), static_cast<size_t>(end_pos));
} else if (data->IsExternalTwoByteString()) {
return new ExternalTwoByteStringUtf16CharacterStream(
- Handle<ExternalTwoByteString>::cast(data), start_pos, end_pos);
+ Handle<ExternalTwoByteString>::cast(data),
+ static_cast<size_t>(start_pos), static_cast<size_t>(end_pos));
} else {
// TODO(vogelheim): Maybe call data.Flatten() first?
- return new GenericStringUtf16CharacterStream(data, start_pos, end_pos);
+ return new GenericStringUtf16CharacterStream(
+ data, static_cast<size_t>(start_pos), static_cast<size_t>(end_pos));
}
}
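
Back in the Utf8ExternalStreamingStream hunk above, a chunk may take the cheap skip path only if it is pure one-byte data; the added incomplete_char test closes a hole where a chunk beginning in the middle of a multi-byte UTF-8 sequence could still pass the bytes == chars comparison. The heuristic in isolation, with assumed field types:

    #include <cassert>
    #include <cstddef>

    struct ChunkStart {
      size_t bytes = 0;              // byte offset where the chunk starts
      size_t chars = 0;              // character offset where it starts
      unsigned incomplete_char = 0;  // pending partial UTF-8 sequence, 0 if none
    };

    bool AsciiOnlyChunk(const ChunkStart& begin, const ChunkStart& next) {
      return begin.incomplete_char == 0 &&
             (next.bytes - begin.bytes) == (next.chars - begin.chars);
    }

    int main() {
      ChunkStart a{0, 0, 0}, b{16, 16, 0};    // 16 bytes, 16 chars: ASCII
      ChunkStart c{16, 16, 0}, d{26, 22, 0};  // multi-byte characters inside
      assert(AsciiOnlyChunk(a, b));
      assert(!AsciiOnlyChunk(c, d));
    }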
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index 81839990c6..8dfb74c06a 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -182,16 +182,14 @@ Scanner::Scanner(UnicodeCache* unicode_cache)
octal_message_(MessageTemplate::kNone),
found_html_comment_(false) {}
-void Scanner::Initialize(Utf16CharacterStream* source) {
+void Scanner::Initialize(Utf16CharacterStream* source, bool is_module) {
DCHECK_NOT_NULL(source);
source_ = source;
+ is_module_ = is_module;
// Need to capture identifiers in order to recognize "get" and "set"
// in object literals.
Init();
- // Skip initial whitespace allowing HTML comment ends just like
- // after a newline and scan first token.
has_line_terminator_before_next_ = true;
- SkipWhiteSpace();
Scan();
}
@@ -443,7 +441,7 @@ static inline bool IsLittleEndianByteOrderMark(uc32 c) {
return c == 0xFFFE;
}
-bool Scanner::SkipWhiteSpace() {
+Token::Value Scanner::SkipWhiteSpace() {
int start_position = source_pos();
while (true) {
@@ -481,11 +479,26 @@ bool Scanner::SkipWhiteSpace() {
}
// Treat the rest of the line as a comment.
- SkipSingleLineComment();
+ Token::Value token = SkipSingleHTMLComment();
+ if (token == Token::ILLEGAL) {
+ return token;
+ }
}
// Return whether or not we skipped any characters.
- return source_pos() != start_position;
+ if (source_pos() == start_position) {
+ return Token::ILLEGAL;
+ }
+
+ return Token::WHITESPACE;
+}
+
+Token::Value Scanner::SkipSingleHTMLComment() {
+ if (is_module_) {
+ ReportScannerError(source_pos(), MessageTemplate::kHtmlCommentInModule);
+ return Token::ILLEGAL;
+ }
+ return SkipSingleLineComment();
}
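
SkipSingleHTMLComment centralizes the new rule: HTML-like comment markers ("<!--", and "-->" at the start of a line) keep their legacy single-line-comment treatment in classic scripts, but become a syntax error in ES modules. A minimal model of that decision (names assumed, error reporting elided):

    #include <cassert>

    enum class Tok { WHITESPACE, ILLEGAL };

    Tok SkipSingleHTMLComment(bool is_module) {
      // kHtmlCommentInModule in the real scanner.
      if (is_module) return Tok::ILLEGAL;
      // Otherwise the rest of the line is skipped like a // comment.
      return Tok::WHITESPACE;
    }

    int main() {
      assert(SkipSingleHTMLComment(false) == Tok::WHITESPACE);
      assert(SkipSingleHTMLComment(true) == Tok::ILLEGAL);
    }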
Token::Value Scanner::SkipSingleLineComment() {
@@ -606,7 +619,7 @@ Token::Value Scanner::ScanHtmlComment() {
}
found_html_comment_ = true;
- return SkipSingleLineComment();
+ return SkipSingleHTMLComment();
}
void Scanner::Scan() {
@@ -712,7 +725,7 @@ void Scanner::Scan() {
if (c0_ == '>' && HasAnyLineTerminatorBeforeNext()) {
// For compatibility with SpiderMonkey, we skip lines that
// start with an HTML comment end '-->'.
- token = SkipSingleLineComment();
+ token = SkipSingleHTMLComment();
} else {
token = Token::DEC;
}
@@ -864,10 +877,11 @@ void Scanner::Scan() {
token = ScanIdentifierOrKeyword();
} else if (IsDecimalDigit(c0_)) {
token = ScanNumber(false);
- } else if (SkipWhiteSpace()) {
- token = Token::WHITESPACE;
} else {
- token = Select(Token::ILLEGAL);
+ token = SkipWhiteSpace();
+ if (token == Token::ILLEGAL) {
+ Advance();
+ }
}
break;
}
@@ -1777,13 +1791,6 @@ double Scanner::DoubleValue() {
ALLOW_HEX | ALLOW_OCTAL | ALLOW_IMPLICIT_OCTAL | ALLOW_BINARY);
}
-
-bool Scanner::ContainsDot() {
- DCHECK(is_literal_one_byte());
- Vector<const uint8_t> str = literal_one_byte_string();
- return std::find(str.begin(), str.end(), '.') != str.end();
-}
-
bool Scanner::IsDuplicateSymbol(DuplicateFinder* duplicate_finder,
AstValueFactory* ast_value_factory) const {
DCHECK_NOT_NULL(duplicate_finder);
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 5b3b671523..03b3f316c2 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -197,7 +197,7 @@ class Scanner {
explicit Scanner(UnicodeCache* scanner_constants);
- void Initialize(Utf16CharacterStream* source);
+ void Initialize(Utf16CharacterStream* source, bool is_module);
// Returns the next token and advances input.
Token::Value Next();
@@ -247,7 +247,6 @@ class Scanner {
AstValueFactory* ast_value_factory) const;
double DoubleValue();
- bool ContainsDot();
inline bool CurrentMatches(Token::Value token) const {
DCHECK(Token::IsKeyword(token));
@@ -689,7 +688,8 @@ class Scanner {
// Scans a single JavaScript token.
void Scan();
- bool SkipWhiteSpace();
+ Token::Value SkipWhiteSpace();
+ Token::Value SkipSingleHTMLComment();
Token::Value SkipSingleLineComment();
Token::Value SkipSourceURLComment();
void TryToParseSourceURLComment();
@@ -717,6 +717,8 @@ class Scanner {
template <bool capture_raw>
uc32 ScanUnicodeEscape();
+ bool is_module_;
+
Token::Value ScanTemplateSpan();
// Return the current source position.
diff --git a/deps/v8/src/perf-jit.cc b/deps/v8/src/perf-jit.cc
index 86f4c4d9b4..a45d9b3fd4 100644
--- a/deps/v8/src/perf-jit.cc
+++ b/deps/v8/src/perf-jit.cc
@@ -284,7 +284,7 @@ SourcePositionInfo GetSourcePositionInfo(Handle<Code> code,
void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
// Compute the entry count and get the name of the script.
uint32_t entry_count = 0;
- for (SourcePositionTableIterator iterator(code->source_position_table());
+ for (SourcePositionTableIterator iterator(code->SourcePositionTable());
!iterator.done(); iterator.Advance()) {
entry_count++;
}
@@ -305,7 +305,7 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
Handle<Code> code_handle(code);
Handle<SharedFunctionInfo> function_handle(shared);
- for (SourcePositionTableIterator iterator(code->source_position_table());
+ for (SourcePositionTableIterator iterator(code->SourcePositionTable());
!iterator.done(); iterator.Advance()) {
SourcePositionInfo info(GetSourcePositionInfo(code_handle, function_handle,
iterator.source_position()));
@@ -320,7 +320,7 @@ void PerfJitLogger::LogWriteDebugInfo(Code* code, SharedFunctionInfo* shared) {
Address code_start = code->instruction_start();
- for (SourcePositionTableIterator iterator(code->source_position_table());
+ for (SourcePositionTableIterator iterator(code->SourcePositionTable());
!iterator.done(); iterator.Advance()) {
SourcePositionInfo info(GetSourcePositionInfo(code_handle, function_handle,
iterator.source_position()));
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
index 9ce247ad38..30a068a466 100644
--- a/deps/v8/src/ppc/assembler-ppc-inl.h
+++ b/deps/v8/src/ppc/assembler-ppc-inl.h
@@ -321,23 +321,23 @@ template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
+ visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
+ visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
+ visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
- visitor->VisitInternalReference(this);
+ visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
+ visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(this);
+ visitor->VisitDebugTarget(host(), this);
} else if (IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
+ visitor->VisitRuntimeEntry(host(), this);
}
}
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
index ec6330a351..d3c57a479b 100644
--- a/deps/v8/src/ppc/assembler-ppc.cc
+++ b/deps/v8/src/ppc/assembler-ppc.cc
@@ -1984,7 +1984,14 @@ void Assembler::GrowBuffer(int needed) {
if (space < needed) {
desc.buffer_size += needed - space;
}
- CHECK_GT(desc.buffer_size, 0); // no overflow
+
+ // Some internal data structures overflow for very large buffers, so we
+ // must ensure that kMaximalBufferSize is not too large.
+ if (desc.buffer_size > kMaximalBufferSize ||
+ static_cast<size_t>(desc.buffer_size) >
+ isolate_data().max_old_generation_size_) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
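
The GrowBuffer guard replaces the old CHECK_GT(desc.buffer_size, 0) overflow check with an explicit cap, failing fast before internal displacement fields can overflow. A toy version of guarded growth; the constant and the error path are illustrative:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    constexpr size_t kMaximalBufferSize = 512u * 1024u * 1024u;  // 512 MB

    void GrowBuffer(std::vector<unsigned char>* buffer, size_t needed) {
      size_t new_size = buffer->size() * 2;  // usual doubling strategy
      if (new_size < buffer->size() + needed) new_size = buffer->size() + needed;
      if (new_size > kMaximalBufferSize) {
        // Stands in for V8::FatalProcessOutOfMemory("Assembler::GrowBuffer").
        std::fprintf(stderr, "GrowBuffer: out of memory\n");
        std::abort();
      }
      buffer->resize(new_size);
    }

    int main() {
      std::vector<unsigned char> buffer(64);
      GrowBuffer(&buffer, 128);
      return buffer.size() >= 192 ? 0 : 1;
    }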
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
index 04678e3393..5eebdbbd17 100644
--- a/deps/v8/src/ppc/assembler-ppc.h
+++ b/deps/v8/src/ppc/assembler-ppc.h
@@ -1438,6 +1438,9 @@ class Assembler : public AssemblerBase {
RelocInfoWriter reloc_info_writer;
private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
// Repeated checking whether the trampoline pool should be emitted is rather
// expensive. By default we only check again once a number of instructions
// has been generated.
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
index 8c1ea4647e..beeb66b442 100644
--- a/deps/v8/src/ppc/code-stubs-ppc.cc
+++ b/deps/v8/src/ppc/code-stubs-ppc.cc
@@ -1214,77 +1214,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ blr();
}
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- // This case is handled prior to the RegExpExecStub call.
- __ Abort(kUnexpectedRegExpExecCall);
-#else // V8_INTERPRETED_REGEXP
- // Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 10;
- const int kParameterRegisters = 8;
- __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
-
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are before that on the stack or in registers.
-
- // Argument 10 (in stack parameter area): Pass current isolate address.
- __ mov(r11, Operand(ExternalReference::isolate_address(isolate())));
- __ StoreP(r11,
- MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
-
- // Argument 9 is a dummy that reserves the space used for
- // the return address added by the ExitFrame in native calls.
-
- // Argument 8 (r10): Indicate that this is a direct call from JavaScript.
- __ li(r10, Operand(1));
-
- // Argument 7 (r9): Start (high end) of backtracking stack memory area.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ mov(r11, Operand(address_of_regexp_stack_memory_address));
- __ LoadP(r11, MemOperand(r11, 0));
- __ mov(ip, Operand(address_of_regexp_stack_memory_size));
- __ LoadP(ip, MemOperand(ip, 0));
- __ add(r9, r11, ip);
-
- // Argument 6 (r8): Set the number of capture registers to zero to force
- // global regexps to behave as non-global. This does not affect non-global
- // regexps.
- __ li(r8, Operand::Zero());
-
- // Argument 5 (r7): static offsets vector buffer.
- __ mov(
- r7,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
-
- // Argument 4, r6: End of string data
- // Argument 3, r5: Start of string data
- CHECK(r6.is(RegExpExecDescriptor::StringEndRegister()));
- CHECK(r5.is(RegExpExecDescriptor::StringStartRegister()));
-
- // Argument 2 (r4): Previous index.
- CHECK(r4.is(RegExpExecDescriptor::LastIndexRegister()));
-
- // Argument 1 (r3): Subject string.
- CHECK(r3.is(RegExpExecDescriptor::StringRegister()));
-
- // Locate the code entry and call it.
- Register code_reg = RegExpExecDescriptor::CodeRegister();
- __ addi(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm, code_reg);
-
- __ LeaveExitFrame(false, no_reg, true);
-
- // Return the smi-tagged result.
- __ SmiTag(r3);
- __ Ret();
-#endif // V8_INTERPRETED_REGEXP
-}
-
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r3 : number of arguments to the construct function
@@ -3017,9 +2946,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ push(call_data);
Register scratch = call_data;
- if (!call_data_undefined()) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- }
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
// return value
__ push(scratch);
// return value default
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
index c8ad31cf2a..142b398b43 100644
--- a/deps/v8/src/ppc/deoptimizer-ppc.cc
+++ b/deps/v8/src/ppc/deoptimizer-ppc.cc
@@ -37,25 +37,22 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- if (FLAG_zap_code_space) {
- // Fail hard and early if we enter this code object again.
- byte* pointer = code->FindCodeAgeSequence();
- if (pointer != NULL) {
- pointer += kNoCodeAgeSequenceLength;
- } else {
- pointer = code->instruction_start();
- }
- CodePatcher patcher(isolate, pointer, 1);
- patcher.masm()->bkpt(0);
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int osr_offset = data->OsrPcOffset()->value();
- if (osr_offset > 0) {
- CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
- 1);
- osr_patcher.masm()->bkpt(0);
- }
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(isolate, pointer, 1);
+ patcher.masm()->bkpt(0);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 1);
+ osr_patcher.masm()->bkpt(0);
}
DeoptimizationInputData* deopt_data =
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
index 734ed4af36..cba9275d90 100644
--- a/deps/v8/src/ppc/interface-descriptors-ppc.cc
+++ b/deps/v8/src/ppc/interface-descriptors-ppc.cc
@@ -56,11 +56,6 @@ const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
-const Register RegExpExecDescriptor::StringRegister() { return r3; }
-const Register RegExpExecDescriptor::LastIndexRegister() { return r4; }
-const Register RegExpExecDescriptor::StringStartRegister() { return r5; }
-const Register RegExpExecDescriptor::StringEndRegister() { return r6; }
-const Register RegExpExecDescriptor::CodeRegister() { return r17; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r3; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r6; }
@@ -160,9 +155,20 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments
+ // r5 : start index (to support rest parameters)
+ // r4 : the target to call
+ Register registers[] = {r4, r3, r5};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r3 : number of arguments
+ // r6 : the new target
// r5 : start index (to support rest parameters)
// r4 : the target to call
- Register registers[] = {r4, r5};
+ Register registers[] = {r4, r6, r3, r5};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
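
For readers unfamiliar with call interface descriptors: InitializePlatformSpecific only records which machine registers carry which logical argument, in order. A small standalone sketch of the mapping the new ConstructForwardVarargsDescriptor declares above (the Register enum values here are stand-ins, not V8's register codes):

#include <cstddef>
#include <cstdio>

enum Register { r3, r4, r5, r6 };  // stand-ins; declaration order = r3..r6

int main() {
  // Mirrors the patch: Register registers[] = {r4, r6, r3, r5};
  const Register registers[] = {r4, r6, r3, r5};
  const char* roles[] = {"target", "new target", "argument count",
                         "start index (rest parameters)"};
  for (std::size_t i = 0; i < sizeof(registers) / sizeof(registers[0]); ++i) {
    std::printf("argument %zu (%s) travels in r%d\n", i, roles[i],
                3 + static_cast<int>(registers[i]));
  }
  return 0;
}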
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
index 4aa901c177..8c5ea97eee 100644
--- a/deps/v8/src/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/ppc/macro-assembler-ppc.cc
@@ -308,7 +308,7 @@ void MacroAssembler::RecordWriteField(
Add(dst, object, offset - kHeapObjectTag, r0);
if (emit_debug_code()) {
Label ok;
- andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
+ andi(r0, dst, Operand(kPointerSize - 1));
beq(&ok, cr0);
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -363,7 +363,7 @@ void MacroAssembler::RecordWriteForMap(Register object, Register map,
addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
+ andi(r0, dst, Operand(kPointerSize - 1));
beq(&ok, cr0);
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -810,8 +810,9 @@ void MacroAssembler::ConvertDoubleToUnsignedInt64(
void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
- DCHECK(!AreAliased(dst_low, src_high, shift));
- DCHECK(!AreAliased(dst_high, src_low, shift));
+ DCHECK(!AreAliased(dst_low, src_high));
+ DCHECK(!AreAliased(dst_high, src_low));
+ DCHECK(!AreAliased(dst_low, dst_high, shift));
Label less_than_32;
Label done;
cmpi(shift, Operand(32));
@@ -856,8 +857,9 @@ void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
- DCHECK(!AreAliased(dst_low, src_high, shift));
- DCHECK(!AreAliased(dst_high, src_low, shift));
+ DCHECK(!AreAliased(dst_low, src_high));
+ DCHECK(!AreAliased(dst_high, src_low));
+ DCHECK(!AreAliased(dst_low, dst_high, shift));
Label less_than_32;
Label done;
cmpi(shift, Operand(32));
@@ -2958,6 +2960,7 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) {
void MacroAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
+ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
// Just call directly. The function called cannot cause a GC, or
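
The two andi masks above are numerically identical whenever kPointerSize == 1 << kPointerSizeLog2 holds, so the rewrite is purely cosmetic. A compile-time sketch, assuming the usual 64-bit values:

#include <cstdint>

constexpr int kPointerSizeLog2 = 3;             // assumed 64-bit target
constexpr int kPointerSize = 1 << kPointerSizeLog2;

static_assert(((1 << kPointerSizeLog2) - 1) == (kPointerSize - 1),
              "both spellings produce the same low-bits alignment mask");

// An address is pointer-aligned iff the masked low bits are zero, which is
// exactly what the andi/beq pair in the write barrier checks.
constexpr bool IsPointerAligned(uintptr_t addr) {
  return (addr & (kPointerSize - 1)) == 0;
}
static_assert(IsPointerAligned(0x1000) && !IsPointerAligned(0x1001), "");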
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
index ed471dce5b..0face8c562 100644
--- a/deps/v8/src/ppc/simulator-ppc.cc
+++ b/deps/v8/src/ppc/simulator-ppc.cc
@@ -894,6 +894,8 @@ void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
+ base::LockGuard<base::Mutex> lock_guard(
+ isolate->simulator_redirection_mutex());
Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address();
}
@@ -1292,7 +1294,9 @@ static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
// Calls into the V8 runtime.
typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5);
+ intptr_t arg4, intptr_t arg5,
+ intptr_t arg6, intptr_t arg7,
+ intptr_t arg8);
typedef ObjectPair (*SimulatorRuntimePairCall)(intptr_t arg0, intptr_t arg1,
intptr_t arg2, intptr_t arg3,
intptr_t arg4, intptr_t arg5);
@@ -1329,7 +1333,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
(get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
0;
Redirection* redirection = Redirection::FromSwiInstruction(instr);
- const int kArgCount = 6;
+ const int kArgCount = 9;
+ const int kRegisterArgCount = 8;
int arg0_regnum = 3;
intptr_t result_buffer = 0;
bool uses_result_buffer =
@@ -1341,9 +1346,15 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
arg0_regnum++;
}
intptr_t arg[kArgCount];
- for (int i = 0; i < kArgCount; i++) {
+ // First eight arguments in registers r3-r10.
+ for (int i = 0; i < kRegisterArgCount; i++) {
arg[i] = get_register(arg0_regnum + i);
}
+ intptr_t* stack_pointer = reinterpret_cast<intptr_t*>(get_register(sp));
+ // The remaining argument is passed on the stack.
+ arg[kRegisterArgCount] = stack_pointer[kStackFrameExtraParamSlot];
+ STATIC_ASSERT(kArgCount == kRegisterArgCount + 1);
+ STATIC_ASSERT(kMaxCParameters == 9);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
@@ -1519,9 +1530,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF(
"Call to host function at %p,\n"
"\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
- static_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1],
- arg[2], arg[3], arg[4], arg[5]);
+ static_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1], arg[2],
+ arg[3], arg[4], arg[5], arg[6], arg[7], arg[8]);
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
get_register(sp));
@@ -1568,8 +1580,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
- intptr_t result =
- target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
+ arg[5], arg[6], arg[7], arg[8]);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %08" V8PRIxPTR "\n", result);
}
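
The simulator hunks above widen BUILTIN_CALL marshalling from six to nine arguments: eight travel in r3-r10 and the ninth is read from the caller's stack frame. A standalone sketch of that collection step (GetRegister, ReadStackSlot and the slot index are stand-ins, not the simulator's real accessors):

#include <array>
#include <cstdint>

constexpr int kRegisterArgCount = 8;           // r3..r10
constexpr int kArgCount = 9;                   // eight registers + one stack slot
constexpr int kStackFrameExtraParamSlot = 12;  // assumed ABI slot index

// Stubs standing in for the simulator's register/stack accessors.
intptr_t GetRegister(int regnum) { return regnum; }
intptr_t ReadStackSlot(int slot) { return 1000 + slot; }

std::array<intptr_t, kArgCount> CollectCallArguments() {
  std::array<intptr_t, kArgCount> arg{};
  const int arg0_regnum = 3;  // first integer argument register is r3
  for (int i = 0; i < kRegisterArgCount; i++) {
    arg[i] = GetRegister(arg0_regnum + i);     // arguments 0..7 in registers
  }
  arg[kRegisterArgCount] = ReadStackSlot(kStackFrameExtraParamSlot);  // arg 8
  static_assert(kArgCount == kRegisterArgCount + 1, "one stack argument");
  return arg;
}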
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
index 0c23b04a37..92da0d5811 100644
--- a/deps/v8/src/ppc/simulator-ppc.h
+++ b/deps/v8/src/ppc/simulator-ppc.h
@@ -26,17 +26,13 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*ppc_regexp_matcher)(String*, int, const byte*, const byte*, int*,
- int, Address, int, void*, Isolate*);
-
+ int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type ppc_regexp_matcher.
-// The ninth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
- (FUNCTION_CAST<ppc_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- NULL, p8))
+ (FUNCTION_CAST<ppc_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on ppc uses the C stack, we
@@ -345,7 +341,7 @@ class Simulator {
static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page);
- // Runtime call support.
+ // Runtime call support. Uses the isolate in a thread-safe way.
static void* RedirectExternalReference(
Isolate* isolate, void* external_function,
v8::internal::ExternalReference::Type type);
@@ -495,11 +491,9 @@ class Simulator {
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
- Simulator::current(isolate)->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, \
- (intptr_t)p2, (intptr_t)p3, (intptr_t)p4, \
- (intptr_t)p5, (intptr_t)p6, (intptr_t)p7, \
- (intptr_t)NULL, (intptr_t)p8)
-
+ Simulator::current(isolate)->Call( \
+ entry, 9, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, (intptr_t)p3, \
+ (intptr_t)p4, (intptr_t)p5, (intptr_t)p6, (intptr_t)p7, (intptr_t)p8)
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. The JS-based limit normally points near the end of
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index b360fccdfe..cdd80ffc44 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -18,6 +18,7 @@
#include "src/profiler/heap-snapshot-generator-inl.h"
#include "src/prototype.h"
#include "src/transitions.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
@@ -269,6 +270,7 @@ HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type,
size_t size,
unsigned trace_node_id) {
HeapEntry entry(this, type, name, id, size, trace_node_id);
+ DCHECK(sorted_entries_.is_empty());
entries_.Add(entry);
return &entries_.last();
}
@@ -290,26 +292,15 @@ void HeapSnapshot::FillChildren() {
}
}
-
-class FindEntryById {
- public:
- explicit FindEntryById(SnapshotObjectId id) : id_(id) { }
- int operator()(HeapEntry* const* entry) {
- if ((*entry)->id() == id_) return 0;
- return (*entry)->id() < id_ ? -1 : 1;
- }
- private:
- SnapshotObjectId id_;
-};
-
-
HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) {
List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
- // Perform a binary search by id.
- int index = SortedListBSearch(*entries_by_id, FindEntryById(id));
- if (index == -1)
- return NULL;
- return entries_by_id->at(index);
+
+ auto it = std::lower_bound(
+ entries_by_id->begin(), entries_by_id->end(), id,
+ [](HeapEntry* first, SnapshotObjectId val) { return first->id() < val; });
+
+ if (it == entries_by_id->end() || (*it)->id() != id) return NULL;
+ return *it;
}
@@ -972,12 +963,12 @@ class IndexedReferencesExtractor : public ObjectVisitor {
parent_end_(HeapObject::RawField(parent_obj_, parent_obj_->Size())),
parent_(parent),
next_index_(0) {}
- void VisitCodeEntry(Address entry_address) override {
- Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
- generator_->SetInternalReference(parent_obj_, parent_, "code", code);
- generator_->TagCodeObject(code);
+ void VisitCodeEntry(JSFunction* host, Address entry_address) override {
+ Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+ generator_->SetInternalReference(parent_obj_, parent_, "code", code);
+ generator_->TagCodeObject(code);
}
- void VisitPointers(Object** start, Object** end) override {
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
int index = static_cast<int>(p - HeapObject::RawField(parent_obj_, 0));
++next_index_;
@@ -1364,9 +1355,6 @@ void V8HeapExplorer::ExtractSharedFunctionInfoReferences(
SetInternalReference(obj, entry, "function_identifier",
shared->function_identifier(),
SharedFunctionInfo::kFunctionIdentifierOffset);
- SetInternalReference(obj, entry,
- "optimized_code_map", shared->optimized_code_map(),
- SharedFunctionInfo::kOptimizedCodeMapOffset);
SetInternalReference(obj, entry, "feedback_metadata",
shared->feedback_metadata(),
SharedFunctionInfo::kFeedbackMetadataOffset);
@@ -1685,8 +1673,7 @@ HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
return filler_->FindOrAddEntry(obj, this);
}
-
-class RootsReferencesExtractor : public ObjectVisitor {
+class RootsReferencesExtractor : public RootVisitor {
private:
struct IndexTag {
IndexTag(int index, VisitorSynchronization::SyncTag tag)
@@ -1702,7 +1689,7 @@ class RootsReferencesExtractor : public ObjectVisitor {
heap_(heap) {
}
- void VisitPointers(Object** start, Object** end) override {
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
if (collecting_all_references_) {
for (Object** p = start; p < end; p++) all_references_.Add(*p);
} else {
@@ -2147,9 +2134,9 @@ void V8HeapExplorer::TagFixedArraySubType(const FixedArray* array,
array_types_[array] = type;
}
-class GlobalObjectsEnumerator : public ObjectVisitor {
+class GlobalObjectsEnumerator : public RootVisitor {
public:
- void VisitPointers(Object** start, Object** end) override {
+ void VisitRootPointers(Root root, Object** start, Object** end) override {
for (Object** p = start; p < end; p++) {
if ((*p)->IsNativeContext()) {
Context* context = Context::cast(*p);
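
GetEntryById now leans on std::lower_bound with a heterogeneous comparator instead of the hand-rolled FindEntryById functor. A minimal sketch of the same lookup pattern over a stand-in HeapEntry type (not V8's):

#include <algorithm>
#include <cstdint>
#include <vector>

using SnapshotObjectId = uint32_t;
struct HeapEntry {
  SnapshotObjectId id;
};

// Binary search over a list sorted by id; returns nullptr when absent,
// matching the NULL-on-miss behaviour of the patched GetEntryById.
HeapEntry* GetEntryById(std::vector<HeapEntry*>& entries_by_id,
                        SnapshotObjectId id) {
  auto it = std::lower_bound(
      entries_by_id.begin(), entries_by_id.end(), id,
      [](HeapEntry* entry, SnapshotObjectId val) { return entry->id < val; });
  if (it == entries_by_id.end() || (*it)->id != id) return nullptr;
  return *it;
}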
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 022f238cc5..84a23e4c0d 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -12,6 +12,8 @@
#include "src/base/platform/time.h"
#include "src/objects.h"
#include "src/profiler/strings-storage.h"
+#include "src/string-hasher.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 6e9184d39e..92bf0b497f 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -340,6 +340,8 @@ class PropertyDetails BASE_EMBEDDED {
: public BitField<PropertyAttributes, ConstnessField::kNext, 3> {};
static const int kAttributesReadOnlyMask =
(READ_ONLY << AttributesField::kShift);
+ static const int kAttributesDontDeleteMask =
+ (DONT_DELETE << AttributesField::kShift);
// Bit fields for normalized objects.
class PropertyCellTypeField
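
The new mask mirrors the existing read-only one: each is the attribute enum value shifted into the packed details word. A sketch, assuming a zero shift for illustration (in V8 the shift comes from the surrounding BitField layout):

#include <cstdint>

enum PropertyAttributes { NONE = 0, READ_ONLY = 1, DONT_ENUM = 2,
                          DONT_DELETE = 4 };
constexpr int kShift = 0;  // assumed; AttributesField::kShift in V8

constexpr int kAttributesReadOnlyMask = READ_ONLY << kShift;
constexpr int kAttributesDontDeleteMask = DONT_DELETE << kShift;

// Testing a details word against the mask answers "is this property
// non-deletable?" without decoding the whole bit field.
constexpr bool IsDontDelete(uint32_t details) {
  return (details & kAttributesDontDeleteMask) != 0;
}
static_assert(IsDontDelete(DONT_DELETE << kShift), "mask extracts the bit");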
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
index 579423ff32..6ce35fff09 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.cc
@@ -41,14 +41,13 @@ namespace internal {
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
- * - fp[56] Isolate* isolate (address of the current isolate)
- * - fp[52] direct_call (if 1, direct call from JavaScript code,
+ * - fp[52] Isolate* isolate (address of the current isolate)
+ * - fp[48] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
- * - fp[48] stack_area_base (high end of the memory area to use as
+ * - fp[44] stack_area_base (high end of the memory area to use as
* backtracking stack).
- * - fp[44] capture array size (may fit multiple sets of matches)
- * - fp[40] int* capture_array (int[num_saved_registers_], for output).
- * - fp[36] secondary link/return address used by native call.
+ * - fp[40] capture array size (may fit multiple sets of matches)
+ * - fp[36] int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
* - fp[32] return address (lr).
* - fp[28] old frame pointer (r11).
@@ -81,17 +80,14 @@ namespace internal {
* int start_index,
* Address start,
* Address end,
- * Address secondary_return_address, // Only used by native call.
* int* capture_output_array,
+ * int num_capture_registers,
* byte* stack_area_base,
- * bool direct_call = false)
+ * bool direct_call = false,
+ * Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
* in arm/simulator-arm.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the LR register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
*/
#define __ ACCESS_MASM(masm_)
@@ -318,11 +314,11 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ sub(r1, r1, r4);
}
// Isolate.
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
if (unicode) {
__ mov(r3, Operand(0));
} else // NOLINT
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
{
__ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
}
diff --git a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
index 6c910644b2..a522f53d4a 100644
--- a/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/regexp/arm/regexp-macro-assembler-arm.h
@@ -103,9 +103,8 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 8 * kPointerSize;
- static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
- static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
+ static const int kRegisterOutput = kReturnAddress + kPointerSize;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 7d015d096b..f740470ae3 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -56,10 +56,7 @@ namespace internal {
* (as referred to in
* the code)
*
- * - fp[104] isolate Address of the current isolate.
- * - fp[96] return_address Secondary link/return address
- * used by an exit frame if this is a
- * native call.
+ * - fp[96] isolate Address of the current isolate.
* ^^^ csp when called ^^^
* - fp[88] lr Return from the RegExp code.
* - fp[80] r29 Old frame pointer (CalleeSaved).
@@ -89,23 +86,18 @@ namespace internal {
* The data up to the return address must be placed there by the calling
* code and the remaining arguments are passed in registers, e.g. by calling the
* code entry as cast to a function with the signature:
- * int (*match)(String* input,
- * int start_offset,
- * Address input_start,
- * Address input_end,
- * int* output,
- * int output_size,
- * Address stack_base,
+ * int (*match)(String* input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * int* capture_output_array,
+ * int num_capture_registers,
+ * byte* stack_area_base,
* bool direct_call = false,
- * Address secondary_return_address, // Only used by native call.
- * Isolate* isolate)
+ * Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
* in arm64/simulator-arm64.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the LR register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
*/
#define __ ACCESS_MASM(masm_)
@@ -401,11 +393,11 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Sub(x1, x1, Operand(capture_length, SXTW));
}
// Isolate.
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
if (unicode) {
__ Mov(x3, Operand(0));
} else // NOLINT
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
{
__ Mov(x3, ExternalReference::isolate_address(isolate()));
}
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
index 5db220e962..614be624a9 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.h
@@ -109,9 +109,8 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
// Return address.
// It is placed above the 11 callee-saved registers.
static const int kReturnAddress = kCalleeSavedRegisters + 11 * kPointerSize;
- static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack parameter placed by caller.
- static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
+ static const int kIsolate = kReturnAddress + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
diff --git a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
index 6b4ea247ef..c279304777 100644
--- a/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/regexp/ia32/regexp-macro-assembler-ia32.cc
@@ -69,9 +69,10 @@ namespace internal {
* Address start,
* Address end,
* int* capture_output_array,
- * bool at_start,
+ * int num_capture_registers,
* byte* stack_area_base,
- * bool direct_call)
+ * bool direct_call = false,
+ * Isolate* isolate);
*/
#define __ ACCESS_MASM(masm_)
@@ -298,11 +299,11 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase(
// Isolate* isolate or 0 if unicode flag.
// Set isolate.
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
if (unicode) {
__ mov(Operand(esp, 3 * kPointerSize), Immediate(0));
} else // NOLINT
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
{
__ mov(Operand(esp, 3 * kPointerSize),
Immediate(ExternalReference::isolate_address(isolate())));
diff --git a/deps/v8/src/regexp/interpreter-irregexp.cc b/deps/v8/src/regexp/interpreter-irregexp.cc
index 4f8f96a536..f27f43aa5c 100644
--- a/deps/v8/src/regexp/interpreter-irregexp.cc
+++ b/deps/v8/src/regexp/interpreter-irregexp.cc
@@ -16,9 +16,9 @@
#include "src/unicode.h"
#include "src/utils.h"
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
#include "unicode/uchar.h"
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/jsregexp-inl.h b/deps/v8/src/regexp/jsregexp-inl.h
index 4bcda43496..0b73c2fbc3 100644
--- a/deps/v8/src/regexp/jsregexp-inl.h
+++ b/deps/v8/src/regexp/jsregexp-inl.h
@@ -7,7 +7,6 @@
#define V8_REGEXP_JSREGEXP_INL_H_
#include "src/allocation.h"
-#include "src/heap/heap.h"
#include "src/objects.h"
#include "src/regexp/jsregexp.h"
diff --git a/deps/v8/src/regexp/jsregexp.cc b/deps/v8/src/regexp/jsregexp.cc
index 8ab2681dcf..61cabd0b94 100644
--- a/deps/v8/src/regexp/jsregexp.cc
+++ b/deps/v8/src/regexp/jsregexp.cc
@@ -26,10 +26,10 @@
#include "src/string-search.h"
#include "src/unicode-decoder.h"
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
#include "unicode/uniset.h"
#include "unicode/utypes.h"
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
#ifndef V8_INTERPRETED_REGEXP
#if V8_TARGET_ARCH_IA32
@@ -3327,9 +3327,8 @@ TextNode* TextNode::CreateForCharacterRanges(Zone* zone,
RegExpNode* on_success) {
DCHECK_NOT_NULL(ranges);
ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(1, zone);
- elms->Add(
- TextElement::CharClass(new (zone) RegExpCharacterClass(ranges, false)),
- zone);
+ elms->Add(TextElement::CharClass(new (zone) RegExpCharacterClass(ranges)),
+ zone);
return new (zone) TextNode(elms, read_backward, on_success);
}
@@ -3341,12 +3340,12 @@ TextNode* TextNode::CreateForSurrogatePair(Zone* zone, CharacterRange lead,
ZoneList<CharacterRange>* lead_ranges = CharacterRange::List(zone, lead);
ZoneList<CharacterRange>* trail_ranges = CharacterRange::List(zone, trail);
ZoneList<TextElement>* elms = new (zone) ZoneList<TextElement>(2, zone);
- elms->Add(TextElement::CharClass(
- new (zone) RegExpCharacterClass(lead_ranges, false)),
- zone);
- elms->Add(TextElement::CharClass(
- new (zone) RegExpCharacterClass(trail_ranges, false)),
- zone);
+ elms->Add(
+ TextElement::CharClass(new (zone) RegExpCharacterClass(lead_ranges)),
+ zone);
+ elms->Add(
+ TextElement::CharClass(new (zone) RegExpCharacterClass(trail_ranges)),
+ zone);
return new (zone) TextNode(elms, read_backward, on_success);
}
@@ -4851,7 +4850,7 @@ static bool CompareRanges(ZoneList<CharacterRange>* ranges,
bool RegExpCharacterClass::is_standard(Zone* zone) {
// TODO(lrn): Remove need for this function, by not throwing away information
// along the way.
- if (is_negated_) {
+ if (is_negated()) {
return false;
}
if (set_.is_standard()) {
@@ -5114,7 +5113,7 @@ RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
}
void AddUnicodeCaseEquivalents(ZoneList<CharacterRange>* ranges, Zone* zone) {
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
// Use ICU to compute the case fold closure over the ranges.
icu::UnicodeSet set;
for (int i = 0; i < ranges->length(); i++) {
@@ -5132,7 +5131,7 @@ void AddUnicodeCaseEquivalents(ZoneList<CharacterRange>* ranges, Zone* zone) {
}
// No errors and everything we collected have been ranges.
CharacterRange::Canonicalize(ranges);
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
}
@@ -5144,7 +5143,8 @@ RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
if (compiler->needs_unicode_case_equivalents()) {
AddUnicodeCaseEquivalents(ranges, zone);
}
- if (compiler->unicode() && !compiler->one_byte()) {
+ if (compiler->unicode() && !compiler->one_byte() &&
+ !contains_split_surrogate()) {
if (is_negated()) {
ZoneList<CharacterRange>* negated =
new (zone) ZoneList<CharacterRange>(2, zone);
@@ -5154,7 +5154,7 @@ RegExpNode* RegExpCharacterClass::ToNode(RegExpCompiler* compiler,
if (ranges->length() == 0) {
ranges->Add(CharacterRange::Everything(), zone);
RegExpCharacterClass* fail =
- new (zone) RegExpCharacterClass(ranges, true);
+ new (zone) RegExpCharacterClass(ranges, NEGATED);
return new (zone) TextNode(fail, compiler->read_backward(), on_success);
}
if (standard_type() == '*') {
@@ -5352,6 +5352,7 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
Zone* zone = compiler->zone();
ZoneList<RegExpTree*>* alternatives = this->alternatives();
int length = alternatives->length();
+ const bool unicode = compiler->unicode();
int write_posn = 0;
int i = 0;
@@ -5368,6 +5369,10 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
i++;
continue;
}
+ DCHECK_IMPLIES(unicode,
+ !unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
+ bool contains_trail_surrogate =
+ unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
int first_in_run = i;
i++;
while (i < length) {
@@ -5375,6 +5380,10 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
if (!alternative->IsAtom()) break;
atom = alternative->AsAtom();
if (atom->length() != 1) break;
+ DCHECK_IMPLIES(unicode,
+ !unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
+ contains_trail_surrogate |=
+ unibrow::Utf16::IsTrailSurrogate(atom->data().at(0));
i++;
}
if (i > first_in_run + 1) {
@@ -5387,8 +5396,12 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
DCHECK_EQ(old_atom->length(), 1);
ranges->Add(CharacterRange::Singleton(old_atom->data().at(0)), zone);
}
+ RegExpCharacterClass::Flags flags;
+ if (unicode && contains_trail_surrogate) {
+ flags = RegExpCharacterClass::CONTAINS_SPLIT_SURROGATE;
+ }
alternatives->at(write_posn++) =
- new (zone) RegExpCharacterClass(ranges, false);
+ new (zone) RegExpCharacterClass(ranges, flags);
} else {
// Just copy any trivial alternatives.
for (int j = first_in_run; j < i; j++) {
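
The jsregexp.cc hunks above replace RegExpCharacterClass's bool `negated` constructor argument with a small flag set, so that CONTAINS_SPLIT_SURROGATE can be tracked alongside NEGATED. A stand-in sketch of that shape (values illustrative, not V8's declaration):

#include <cstdint>

class RegExpCharacterClassSketch {
 public:
  // In the patch these arrive via a Flags bit set instead of a lone bool.
  enum Flags : uint8_t {
    NONE = 0,
    NEGATED = 1 << 0,
    CONTAINS_SPLIT_SURROGATE = 1 << 1,
  };

  constexpr explicit RegExpCharacterClassSketch(uint8_t flags = NONE)
      : flags_(flags) {}

  constexpr bool is_negated() const { return (flags_ & NEGATED) != 0; }
  constexpr bool contains_split_surrogate() const {
    return (flags_ & CONTAINS_SPLIT_SURROGATE) != 0;
  }

 private:
  uint8_t flags_;
};

static_assert(RegExpCharacterClassSketch(
                  RegExpCharacterClassSketch::NEGATED).is_negated(),
              "NEGATED replaces the old bool constructor argument");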
diff --git a/deps/v8/src/regexp/mips/OWNERS b/deps/v8/src/regexp/mips/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/regexp/mips/OWNERS
+++ b/deps/v8/src/regexp/mips/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
index 062d6618e9..11590599f9 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.cc
@@ -38,14 +38,13 @@ namespace internal {
*
* The stack will have the following structure:
*
- * - fp[64] Isolate* isolate (address of the current isolate)
- * - fp[60] direct_call (if 1, direct call from JavaScript code,
+ * - fp[60] Isolate* isolate (address of the current isolate)
+ * - fp[56] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
- * - fp[56] stack_area_base (High end of the memory area to use as
+ * - fp[52] stack_area_base (High end of the memory area to use as
* backtracking stack).
- * - fp[52] capture array size (may fit multiple sets of matches)
- * - fp[48] int* capture_array (int[num_saved_registers_], for output).
- * - fp[44] secondary link/return address used by native call.
+ * - fp[48] capture array size (may fit multiple sets of matches)
+ * - fp[44] int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
* - fp[40] return address (ra).
* - fp[36] old frame pointer (fp).
@@ -78,17 +77,14 @@ namespace internal {
* int start_index,
* Address start,
* Address end,
- * Address secondary_return_address, // Only used by native call.
* int* capture_output_array,
+ * int num_capture_registers,
* byte* stack_area_base,
- * bool direct_call = false)
+ * bool direct_call = false,
+ * Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
* in mips/simulator-mips.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the ra register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
*/
#define __ ACCESS_MASM(masm_)
@@ -324,11 +320,11 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ Subu(a1, a1, Operand(s3));
}
// Isolate.
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
if (unicode) {
__ mov(a3, zero_reg);
} else // NOLINT
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
{
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
}
diff --git a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
index 6dedb1e748..6c1ba64c51 100644
--- a/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
+++ b/deps/v8/src/regexp/mips/regexp-macro-assembler-mips.h
@@ -103,9 +103,8 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
static const int kStoredRegisters = kFramePointer;
// Return address (stored from link register, read into pc on return).
static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
- static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack frame header.
- static const int kStackFrameHeader = kReturnAddress + kPointerSize;
+ static const int kStackFrameHeader = kReturnAddress;
// Stack parameters placed by caller.
static const int kRegisterOutput = kStackFrameHeader + 20;
static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
diff --git a/deps/v8/src/regexp/mips64/OWNERS b/deps/v8/src/regexp/mips64/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/src/regexp/mips64/OWNERS
+++ b/deps/v8/src/regexp/mips64/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
index e0317dec8a..595d6fd4de 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.cc
@@ -17,7 +17,9 @@ namespace v8 {
namespace internal {
#ifndef V8_INTERPRETED_REGEXP
-/*
+
+/* clang-format off
+ *
* This assembler uses the following register assignment convention
* - t3 : Temporarily stores the index of capture start after a matching pass
* for a global regexp.
@@ -41,15 +43,14 @@ namespace internal {
*
* The O32 stack will have the following structure:
*
- * - fp[76] Isolate* isolate (address of the current isolate)
- * - fp[72] direct_call (if 1, direct call from JavaScript code,
+ * - fp[72] Isolate* isolate (address of the current isolate)
+ * - fp[68] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
- * - fp[68] stack_area_base (High end of the memory area to use as
+ * - fp[64] stack_area_base (High end of the memory area to use as
* backtracking stack).
- * - fp[64] capture array size (may fit multiple sets of matches)
- * - fp[60] int* capture_array (int[num_saved_registers_], for output).
+ * - fp[60] capture array size (may fit multiple sets of matches)
* - fp[44..59] MIPS O32 four argument slots
- * - fp[40] secondary link/return address used by native call.
+ * - fp[40] int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
* - fp[36] return address (ra).
* - fp[32] old frame pointer (fp).
@@ -74,9 +75,8 @@ namespace internal {
*
* The N64 stack will have the following structure:
*
- * - fp[88] Isolate* isolate (address of the current isolate) kIsolate
- * - fp[80] secondary link/return address used by exit frame on native call. kSecondaryReturnAddress
- kStackFrameHeader
+ * - fp[80] Isolate* isolate (address of the current isolate) kIsolate
+ * kStackFrameHeader
* --- sp when called ---
* - fp[72] ra Return from RegExp code (ra). kReturnAddress
* - fp[64] s9, old-fp Old fp, callee saved(s9).
@@ -112,19 +112,16 @@ namespace internal {
* int start_index,
* Address start,
* Address end,
- * Address secondary_return_address, // Only used by native call.
* int* capture_output_array,
+ * int num_capture_registers,
* byte* stack_area_base,
* bool direct_call = false,
- * void* return_address,
* Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
* in mips/simulator-mips.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the ra register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
+ *
+ * clang-format on
*/
#define __ ACCESS_MASM(masm_)
@@ -186,9 +183,9 @@ void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
DCHECK(reg >= 0);
DCHECK(reg < num_registers_);
if (by != 0) {
- __ ld(a0, register_location(reg));
+ __ Ld(a0, register_location(reg));
__ Daddu(a0, a0, Operand(by));
- __ sd(a0, register_location(reg));
+ __ Sd(a0, register_location(reg));
}
}
@@ -218,7 +215,7 @@ void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
- __ ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
__ Daddu(a0, current_input_offset(), Operand(-char_size()));
BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
}
@@ -226,7 +223,7 @@ void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
void RegExpMacroAssemblerMIPS::CheckNotAtStart(int cp_offset,
Label* on_not_at_start) {
- __ ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
__ Daddu(a0, current_input_offset(),
Operand(-char_size() + cp_offset * char_size()));
BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
@@ -240,7 +237,7 @@ void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
Label backtrack_non_equal;
- __ lw(a0, MemOperand(backtrack_stackpointer(), 0));
+ __ Lw(a0, MemOperand(backtrack_stackpointer(), 0));
__ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
__ Daddu(backtrack_stackpointer(),
backtrack_stackpointer(),
@@ -253,8 +250,8 @@ void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
Label fallthrough;
- __ ld(a0, register_location(start_reg)); // Index of start of capture.
- __ ld(a1, register_location(start_reg + 1)); // Index of end of capture.
+ __ Ld(a0, register_location(start_reg)); // Index of start of capture.
+ __ Ld(a1, register_location(start_reg + 1)); // Index of end of capture.
__ Dsubu(a1, a1, a0); // Length of capture.
// At this point, the capture registers are either both set or both cleared.
@@ -263,7 +260,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ Branch(&fallthrough, eq, a1, Operand(zero_reg));
if (read_backward) {
- __ ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
__ Daddu(t1, t1, a1);
BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
} else {
@@ -292,9 +289,9 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
Label loop;
__ bind(&loop);
- __ lbu(a3, MemOperand(a0, 0));
+ __ Lbu(a3, MemOperand(a0, 0));
__ daddiu(a0, a0, char_size());
- __ lbu(a4, MemOperand(a2, 0));
+ __ Lbu(a4, MemOperand(a2, 0));
__ daddiu(a2, a2, char_size());
__ Branch(&loop_check, eq, a4, Operand(a3));
@@ -323,8 +320,8 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Compute new value of character position after the matched part.
__ Dsubu(current_input_offset(), a2, end_of_input_address());
if (read_backward) {
- __ ld(t1, register_location(start_reg)); // Index of start of capture.
- __ ld(a2, register_location(start_reg + 1)); // Index of end of capture.
+ __ Ld(t1, register_location(start_reg)); // Index of start of capture.
+ __ Ld(a2, register_location(start_reg + 1)); // Index of end of capture.
__ Daddu(current_input_offset(), current_input_offset(), Operand(t1));
__ Dsubu(current_input_offset(), current_input_offset(), Operand(a2));
}
@@ -360,11 +357,11 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
__ Dsubu(a1, a1, Operand(s3));
}
// Isolate.
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
if (unicode) {
__ mov(a3, zero_reg);
} else // NOLINT
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
{
__ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
}
@@ -379,7 +376,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
// Restore regexp engine registers.
__ MultiPop(regexp_registers_to_retain);
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
// Check if function returned non-zero for success or zero for failure.
BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
@@ -402,8 +399,8 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
Label success;
// Find length of back-referenced capture.
- __ ld(a0, register_location(start_reg));
- __ ld(a1, register_location(start_reg + 1));
+ __ Ld(a0, register_location(start_reg));
+ __ Ld(a1, register_location(start_reg + 1));
__ Dsubu(a1, a1, a0); // Length to check.
// At this point, the capture registers are either both set or both cleared.
@@ -412,7 +409,7 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
__ Branch(&fallthrough, eq, a1, Operand(zero_reg));
if (read_backward) {
- __ ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
__ Daddu(t1, t1, a1);
BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
} else {
@@ -432,15 +429,15 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
Label loop;
__ bind(&loop);
if (mode_ == LATIN1) {
- __ lbu(a3, MemOperand(a0, 0));
+ __ Lbu(a3, MemOperand(a0, 0));
__ daddiu(a0, a0, char_size());
- __ lbu(a4, MemOperand(a2, 0));
+ __ Lbu(a4, MemOperand(a2, 0));
__ daddiu(a2, a2, char_size());
} else {
DCHECK(mode_ == UC16);
- __ lhu(a3, MemOperand(a0, 0));
+ __ Lhu(a3, MemOperand(a0, 0));
__ daddiu(a0, a0, char_size());
- __ lhu(a4, MemOperand(a2, 0));
+ __ Lhu(a4, MemOperand(a2, 0));
__ daddiu(a2, a2, char_size());
}
BranchOrBacktrack(on_no_match, ne, a3, Operand(a4));
@@ -449,8 +446,8 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg,
// Move current character position to position after match.
__ Dsubu(current_input_offset(), a2, end_of_input_address());
if (read_backward) {
- __ ld(t1, register_location(start_reg)); // Index of start of capture.
- __ ld(a2, register_location(start_reg + 1)); // Index of end of capture.
+ __ Ld(t1, register_location(start_reg)); // Index of start of capture.
+ __ Ld(a2, register_location(start_reg + 1)); // Index of end of capture.
__ Daddu(current_input_offset(), current_input_offset(), Operand(t1));
__ Dsubu(current_input_offset(), current_input_offset(), Operand(a2));
}
@@ -525,7 +522,7 @@ void RegExpMacroAssemblerMIPS::CheckBitInTable(
__ Daddu(a0, a0, current_character());
}
- __ lbu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize));
+ __ Lbu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize));
BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg));
}
@@ -605,7 +602,7 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ li(a0, Operand(map));
__ Daddu(a0, a0, current_character());
- __ lbu(a0, MemOperand(a0, 0));
+ __ Lbu(a0, MemOperand(a0, 0));
BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
return true;
}
@@ -618,7 +615,7 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ li(a0, Operand(map));
__ Daddu(a0, a0, current_character());
- __ lbu(a0, MemOperand(a0, 0));
+ __ Lbu(a0, MemOperand(a0, 0));
BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
if (mode_ != LATIN1) {
__ bind(&done);
@@ -689,7 +686,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(masm_->isolate());
__ li(a0, Operand(stack_limit));
- __ ld(a0, MemOperand(a0));
+ __ Ld(a0, MemOperand(a0));
__ Dsubu(a0, sp, a0);
// Handle it if the stack pointer is already below the stack limit.
__ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
@@ -710,20 +707,20 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Allocate space on stack for registers.
__ Dsubu(sp, sp, Operand(num_registers_ * kPointerSize));
// Load string end.
- __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
// Load input start.
- __ ld(a0, MemOperand(frame_pointer(), kInputStart));
+ __ Ld(a0, MemOperand(frame_pointer(), kInputStart));
// Find negative length (offset of start relative to end).
__ Dsubu(current_input_offset(), a0, end_of_input_address());
// Set a0 to address of char before start of the input string
// (effectively string position -1).
- __ ld(a1, MemOperand(frame_pointer(), kStartIndex));
+ __ Ld(a1, MemOperand(frame_pointer(), kStartIndex));
__ Dsubu(a0, current_input_offset(), Operand(char_size()));
__ dsll(t1, a1, (mode_ == UC16) ? 1 : 0);
__ Dsubu(a0, a0, t1);
// Store this value in a local variable, for use when clearing
// position registers.
- __ sd(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Sd(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
// Initialize code pointer register
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
@@ -749,19 +746,19 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ li(a2, Operand(num_saved_registers_));
Label init_loop;
__ bind(&init_loop);
- __ sd(a0, MemOperand(a1));
+ __ Sd(a0, MemOperand(a1));
__ Daddu(a1, a1, Operand(-kPointerSize));
__ Dsubu(a2, a2, Operand(1));
__ Branch(&init_loop, ne, a2, Operand(zero_reg));
} else {
for (int i = 0; i < num_saved_registers_; i++) {
- __ sd(a0, register_location(i));
+ __ Sd(a0, register_location(i));
}
}
}
// Initialize backtrack stack pointer.
- __ ld(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+ __ Ld(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
__ jmp(&start_label_);
@@ -772,9 +769,9 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ bind(&success_label_);
if (num_saved_registers_ > 0) {
// Copy captures to output.
- __ ld(a1, MemOperand(frame_pointer(), kInputStart));
- __ ld(a0, MemOperand(frame_pointer(), kRegisterOutput));
- __ ld(a2, MemOperand(frame_pointer(), kStartIndex));
+ __ Ld(a1, MemOperand(frame_pointer(), kInputStart));
+ __ Ld(a0, MemOperand(frame_pointer(), kRegisterOutput));
+ __ Ld(a2, MemOperand(frame_pointer(), kStartIndex));
__ Dsubu(a1, end_of_input_address(), a1);
// a1 is length of input in bytes.
if (mode_ == UC16) {
@@ -789,8 +786,8 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// unroll the loop once to add an operation between a load of a register
// and the following use of that register.
for (int i = 0; i < num_saved_registers_; i += 2) {
- __ ld(a2, register_location(i));
- __ ld(a3, register_location(i + 1));
+ __ Ld(a2, register_location(i));
+ __ Ld(a3, register_location(i + 1));
if (i == 0 && global_with_zero_length_check()) {
// Keep capture start in a4 for the zero-length check later.
__ mov(t3, a2);
@@ -805,21 +802,21 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Daddu(a3, a1, Operand(a3));
}
// V8 expects the output to be an int32_t array.
- __ sw(a2, MemOperand(a0));
+ __ Sw(a2, MemOperand(a0));
__ Daddu(a0, a0, kIntSize);
- __ sw(a3, MemOperand(a0));
+ __ Sw(a3, MemOperand(a0));
__ Daddu(a0, a0, kIntSize);
}
}
if (global()) {
// Restart matching if the regular expression is flagged as global.
- __ ld(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
- __ ld(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
- __ ld(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ __ Ld(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ Ld(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ Ld(a2, MemOperand(frame_pointer(), kRegisterOutput));
// Increment success counter.
__ Daddu(a0, a0, 1);
- __ sd(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ Sd(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
// Capture results have been stored, so the number of remaining global
// output registers is reduced by the number of stored captures.
__ Dsubu(a1, a1, num_saved_registers_);
@@ -827,13 +824,13 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ mov(v0, a0);
__ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
- __ sd(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ Sd(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
// Advance the location for output.
__ Daddu(a2, a2, num_saved_registers_ * kIntSize);
- __ sd(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ __ Sd(a2, MemOperand(frame_pointer(), kRegisterOutput));
// Prepare a0 to initialize registers with its value in the next run.
- __ ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
if (global_with_zero_length_check()) {
// Special case for zero-length matches.
@@ -861,7 +858,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
// Exit and return v0.
__ bind(&exit_label_);
if (global()) {
- __ ld(v0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ Ld(v0, MemOperand(frame_pointer(), kSuccessfulCaptures));
}
__ bind(&return_v0);
@@ -893,7 +890,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ Branch(&return_v0, ne, v0, Operand(zero_reg));
// String might have moved: Reload end of string from frame.
- __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
SafeReturn();
}
@@ -925,7 +922,7 @@ Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
__ mov(backtrack_stackpointer(), v0);
// Restore saved registers and continue.
__ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
- __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ Ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
SafeReturn();
}
@@ -961,22 +958,22 @@ void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
int comparand,
Label* if_ge) {
- __ ld(a0, register_location(reg));
- BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
+ __ Ld(a0, register_location(reg));
+ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
}
void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
int comparand,
Label* if_lt) {
- __ ld(a0, register_location(reg));
+ __ Ld(a0, register_location(reg));
BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
}
void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
Label* if_eq) {
- __ ld(a0, register_location(reg));
+ __ Ld(a0, register_location(reg));
BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
}
@@ -1010,7 +1007,7 @@ void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
Pop(a0);
- __ sd(a0, register_location(register_index));
+ __ Sd(a0, register_location(register_index));
}
@@ -1028,10 +1025,10 @@ void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
masm_->label_at_put(label, offset);
__ bind(&after_constant);
if (is_int16(cp_offset)) {
- __ lwu(a0, MemOperand(code_pointer(), cp_offset));
+ __ Lwu(a0, MemOperand(code_pointer(), cp_offset));
} else {
__ Daddu(a0, code_pointer(), cp_offset);
- __ lwu(a0, MemOperand(a0, 0));
+ __ Lwu(a0, MemOperand(a0, 0));
}
}
Push(a0);
@@ -1046,20 +1043,20 @@ void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
StackCheckFlag check_stack_limit) {
- __ ld(a0, register_location(register_index));
+ __ Ld(a0, register_location(register_index));
Push(a0);
if (check_stack_limit) CheckStackLimit();
}
void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
- __ ld(current_input_offset(), register_location(reg));
+ __ Ld(current_input_offset(), register_location(reg));
}
void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
- __ ld(backtrack_stackpointer(), register_location(reg));
- __ ld(a0, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Ld(backtrack_stackpointer(), register_location(reg));
+ __ Ld(a0, MemOperand(frame_pointer(), kStackHighEnd));
__ Daddu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
}
@@ -1082,7 +1079,7 @@ void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
__ li(a0, Operand(to));
- __ sd(a0, register_location(register_index));
+ __ Sd(a0, register_location(register_index));
}
@@ -1095,27 +1092,27 @@ bool RegExpMacroAssemblerMIPS::Succeed() {
void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
int cp_offset) {
if (cp_offset == 0) {
- __ sd(current_input_offset(), register_location(reg));
+ __ Sd(current_input_offset(), register_location(reg));
} else {
__ Daddu(a0, current_input_offset(), Operand(cp_offset * char_size()));
- __ sd(a0, register_location(reg));
+ __ Sd(a0, register_location(reg));
}
}
void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
DCHECK(reg_from <= reg_to);
- __ ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
for (int reg = reg_from; reg <= reg_to; reg++) {
- __ sd(a0, register_location(reg));
+ __ Sd(a0, register_location(reg));
}
}
void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
- __ ld(a1, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Ld(a1, MemOperand(frame_pointer(), kStackHighEnd));
__ Dsubu(a0, backtrack_stackpointer(), a1);
- __ sd(a0, register_location(reg));
+ __ Sd(a0, register_location(reg));
}
@@ -1134,7 +1131,7 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
__ Dsubu(sp, sp, Operand(kPointerSize));
DCHECK(base::bits::IsPowerOfTwo32(stack_alignment));
__ And(sp, sp, Operand(-stack_alignment));
- __ sd(scratch, MemOperand(sp));
+ __ Sd(scratch, MemOperand(sp));
__ mov(a2, frame_pointer());
// Code* of self.
@@ -1174,7 +1171,7 @@ void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
// [sp + 2] - C argument slot.
// [sp + 1] - C argument slot.
// [sp + 0] - C argument slot.
- __ ld(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
+ __ Ld(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
__ li(code_pointer(), Operand(masm_->CodeObject()));
}
@@ -1222,7 +1219,7 @@ void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
Operand(-cp_offset * char_size()));
} else {
- __ ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Ld(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
__ Daddu(a0, current_input_offset(), Operand(cp_offset * char_size()));
BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
}
@@ -1276,13 +1273,13 @@ void RegExpMacroAssemblerMIPS::Push(Register source) {
__ Daddu(backtrack_stackpointer(),
backtrack_stackpointer(),
Operand(-kIntSize));
- __ sw(source, MemOperand(backtrack_stackpointer()));
+ __ Sw(source, MemOperand(backtrack_stackpointer()));
}
void RegExpMacroAssemblerMIPS::Pop(Register target) {
DCHECK(!target.is(backtrack_stackpointer()));
- __ lw(target, MemOperand(backtrack_stackpointer()));
+ __ Lw(target, MemOperand(backtrack_stackpointer()));
__ Daddu(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize);
}
@@ -1292,7 +1289,7 @@ void RegExpMacroAssemblerMIPS::CheckPreemption() {
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(masm_->isolate());
__ li(a0, Operand(stack_limit));
- __ ld(a0, MemOperand(a0));
+ __ Ld(a0, MemOperand(a0));
SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
}
@@ -1302,7 +1299,7 @@ void RegExpMacroAssemblerMIPS::CheckStackLimit() {
ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
__ li(a0, Operand(stack_limit));
- __ ld(a0, MemOperand(a0));
+ __ Ld(a0, MemOperand(a0));
SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
}
@@ -1320,10 +1317,10 @@ void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
DCHECK(characters == 1);
__ Daddu(t1, end_of_input_address(), Operand(offset));
if (mode_ == LATIN1) {
- __ lbu(current_character(), MemOperand(t1, 0));
+ __ Lbu(current_character(), MemOperand(t1, 0));
} else {
DCHECK(mode_ == UC16);
- __ lhu(current_character(), MemOperand(t1, 0));
+ __ Lhu(current_character(), MemOperand(t1, 0));
}
}
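The mips64 hunks above consistently replace raw instruction emitters (ld/sd, lw/sw, lbu/lhu) with their capitalized MacroAssembler counterparts (Ld/Sd, Lw/Sw, Lbu/Lhu). The convention matters because a capitalized macro op may expand to more than one machine instruction, for example when a memory operand's offset does not fit the raw load's 16-bit immediate field. Below is a minimal sketch of that wrapper pattern; ToyAssembler, its register numbering, and the printed mnemonics are illustrative stand-ins, not V8's MacroAssembler API.

    #include <cstdio>

    // Sketch: lowercase = exactly one machine instruction, in-range
    // offset required; capitalized = macro that may emit a longer
    // sequence when the offset does not fit in 16 bits.
    struct ToyAssembler {
      void ld(int rt, int base, int offset) {  // raw emitter
        std::printf("ld    r%d, %d(r%d)\n", rt, offset, base);
      }
      void Ld(int rt, int base, int offset) {  // macro wrapper
        if (offset >= -32768 && offset <= 32767) {
          ld(rt, base, offset);
        } else {
          // Materialize base + offset in a scratch register first.
          std::printf("li    at, %d\n", offset);
          std::printf("daddu at, at, r%d\n", base);
          std::printf("ld    r%d, 0(at)\n", rt);
        }
      }
    };

    int main() {
      ToyAssembler masm;
      masm.Ld(4, 29, 16);       // fits: single ld
      masm.Ld(4, 29, 0x12345);  // too far: expanded sequence
    }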
diff --git a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
index df2c6c554f..722ca01ab5 100644
--- a/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
+++ b/deps/v8/src/regexp/mips64/regexp-macro-assembler-mips64.h
@@ -107,9 +107,8 @@ class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
// TODO(plind): This 9 is 8 s-regs (s0..s7) plus fp.
static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
- static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
// Stack frame header.
- static const int kStackFrameHeader = kSecondaryReturnAddress;
+ static const int kStackFrameHeader = kReturnAddress;
// Stack parameters placed by caller.
static const int kIsolate = kStackFrameHeader + kPointerSize;
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
index d6af025f2e..8f03bcdee8 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.cc
@@ -38,8 +38,7 @@ namespace internal {
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
- * - fp[44] Isolate* isolate (address of the current isolate)
- * - fp[40] secondary link/return address used by native call.
+ * - fp[40] Isolate* isolate (address of the current isolate)
* - fp[36] lr save area (currently unused)
* - fp[32] backchain (currently unused)
* --- sp when called ---
@@ -81,16 +80,13 @@ namespace internal {
* Address start,
* Address end,
* int* capture_output_array,
+ * int num_capture_registers,
* byte* stack_area_base,
- * Address secondary_return_address, // Only used by native call.
- * bool direct_call = false)
+ * bool direct_call = false,
+ * Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
* in ppc/simulator-ppc.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the LR register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
*/
#define __ ACCESS_MASM(masm_)
@@ -334,11 +330,11 @@ void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
__ sub(r4, r4, r25);
}
// Isolate.
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
if (unicode) {
__ li(r6, Operand::Zero());
} else // NOLINT
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
{
__ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
}
diff --git a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
index 9151bf7b07..bd6da42851 100644
--- a/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
+++ b/deps/v8/src/regexp/ppc/regexp-macro-assembler-ppc.h
@@ -96,9 +96,8 @@ class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
static const int kReturnAddress = kStoredRegisters + 7 * kPointerSize;
static const int kCallerFrame = kReturnAddress + kPointerSize;
// Stack parameters placed by caller.
- static const int kSecondaryReturnAddress =
+ static const int kIsolate =
kCallerFrame + kStackFrameExtraParamSlot * kPointerSize;
- static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index fbe3ebfc72..7065ecd96c 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -291,9 +291,20 @@ class RegExpAssertion final : public RegExpTree {
class RegExpCharacterClass final : public RegExpTree {
public:
- RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
- : set_(ranges), is_negated_(is_negated) {}
- explicit RegExpCharacterClass(uc16 type) : set_(type), is_negated_(false) {}
+ // NEGATED: The character class is negated and should match everything but
+ // the specified ranges.
+ // CONTAINS_SPLIT_SURROGATE: The character class contains part of a split
+ // surrogate and should not be unicode-desugared (crbug.com/641091).
+ enum Flag {
+ NEGATED = 1 << 0,
+ CONTAINS_SPLIT_SURROGATE = 1 << 1,
+ };
+ typedef base::Flags<Flag> Flags;
+
+ explicit RegExpCharacterClass(ZoneList<CharacterRange>* ranges,
+ Flags flags = Flags())
+ : set_(ranges), flags_(flags) {}
+ explicit RegExpCharacterClass(uc16 type) : set_(type), flags_(0) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
RegExpCharacterClass* AsCharacterClass() override;
@@ -322,11 +333,14 @@ class RegExpCharacterClass final : public RegExpTree {
// * : All characters, for advancing unanchored regexp
uc16 standard_type() { return set_.standard_set_type(); }
ZoneList<CharacterRange>* ranges(Zone* zone) { return set_.ranges(zone); }
- bool is_negated() { return is_negated_; }
+ bool is_negated() const { return (flags_ & NEGATED) != 0; }
+ bool contains_split_surrogate() const {
+ return (flags_ & CONTAINS_SPLIT_SURROGATE) != 0;
+ }
private:
CharacterSet set_;
- bool is_negated_;
+ const Flags flags_;
};
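Replacing the single is_negated_ bool with a typed flag set lets RegExpCharacterClass carry a second orthogonal bit (CONTAINS_SPLIT_SURROGATE) without growing the constructor's parameter list; base::Flags<Flag> wraps the enum in a type-safe bitmask. The sketch below shows the same pattern with a plain unsigned mask and hypothetical names, not V8's actual base::Flags template. The parser hunks further down then pass RegExpCharacterClass::NEGATED instead of a bare bool.

    #include <cstdint>

    // Minimal sketch of an enum-backed flag set (hypothetical names).
    // Each flag occupies one bit, so independent properties combine
    // without a growing list of bool fields and parameters.
    class CharClass {
     public:
      enum Flag : uint32_t {
        NEGATED = 1u << 0,
        CONTAINS_SPLIT_SURROGATE = 1u << 1,
      };
      using Flags = uint32_t;

      explicit CharClass(Flags flags = 0) : flags_(flags) {}

      bool is_negated() const { return (flags_ & NEGATED) != 0; }
      bool contains_split_surrogate() const {
        return (flags_ & CONTAINS_SPLIT_SURROGATE) != 0;
      }

     private:
      const Flags flags_;
    };

    int main() {
      CharClass negated(CharClass::NEGATED);
      CharClass both(CharClass::NEGATED | CharClass::CONTAINS_SPLIT_SURROGATE);
      return negated.is_negated() && both.contains_split_surrogate() ? 0 : 1;
    }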
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 2e3a8a2f76..681acc1325 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -9,9 +9,9 @@
#include "src/regexp/regexp-stack.h"
#include "src/simulator.h"
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
#include "unicode/uchar.h"
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
namespace v8 {
namespace internal {
@@ -41,7 +41,7 @@ int RegExpMacroAssembler::CaseInsensitiveCompareUC16(Address byte_offset1,
uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
size_t length = byte_length >> 1;
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
if (isolate == nullptr) {
for (size_t i = 0; i < length; i++) {
uc32 c1 = substring1[i];
@@ -67,7 +67,7 @@ int RegExpMacroAssembler::CaseInsensitiveCompareUC16(Address byte_offset1,
}
return 1;
}
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
DCHECK_NOT_NULL(isolate);
for (size_t i = 0; i < length; i++) {
unibrow::uchar c1 = substring1[i];
@@ -170,15 +170,18 @@ int NativeRegExpMacroAssembler::CheckStackGuardState(
bool is_one_byte = subject_handle->IsOneByteRepresentationUnderneath();
StackLimitCheck check(isolate);
- if (check.JsHasOverflowed()) {
+ bool js_has_overflowed = check.JsHasOverflowed();
+
+ if (is_direct_call) {
+ // Direct calls from JavaScript can be interrupted in two ways:
+ // 1. A real stack overflow, in which case we let the caller throw the
+ // exception.
+ // 2. The stack guard was used to interrupt execution for another purpose,
+ // forcing the call through the runtime system.
+ return_value = js_has_overflowed ? EXCEPTION : RETRY;
+ } else if (js_has_overflowed) {
isolate->StackOverflow();
return_value = EXCEPTION;
- } else if (is_direct_call) {
- // If not real stack overflow the stack guard was used to interrupt
- // execution for another purpose. If this is a direct call from JavaScript
- // retry the RegExp forcing the call through the runtime system.
- // Currently the direct call cannot handle a GC.
- return_value = RETRY;
} else {
Object* result = isolate->stack_guard()->HandleInterrupts();
if (result->IsException(isolate)) return_value = EXCEPTION;
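The reordered CheckStackGuardState logic makes the direct-call case take priority: a direct call from generated code never runs interrupt handlers itself, it either reports the overflow as EXCEPTION for the caller to throw or requests a RETRY through the runtime system. A condensed sketch of the decision order, with a stubbed result enum in place of V8's types:

    enum Result { EXCEPTION, RETRY, CONTINUE };

    // Condensed sketch of the new decision order (stubbed types).
    // Direct calls are classified first; only non-direct calls may
    // run interrupt handling inline.
    Result Classify(bool is_direct_call, bool js_has_overflowed,
                    bool interrupt_threw) {
      if (is_direct_call) {
        // Overflow: let the caller throw. Otherwise: retry via runtime.
        return js_has_overflowed ? EXCEPTION : RETRY;
      }
      if (js_has_overflowed) return EXCEPTION;  // throw here
      return interrupt_threw ? EXCEPTION : CONTINUE;
    }

    int main() { return Classify(true, false, false) == RETRY ? 0 : 1; }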
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 3d2261a919..20f023930f 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -12,9 +12,9 @@
#include "src/regexp/jsregexp.h"
#include "src/utils.h"
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
#include "unicode/uniset.h"
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
namespace v8 {
namespace internal {
@@ -46,13 +46,13 @@ RegExpParser::RegExpParser(FlatStringReader* in, Handle<String>* error,
Advance();
}
-inline uc32 RegExpParser::ReadNext(bool update_position, ScanMode mode) {
+template <bool update_position>
+inline uc32 RegExpParser::ReadNext() {
int position = next_pos_;
uc32 c0 = in()->Get(position);
position++;
- const bool try_combine_surrogate_pairs =
- (unicode() || mode == ScanMode::FORCE_COMBINE_SURROGATE_PAIRS);
- if (try_combine_surrogate_pairs && position < in()->length() &&
+ // With the unicode flag, read the whole surrogate pair if possible.
+ if (unicode() && position < in()->length() &&
unibrow::Utf16::IsLeadSurrogate(static_cast<uc16>(c0))) {
uc16 c1 = in()->Get(position);
if (unibrow::Utf16::IsTrailSurrogate(c1)) {
@@ -67,13 +67,13 @@ inline uc32 RegExpParser::ReadNext(bool update_position, ScanMode mode) {
uc32 RegExpParser::Next() {
if (has_next()) {
- return ReadNext(false, ScanMode::DEFAULT);
+ return ReadNext<false>();
} else {
return kEndMarker;
}
}
-void RegExpParser::Advance(ScanMode mode) {
+void RegExpParser::Advance() {
if (has_next()) {
StackLimitCheck check(isolate());
if (check.HasOverflowed()) {
@@ -83,7 +83,7 @@ void RegExpParser::Advance(ScanMode mode) {
} else if (zone()->excess_allocation()) {
ReportError(CStrVector("Regular expression too large"));
} else {
- current_ = ReadNext(true, mode);
+ current_ = ReadNext<true>();
}
} else {
current_ = kEndMarker;
@@ -101,9 +101,9 @@ void RegExpParser::Reset(int pos) {
Advance();
}
-void RegExpParser::Advance(int dist, ScanMode mode) {
+void RegExpParser::Advance(int dist) {
next_pos_ += dist - 1;
- Advance(mode);
+ Advance();
}
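Turning ReadNext's update_position argument into a template parameter moves the branch from run time to compile time: the two call sites, Next() peeking via ReadNext<false>() and Advance() consuming via ReadNext<true>(), each instantiate their own specialized copy. A freestanding sketch of the pattern, with a simplified reader in place of the parser:

    #include <cstddef>
    #include <string>

    // Sketch: a compile-time bool selects between "peek" and "consume"
    // without a runtime branch. ToyReader is illustrative, not the
    // RegExpParser interface.
    class ToyReader {
     public:
      explicit ToyReader(std::string s) : input_(std::move(s)) {}

      template <bool update_position>
      char ReadNext() {
        std::size_t position = next_pos_;
        char c = position < input_.size() ? input_[position] : '\0';
        position++;
        if (update_position) next_pos_ = position;  // folded away if false
        return c;
      }

      char Next() { return ReadNext<false>(); }    // peek
      char Advance() { return ReadNext<true>(); }  // consume

     private:
      std::string input_;
      std::size_t next_pos_ = 0;
    };

    int main() {
      ToyReader r("ab");
      return (r.Next() == 'a' && r.Advance() == 'a' && r.Next() == 'b') ? 0 : 1;
    }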
@@ -283,8 +283,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
CharacterRange::AddClassEscape('.', ranges, false, zone());
}
- RegExpCharacterClass* cc =
- new (zone()) RegExpCharacterClass(ranges, false);
+ RegExpCharacterClass* cc = new (zone()) RegExpCharacterClass(ranges);
builder->AddCharacterClass(cc);
break;
}
@@ -327,6 +326,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
if (FLAG_harmony_regexp_named_captures) {
has_named_captures_ = true;
is_named_capture = true;
+ Advance();
break;
}
// Fall through.
@@ -392,7 +392,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
CharacterRange::AddClassEscape(c, ranges,
unicode() && ignore_case(), zone());
RegExpCharacterClass* cc =
- new (zone()) RegExpCharacterClass(ranges, false);
+ new (zone()) RegExpCharacterClass(ranges);
builder->AddCharacterClass(cc);
break;
}
@@ -408,7 +408,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
return ReportError(CStrVector("Invalid property name"));
}
RegExpCharacterClass* cc =
- new (zone()) RegExpCharacterClass(ranges, false);
+ new (zone()) RegExpCharacterClass(ranges);
builder->AddCharacterClass(cc);
} else {
// With /u, no identity escapes except for syntax characters
@@ -762,24 +762,18 @@ static void push_code_unit(ZoneVector<uc16>* v, uint32_t code_unit) {
const ZoneVector<uc16>* RegExpParser::ParseCaptureGroupName() {
DCHECK(FLAG_harmony_regexp_named_captures);
- DCHECK_EQ(current(), '<');
ZoneVector<uc16>* name =
new (zone()->New(sizeof(ZoneVector<uc16>))) ZoneVector<uc16>(zone());
- // Capture names can always contain surrogate pairs, and we need to scan
- // accordingly.
- const ScanMode scan_mode = ScanMode::FORCE_COMBINE_SURROGATE_PAIRS;
- Advance(scan_mode);
-
bool at_start = true;
while (true) {
uc32 c = current();
- Advance(scan_mode);
+ Advance();
// Convert unicode escapes.
if (c == '\\' && current() == 'u') {
- Advance(scan_mode);
+ Advance();
if (!ParseUnicodeEscape(&c)) {
ReportError(CStrVector("Invalid Unicode escape sequence"));
return nullptr;
@@ -850,6 +844,7 @@ bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
return false;
}
+ Advance();
const ZoneVector<uc16>* name = ParseCaptureGroupName();
if (name == nullptr) {
return false;
@@ -1110,7 +1105,7 @@ bool RegExpParser::ParseUnicodeEscape(uc32* value) {
return result;
}
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
namespace {
@@ -1203,6 +1198,70 @@ bool LookupSpecialPropertyValueName(const char* name,
return true;
}
+// Explicitly whitelist supported binary properties. The spec forbids supporting
+// properties outside of this set to ensure interoperability.
+bool IsSupportedBinaryProperty(UProperty property) {
+ switch (property) {
+ case UCHAR_ALPHABETIC:
+ // 'Any' is not supported by ICU. See LookupSpecialPropertyValueName.
+ // 'ASCII' is not supported by ICU. See LookupSpecialPropertyValueName.
+ case UCHAR_ASCII_HEX_DIGIT:
+ // 'Assigned' is not supported by ICU. See LookupSpecialPropertyValueName.
+ case UCHAR_BIDI_CONTROL:
+ case UCHAR_BIDI_MIRRORED:
+ case UCHAR_CASE_IGNORABLE:
+ case UCHAR_CASED:
+ case UCHAR_CHANGES_WHEN_CASEFOLDED:
+ case UCHAR_CHANGES_WHEN_CASEMAPPED:
+ case UCHAR_CHANGES_WHEN_LOWERCASED:
+ case UCHAR_CHANGES_WHEN_NFKC_CASEFOLDED:
+ case UCHAR_CHANGES_WHEN_TITLECASED:
+ case UCHAR_CHANGES_WHEN_UPPERCASED:
+ case UCHAR_DASH:
+ case UCHAR_DEFAULT_IGNORABLE_CODE_POINT:
+ case UCHAR_DEPRECATED:
+ case UCHAR_DIACRITIC:
+ case UCHAR_EMOJI:
+ // TODO(yangguo): Uncomment this once we upgrade to ICU 60.
+ // See https://ssl.icu-project.org/trac/ticket/13062
+ // case UCHAR_EMOJI_COMPONENT:
+ case UCHAR_EMOJI_MODIFIER_BASE:
+ case UCHAR_EMOJI_MODIFIER:
+ case UCHAR_EMOJI_PRESENTATION:
+ case UCHAR_EXTENDER:
+ case UCHAR_GRAPHEME_BASE:
+ case UCHAR_GRAPHEME_EXTEND:
+ case UCHAR_HEX_DIGIT:
+ case UCHAR_ID_CONTINUE:
+ case UCHAR_ID_START:
+ case UCHAR_IDEOGRAPHIC:
+ case UCHAR_IDS_BINARY_OPERATOR:
+ case UCHAR_IDS_TRINARY_OPERATOR:
+ case UCHAR_JOIN_CONTROL:
+ case UCHAR_LOGICAL_ORDER_EXCEPTION:
+ case UCHAR_LOWERCASE:
+ case UCHAR_MATH:
+ case UCHAR_NONCHARACTER_CODE_POINT:
+ case UCHAR_PATTERN_SYNTAX:
+ case UCHAR_PATTERN_WHITE_SPACE:
+ case UCHAR_QUOTATION_MARK:
+ case UCHAR_RADICAL:
+ case UCHAR_S_TERM:
+ case UCHAR_SOFT_DOTTED:
+ case UCHAR_TERMINAL_PUNCTUATION:
+ case UCHAR_UNIFIED_IDEOGRAPH:
+ case UCHAR_UPPERCASE:
+ case UCHAR_VARIATION_SELECTOR:
+ case UCHAR_WHITE_SPACE:
+ case UCHAR_XID_CONTINUE:
+ case UCHAR_XID_START:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
} // anonymous namespace
bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
@@ -1249,8 +1308,7 @@ bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
}
// Then attempt to interpret as binary property name with value name 'Y'.
UProperty property = u_getPropertyEnum(name);
- if (property < UCHAR_BINARY_START) return false;
- if (property >= UCHAR_BINARY_LIMIT) return false;
+ if (!IsSupportedBinaryProperty(property)) return false;
if (!IsExactPropertyAlias(name, property)) return false;
return LookupPropertyValueName(property, negate ? "N" : "Y", false, result,
zone());
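The old range check admitted every ICU binary property between UCHAR_BINARY_START and UCHAR_BINARY_LIMIT, a set that silently grows with each ICU upgrade; the explicit switch pins the supported set to what the spec allows, as the comments about 'Any', 'ASCII', and the deferred UCHAR_EMOJI_COMPONENT show. The shape of the pattern, reduced to a sketch with a stand-in enum rather than ICU's UProperty:

    // Sketch of explicit whitelist vs. numeric range check. The switch
    // relies on case fallthrough: every listed value reaches the shared
    // `return true`.
    enum Property { ALPHABETIC, ASCII_HEX_DIGIT, DASH, INTERNAL_ONLY, LIMIT };

    bool IsSupported(Property p) {
      switch (p) {
        case ALPHABETIC:
        case ASCII_HEX_DIGIT:
        case DASH:
          return true;
        default:
          return false;
      }
    }

    int main() {
      // INTERNAL_ONLY lies inside the old numeric range but is rejected.
      return (IsSupported(DASH) && !IsSupported(INTERNAL_ONLY)) ? 0 : 1;
    }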
@@ -1273,14 +1331,14 @@ bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
}
}
-#else // V8_I18N_SUPPORT
+#else // V8_INTL_SUPPORT
bool RegExpParser::ParsePropertyClass(ZoneList<CharacterRange>* result,
bool negate) {
return false;
}
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value, uc32* value) {
uc32 x = 0;
@@ -1548,7 +1606,9 @@ RegExpTree* RegExpParser::ParseCharacterClass() {
ranges->Add(CharacterRange::Everything(), zone());
is_negated = !is_negated;
}
- return new (zone()) RegExpCharacterClass(ranges, is_negated);
+ RegExpCharacterClass::Flags flags;
+ if (is_negated) flags = RegExpCharacterClass::NEGATED;
+ return new (zone()) RegExpCharacterClass(ranges, flags);
}
@@ -1722,7 +1782,7 @@ void RegExpBuilder::AddCharacterClass(RegExpCharacterClass* cc) {
void RegExpBuilder::AddCharacterClassForDesugaring(uc32 c) {
AddTerm(new (zone()) RegExpCharacterClass(
- CharacterRange::List(zone(), CharacterRange::Singleton(c)), false));
+ CharacterRange::List(zone(), CharacterRange::Singleton(c))));
}
@@ -1803,7 +1863,7 @@ bool RegExpBuilder::NeedsDesugaringForUnicode(RegExpCharacterClass* cc) {
bool RegExpBuilder::NeedsDesugaringForIgnoreCase(uc32 c) {
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
if (unicode() && ignore_case()) {
icu::UnicodeSet set(c, c);
set.closeOver(USET_CASE_INSENSITIVE);
@@ -1812,7 +1872,7 @@ bool RegExpBuilder::NeedsDesugaringForIgnoreCase(uc32 c) {
}
// In the case where ICU is not included, we act as if the unicode flag is
// not set, and do not desugar.
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
return false;
}
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index b34932fa00..a3ef22d8b7 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -184,18 +184,11 @@ class RegExpParser BASE_EMBEDDED {
// can be reparsed.
bool ParseBackReferenceIndex(int* index_out);
- // The default behavior is to combine surrogate pairs in unicode mode and
- // don't combine them otherwise (a quantifier after a surrogate pair would
- // then apply only to the trailing surrogate). Forcing combination is required
- // when parsing capture names since they can always legally contain surrogate
- // pairs.
- enum class ScanMode { DEFAULT, FORCE_COMBINE_SURROGATE_PAIRS };
-
bool ParseClassProperty(ZoneList<CharacterRange>* result);
CharacterRange ParseClassAtom(uc16* char_class);
RegExpTree* ReportError(Vector<const char> message);
- void Advance(ScanMode mode = ScanMode::DEFAULT);
- void Advance(int dist, ScanMode mode = ScanMode::DEFAULT);
+ void Advance();
+ void Advance(int dist);
void Reset(int pos);
// Reports whether the pattern might be used as a literal search string.
@@ -311,7 +304,8 @@ class RegExpParser BASE_EMBEDDED {
bool has_more() { return has_more_; }
bool has_next() { return next_pos_ < in()->length(); }
uc32 Next();
- uc32 ReadNext(bool update_position, ScanMode mode);
+ template <bool update_position>
+ uc32 ReadNext();
FlatStringReader* in() { return in_; }
void ScanForCaptures();
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
index d927a110b9..e2fe913b36 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.cc
@@ -39,8 +39,7 @@ namespace internal {
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
- * - fp[112] Isolate* isolate (address of the current isolate)
- * - fp[108] secondary link/return address used by native call.
+ * - fp[108] Isolate* isolate (address of the current isolate)
* - fp[104] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
* - fp[100] stack_area_base (high end of the memory area to use as
@@ -83,16 +82,13 @@ namespace internal {
* Address start,
* Address end,
* int* capture_output_array,
+ * int num_capture_registers,
* byte* stack_area_base,
- * Address secondary_return_address, // Only used by native call.
- * bool direct_call = false)
+ * bool direct_call = false,
+ * Isolate* isolate);
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
* in s390/simulator-s390.h.
- * When calling as a non-direct call (i.e., from C++ code), the return address
- * area is overwritten with the LR register by the RegExp code. When doing a
- * direct call from generated code, the return address is placed there by
- * the calling code, as in a normal exit frame.
*/
#define __ ACCESS_MASM(masm_)
@@ -324,11 +320,11 @@ void RegExpMacroAssemblerS390::CheckNotBackReferenceIgnoreCase(
__ SubP(r3, r3, r6);
}
// Isolate.
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
if (unicode) {
__ LoadImmP(r5, Operand::Zero());
} else // NOLINT
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
{
__ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
}
diff --git a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
index 755bc89066..b8a3bed5f1 100644
--- a/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
+++ b/deps/v8/src/regexp/s390/regexp-macro-assembler-s390.h
@@ -97,8 +97,7 @@ class RegExpMacroAssemblerS390 : public NativeRegExpMacroAssembler {
static const int kCaptureArraySize = kCallerFrame;
static const int kStackAreaBase = kCallerFrame + kPointerSize;
// kDirectCall again
- static const int kSecondaryReturnAddress = kStackAreaBase + 2 * kPointerSize;
- static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
+ static const int kIsolate = kStackAreaBase + 2 * kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
diff --git a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
index 54dc3415e8..8c51233e29 100644
--- a/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/regexp/x64/regexp-macro-assembler-x64.cc
@@ -85,9 +85,10 @@ namespace internal {
* Address start,
* Address end,
* int* capture_output_array,
- * bool at_start,
+ * int num_capture_registers,
* byte* stack_area_base,
- * bool direct_call)
+ * bool direct_call = false,
+ * Isolate* isolate);
*/
#define __ ACCESS_MASM((&masm_))
@@ -337,11 +338,11 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase(
// Set byte_length.
__ movp(arg_reg_3, rbx);
// Isolate.
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
if (unicode) {
__ movp(arg_reg_4, Immediate(0));
} else // NOLINT
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
{
__ LoadAddress(arg_reg_4, ExternalReference::isolate_address(isolate()));
}
diff --git a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
index 4a1c3a889a..622a36e021 100644
--- a/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
+++ b/deps/v8/src/regexp/x87/regexp-macro-assembler-x87.cc
@@ -69,9 +69,10 @@ namespace internal {
* Address start,
* Address end,
* int* capture_output_array,
- * bool at_start,
+ * int num_capture_registers,
* byte* stack_area_base,
- * bool direct_call)
+ * bool direct_call = false,
+ * Isolate* isolate);
*/
#define __ ACCESS_MASM(masm_)
@@ -297,11 +298,11 @@ void RegExpMacroAssemblerX87::CheckNotBackReferenceIgnoreCase(
// Isolate* isolate or 0 if unicode flag.
// Set isolate.
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
if (unicode) {
__ mov(Operand(esp, 3 * kPointerSize), Immediate(0));
} else // NOLINT
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
{
__ mov(Operand(esp, 3 * kPointerSize),
Immediate(ExternalReference::isolate_address(isolate())));
diff --git a/deps/v8/src/register-configuration.cc b/deps/v8/src/register-configuration.cc
index 28d0ab2cc5..af35fd3b03 100644
--- a/deps/v8/src/register-configuration.cc
+++ b/deps/v8/src/register-configuration.cc
@@ -22,9 +22,13 @@ static const int kAllocatableGeneralCodes[] = {
ALLOCATABLE_GENERAL_REGISTERS(REGISTER_CODE)};
#undef REGISTER_CODE
-static const int kAllocatableDoubleCodes[] = {
#define REGISTER_CODE(R) DoubleRegister::kCode_##R,
+static const int kAllocatableDoubleCodes[] = {
ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_CODE)};
+#if V8_TARGET_ARCH_ARM
+static const int kAllocatableNoVFP32DoubleCodes[] = {
+ ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_CODE)};
+#endif // V8_TARGET_ARCH_ARM
#undef REGISTER_CODE
static const char* const kGeneralRegisterNames[] = {
@@ -77,9 +81,7 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_ARM
- FLAG_enable_embedded_constant_pool
- ? (kMaxAllocatableGeneralRegisterCount - 1)
- : kMaxAllocatableGeneralRegisterCount,
+ kMaxAllocatableGeneralRegisterCount,
CpuFeatures::IsSupported(VFP32DREGS)
? kMaxAllocatableDoubleRegisterCount
: (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0),
@@ -101,7 +103,14 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
#else
#error Unsupported target architecture.
#endif
- kAllocatableGeneralCodes, kAllocatableDoubleCodes,
+ kAllocatableGeneralCodes,
+#if V8_TARGET_ARCH_ARM
+ CpuFeatures::IsSupported(VFP32DREGS)
+ ? kAllocatableDoubleCodes
+ : kAllocatableNoVFP32DoubleCodes,
+#else
+ kAllocatableDoubleCodes,
+#endif
kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE,
kGeneralRegisterNames, kFloatRegisterNames, kDoubleRegisterNames,
kSimd128RegisterNames) {
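On ARM the allocatable double-register table is now chosen at configuration time: with VFP32DREGS the full kAllocatableDoubleCodes table applies, otherwise the new kAllocatableNoVFP32DoubleCodes table. Selecting one of two static arrays from a runtime feature bit looks like the following sketch; the names and the feature stub are hypothetical, not the RegisterConfiguration constructor:

    #include <cstddef>

    // Sketch: pick a register-code table from a CPU feature probe.
    static const int kFullDoubleCodes[] = {0, 1, 2, 3, 4, 5, 6, 7};
    static const int kReducedDoubleCodes[] = {0, 1, 2, 3};

    // Stub; V8 consults CpuFeatures::IsSupported(VFP32DREGS) instead.
    bool HasVfp32Dregs() { return false; }

    struct Config {
      const int* double_codes;
      std::size_t num_double_codes;
    };

    Config MakeConfig() {
      if (HasVfp32Dregs()) {
        return {kFullDoubleCodes,
                sizeof(kFullDoubleCodes) / sizeof(kFullDoubleCodes[0])};
      }
      return {kReducedDoubleCodes,
              sizeof(kReducedDoubleCodes) / sizeof(kReducedDoubleCodes[0])};
    }

    int main() { return MakeConfig().num_double_codes == 4 ? 0 : 1; }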
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index d09e69ae61..56ab1b89e4 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -57,7 +57,7 @@ static const int kMaxSizeEarlyOptIgnition =
// We aren't using the code size multiplier here because there is no
// "kMaxSizeOpt" with which we would need to normalize. This constant is
// only for optimization decisions coming into TurboFan from Ignition.
-static const int kMaxSizeOptIgnition = 250 * 1024;
+static const int kMaxSizeOptIgnition = 80 * KB;
#define OPTIMIZATION_REASON_LIST(V) \
V(DoNotOptimize, "do not optimize") \
@@ -432,7 +432,7 @@ OptimizationReason RuntimeProfiler::ShouldOptimizeIgnition(
void RuntimeProfiler::MarkCandidatesForOptimization() {
HandleScope scope(isolate_);
- if (!isolate_->use_crankshaft()) return;
+ if (!isolate_->use_optimizer()) return;
DisallowHeapAllocation no_gc;
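Two separate changes land in runtime-profiler.cc: the isolate predicate is renamed from use_crankshaft() to use_optimizer(), and the Ignition early-optimization size cap drops from 250 * 1024 bytes to 80 * KB, i.e. from 256000 to 81920 bytes, roughly a 3x tighter bound. A compile-time check of that arithmetic; KB here mirrors V8's 1024-byte constant:

    // Quick check of the threshold change.
    constexpr int KB = 1024;
    constexpr int kOld = 250 * 1024;  // 256000
    constexpr int kNew = 80 * KB;     //  81920
    static_assert(kOld == 256000 && kNew == 81920, "threshold arithmetic");

    int main() { return 0; }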
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 97432b6ef1..781065a371 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -52,14 +52,14 @@ RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
Handle<JSObject> holder =
isolate->factory()->NewJSObject(isolate->object_function());
- InstallBuiltin(isolate, holder, "pop", Builtins::kArrayPop);
+ InstallBuiltin(isolate, holder, "pop", Builtins::kFastArrayPop);
InstallBuiltin(isolate, holder, "push", Builtins::kFastArrayPush);
- InstallBuiltin(isolate, holder, "shift", Builtins::kArrayShift);
+ InstallBuiltin(isolate, holder, "shift", Builtins::kFastArrayShift);
InstallBuiltin(isolate, holder, "unshift", Builtins::kArrayUnshift);
InstallBuiltin(isolate, holder, "slice", Builtins::kArraySlice);
InstallBuiltin(isolate, holder, "splice", Builtins::kArraySplice);
- InstallBuiltin(isolate, holder, "includes", Builtins::kArrayIncludes, 2);
- InstallBuiltin(isolate, holder, "indexOf", Builtins::kArrayIndexOf, 2);
+ InstallBuiltin(isolate, holder, "includes", Builtins::kArrayIncludes);
+ InstallBuiltin(isolate, holder, "indexOf", Builtins::kArrayIndexOf);
InstallBuiltin(isolate, holder, "keys", Builtins::kArrayPrototypeKeys, 0,
kArrayKeys);
InstallBuiltin(isolate, holder, "values", Builtins::kArrayPrototypeValues, 0,
@@ -142,14 +142,14 @@ RUNTIME_FUNCTION(Runtime_MoveArrayContents) {
// How many elements does this object/array have?
RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) {
+ DisallowHeapAllocation no_gc;
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
- Handle<FixedArrayBase> elements(array->elements(), isolate);
+ CONVERT_ARG_CHECKED(JSArray, array, 0);
+ FixedArrayBase* elements = array->elements();
SealHandleScope shs(isolate);
if (elements->IsDictionary()) {
- int result =
- Handle<SeededNumberDictionary>::cast(elements)->NumberOfElements();
+ int result = SeededNumberDictionary::cast(elements)->NumberOfElements();
return Smi::FromInt(result);
} else {
DCHECK(array->length()->IsSmi());
@@ -531,16 +531,10 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
CONVERT_ARG_HANDLE_CHECKED(Object, from_index, 2);
// Let O be ? ToObject(this value).
- Handle<Object> receiver_obj = args.at(0);
- if (receiver_obj->IsNullOrUndefined(isolate)) {
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kCalledOnNullOrUndefined,
- isolate->factory()->NewStringFromAsciiChecked(
- "Array.prototype.indexOf")));
- }
Handle<JSReceiver> object;
- ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, object,
- Object::ToObject(isolate, args.at(0)));
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, object,
+ Object::ToObject(isolate, args.at(0), "Array.prototype.indexOf"));
// Let len be ? ToLength(? Get(O, "length")).
int64_t len;
@@ -574,7 +568,13 @@ RUNTIME_FUNCTION(Runtime_ArrayIndexOf) {
Object::ToInteger(isolate, from_index));
double fp = from_index->Number();
if (fp > len) return Smi::FromInt(-1);
- start_from = static_cast<int64_t>(fp);
+ if (V8_LIKELY(fp >=
+ static_cast<double>(std::numeric_limits<int64_t>::min()))) {
+ DCHECK(fp < std::numeric_limits<int64_t>::max());
+ start_from = static_cast<int64_t>(fp);
+ } else {
+ start_from = std::numeric_limits<int64_t>::min();
+ }
}
int64_t index;
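The new branch around the static_cast guards against undefined behavior: converting a double below the representable int64_t range is UB in C++, so a from_index like -1e30 could otherwise produce garbage. Since the fp > len case has already returned, only the lower bound needs clamping, which is what the DCHECK records. A self-contained sketch of the same guard:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Sketch of the clamp added above: casting a double below
    // int64_t's range is undefined behavior, so clamp explicitly.
    int64_t ClampedFromIndex(double fp) {
      if (fp >= static_cast<double>(std::numeric_limits<int64_t>::min())) {
        // The fp > len early-return guarantees fp is not too large.
        return static_cast<int64_t>(fp);
      }
      return std::numeric_limits<int64_t>::min();
    }

    int main() {
      assert(ClampedFromIndex(-3.7) == -3);  // truncates toward zero
      assert(ClampedFromIndex(-1e30) == std::numeric_limits<int64_t>::min());
      return 0;
    }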
@@ -661,7 +661,7 @@ RUNTIME_FUNCTION(Runtime_SpreadIterableFixed) {
Handle<FixedArray> result = isolate->factory()->NewFixedArray(spread_length);
ElementsAccessor* accessor = spread_array->GetElementsAccessor();
for (uint32_t i = 0; i < spread_length; i++) {
- DCHECK(accessor->HasElement(spread_array, i));
+ DCHECK(accessor->HasElement(*spread_array, i));
Handle<Object> element = accessor->Get(spread_array, i);
result->set(i, *element);
}
diff --git a/deps/v8/src/runtime/runtime-classes.cc b/deps/v8/src/runtime/runtime-classes.cc
index 7dadca5026..feb0120045 100644
--- a/deps/v8/src/runtime/runtime-classes.cc
+++ b/deps/v8/src/runtime/runtime-classes.cc
@@ -149,15 +149,6 @@ static MaybeHandle<Object> DefineClass(Isolate* isolate,
map->SetConstructor(*constructor);
Handle<JSObject> prototype = isolate->factory()->NewJSObjectFromMap(map);
- if (!super_class->IsTheHole(isolate)) {
- // Derived classes, just like builtins, don't create implicit receivers in
- // [[construct]]. Instead they just set up new.target and call into the
- // constructor. Hence we can reuse the builtins construct stub for derived
- // classes.
- Handle<Code> stub(isolate->builtins()->JSBuiltinsConstructStubForDerived());
- constructor->shared()->SetConstructStub(*stub);
- }
-
JSFunction::SetPrototype(constructor, prototype);
PropertyAttributes attribs =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index 214ce1c4e6..0e311517e9 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -325,5 +325,48 @@ RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
CHECK(max_values >= 0);
return *JSWeakCollection::GetEntries(holder, max_values);
}
+
+RUNTIME_FUNCTION(Runtime_IsJSMap) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsJSMap());
+}
+
+RUNTIME_FUNCTION(Runtime_IsJSSet) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsJSSet());
+}
+
+RUNTIME_FUNCTION(Runtime_IsJSMapIterator) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsJSMapIterator());
+}
+
+RUNTIME_FUNCTION(Runtime_IsJSSetIterator) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsJSSetIterator());
+}
+
+RUNTIME_FUNCTION(Runtime_IsJSWeakMap) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsJSWeakMap());
+}
+
+RUNTIME_FUNCTION(Runtime_IsJSWeakSet) {
+ SealHandleScope shs(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_CHECKED(Object, obj, 0);
+ return isolate->heap()->ToBoolean(obj->IsJSWeakSet());
+}
+
} // namespace internal
} // namespace v8
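The six new Runtime_IsJS* functions are deliberate boilerplate: each is a one-argument type predicate that seals the handle scope (no allocation can happen), checks the argument count, and boxes the Is*() answer as a heap boolean. The repeated shape could be factored as below; this is an illustrative reduction with stand-in types, not a change V8 makes, since each RUNTIME_FUNCTION must expand to a distinct named entry point.

    #include <cassert>

    // Stand-in object model for the sketch (not V8's Object/Heap).
    struct Object {
      bool is_js_map = false;
      bool is_js_set = false;
    };

    // The shared shape of the six predicates: one argument in, a
    // boolean out. Here the "heap boolean" is just bool.
    template <bool (*predicate)(const Object&)>
    bool RuntimePredicate(const Object& arg) {
      return predicate(arg);
    }

    bool IsJSMap(const Object& o) { return o.is_js_map; }
    bool IsJSSet(const Object& o) { return o.is_js_set; }

    int main() {
      Object map_like;
      map_like.is_js_map = true;
      assert(RuntimePredicate<IsJSMap>(map_like));
      assert(!RuntimePredicate<IsJSSet>(map_like));
      return 0;
    }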
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index b7151f83c6..7b73967acc 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -68,6 +68,17 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized_NotConcurrent) {
return function->code();
}
+RUNTIME_FUNCTION(Runtime_EvictOptimizedCodeSlot) {
+ SealHandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+ DCHECK(function->is_compiled());
+ function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
+ function->shared(), "Runtime_EvictOptimizedCodeSlot");
+ return function->code();
+}
+
RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 4);
@@ -85,12 +96,11 @@ RUNTIME_FUNCTION(Runtime_InstantiateAsmJs) {
if (args[3]->IsJSArrayBuffer()) {
memory = args.at<JSArrayBuffer>(3);
}
- if (function->shared()->HasAsmWasmData() &&
- AsmJs::IsStdlibValid(isolate, handle(function->shared()->asm_wasm_data()),
- stdlib)) {
- MaybeHandle<Object> result;
- result = AsmJs::InstantiateAsmWasm(
- isolate, handle(function->shared()->asm_wasm_data()), memory, foreign);
+ if (function->shared()->HasAsmWasmData()) {
+ Handle<SharedFunctionInfo> shared(function->shared());
+ Handle<FixedArray> data(shared->asm_wasm_data());
+ MaybeHandle<Object> result = AsmJs::InstantiateAsmWasm(
+ isolate, shared, data, stdlib, foreign, memory);
if (!result.is_null()) {
return *result.ToHandleChecked();
}
@@ -205,8 +215,15 @@ RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
// Evict optimized code for this function from the cache so that it
// doesn't get used for new closures.
- function->shared()->EvictFromOptimizedCodeMap(*optimized_code,
- "notify deoptimized");
+ if (function->feedback_vector()->optimized_code() == *optimized_code) {
+ function->ClearOptimizedCodeSlot("notify deoptimized");
+ }
+ // Remove the code from the osr optimized code cache.
+ DeoptimizationInputData* deopt_data =
+ DeoptimizationInputData::cast(optimized_code->deoptimization_data());
+ if (deopt_data->OsrAstId()->value() == BailoutId::None().ToInt()) {
+ isolate->EvictOSROptimizedCode(*optimized_code, "notify deoptimized");
+ }
} else {
// TODO(titzer): we should probably do DeoptimizeCodeList(code)
// unconditionally if the code is not already marked for deoptimization.
diff --git a/deps/v8/src/runtime/runtime-debug.cc b/deps/v8/src/runtime/runtime-debug.cc
index a13d3f95cc..b65757b2de 100644
--- a/deps/v8/src/runtime/runtime-debug.cc
+++ b/deps/v8/src/runtime/runtime-debug.cc
@@ -551,7 +551,7 @@ RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
// bit 0: invoked in the debugger context.
// bit 1: optimized frame.
// bit 2: inlined in optimized frame
- int flags = 0;
+ int flags = inlined_frame_index << 2;
if (*save->context() == *isolate->debug()->debug_context()) {
flags |= 1 << 0;
}
@@ -830,7 +830,7 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
CHECK(isolate->debug()->CheckExecutionState(break_id));
CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
- CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
+ CONVERT_NUMBER_CHECKED(int, inlined_frame_index, Int32, args[2]);
ScopeIterator::Option option = ScopeIterator::DEFAULT;
if (args.length() == 4) {
@@ -842,9 +842,19 @@ RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
StackFrame::Id id = DebugFrameHelper::UnwrapFrameId(wrapped_id);
StackTraceFrameIterator frame_it(isolate, id);
StandardFrame* frame = frame_it.frame();
- FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
- List<Handle<JSObject> > result(4);
+ // Handle wasm frames specially. They provide exactly two scopes (global /
+ // local).
+ if (frame->is_wasm_interpreter_entry()) {
+ Handle<WasmDebugInfo> debug_info(
+ WasmInterpreterEntryFrame::cast(frame)->wasm_instance()->debug_info(),
+ isolate);
+ return *WasmDebugInfo::GetScopeDetails(debug_info, frame->fp(),
+ inlined_frame_index);
+ }
+
+ FrameInspector frame_inspector(frame, inlined_frame_index, isolate);
+ List<Handle<JSObject>> result(4);
ScopeIterator it(isolate, &frame_inspector, option);
for (; !it.Done(); it.Next()) {
Handle<JSObject> details;
diff --git a/deps/v8/src/runtime/runtime-function.cc b/deps/v8/src/runtime/runtime-function.cc
index ac8a430761..c7100d1bf5 100644
--- a/deps/v8/src/runtime/runtime-function.cc
+++ b/deps/v8/src/runtime/runtime-function.cc
@@ -111,8 +111,7 @@ RUNTIME_FUNCTION(Runtime_FunctionGetContextData) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_CHECKED(JSFunction, fun, 0);
- FixedArray* array = fun->native_context()->embedder_data();
- return array->get(v8::Context::kDebugIdIndex);
+ return fun->native_context()->debug_context_id();
}
RUNTIME_FUNCTION(Runtime_FunctionSetInstanceClassName) {
@@ -145,8 +144,7 @@ RUNTIME_FUNCTION(Runtime_FunctionSetPrototype) {
CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
CHECK(fun->IsConstructor());
- RETURN_FAILURE_ON_EXCEPTION(isolate,
- Accessors::FunctionSetPrototype(fun, value));
+ JSFunction::SetPrototype(fun, value);
return args[0]; // return TOS
}
@@ -189,7 +187,7 @@ RUNTIME_FUNCTION(Runtime_SetCode) {
}
target_shared->set_scope_info(source_shared->scope_info());
target_shared->set_outer_scope_info(source_shared->outer_scope_info());
- target_shared->set_length(source_shared->length());
+ target_shared->set_length(source_shared->GetLength());
target_shared->set_feedback_metadata(source_shared->feedback_metadata());
target_shared->set_internal_formal_parameter_count(
source_shared->internal_formal_parameter_count());
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index 0c8fe6db01..74b1fe90d2 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -66,6 +66,10 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetContext) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering.
+ UNREACHABLE();
+
return generator->context();
}
@@ -74,6 +78,10 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetInputOrDebugPos) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering.
+ UNREACHABLE();
+
return generator->input_or_debug_pos();
}
@@ -81,7 +89,6 @@ RUNTIME_FUNCTION(Runtime_AsyncGeneratorGetAwaitInputOrDebugPos) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSAsyncGeneratorObject, generator, 0);
-
return generator->await_input_or_debug_pos();
}
@@ -112,6 +119,10 @@ RUNTIME_FUNCTION(Runtime_GeneratorGetResumeMode) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+ // Runtime call is implemented in InterpreterIntrinsics and lowered in
+ // JSIntrinsicLowering.
+ UNREACHABLE();
+
return Smi::FromInt(generator->resume_mode());
}
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index 8c566c081d..7348d5f007 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -288,14 +288,6 @@ RUNTIME_FUNCTION(Runtime_ThrowNotConstructor) {
isolate, NewTypeError(MessageTemplate::kNotConstructor, object));
}
-RUNTIME_FUNCTION(Runtime_ThrowNotGeneric) {
- HandleScope scope(isolate);
- DCHECK_EQ(1, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, arg0, 0);
- THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kNotGeneric, arg0));
-}
-
RUNTIME_FUNCTION(Runtime_ThrowGeneratorRunning) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
@@ -455,11 +447,18 @@ RUNTIME_FUNCTION(Runtime_ThrowConstructedNonConstructable) {
isolate, NewTypeError(MessageTemplate::kNotConstructor, callsite));
}
-RUNTIME_FUNCTION(Runtime_ThrowDerivedConstructorReturnedNonObject) {
+RUNTIME_FUNCTION(Runtime_ThrowConstructorReturnedNonObject) {
HandleScope scope(isolate);
DCHECK_EQ(0, args.length());
+ if (FLAG_harmony_restrict_constructor_return) {
+ THROW_NEW_ERROR_RETURN_FAILURE(
+ isolate,
+ NewTypeError(MessageTemplate::kClassConstructorReturnedNonObject));
+ }
+
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kDerivedConstructorReturn));
+ isolate,
+ NewTypeError(MessageTemplate::kDerivedConstructorReturnedNonObject));
}
RUNTIME_FUNCTION(Runtime_ThrowUndefinedOrNullToObject) {
diff --git a/deps/v8/src/runtime/runtime-interpreter.cc b/deps/v8/src/runtime/runtime-interpreter.cc
index 9f3897bf64..5889a477c3 100644
--- a/deps/v8/src/runtime/runtime-interpreter.cc
+++ b/deps/v8/src/runtime/runtime-interpreter.cc
@@ -34,6 +34,8 @@ RUNTIME_FUNCTION(Runtime_InterpreterNewClosure) {
static_cast<PretenureFlag>(pretenured_flag));
}
+#ifdef V8_TRACE_IGNITION
+
namespace {
void AdvanceToOffsetForTracing(
@@ -109,17 +111,22 @@ void PrintRegisters(std::ostream& os, bool is_input,
} // namespace
RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
+ if (!FLAG_trace_ignition) {
+ return isolate->heap()->undefined_value();
+ }
+
SealHandleScope shs(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
CONVERT_SMI_ARG_CHECKED(bytecode_offset, 1);
CONVERT_ARG_HANDLE_CHECKED(Object, accumulator, 2);
- OFStream os(stdout);
int offset = bytecode_offset - BytecodeArray::kHeaderSize + kHeapObjectTag;
interpreter::BytecodeArrayIterator bytecode_iterator(bytecode_array);
AdvanceToOffsetForTracing(bytecode_iterator, offset);
if (offset == bytecode_iterator.current_offset()) {
+ OFStream os(stdout);
+
// Print bytecode.
const uint8_t* base_address = bytecode_array->GetFirstBytecodeAddress();
const uint8_t* bytecode_address = base_address + offset;
@@ -137,6 +144,10 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeEntry) {
}
RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
+ if (!FLAG_trace_ignition) {
+ return isolate->heap()->undefined_value();
+ }
+
SealHandleScope shs(isolate);
DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(BytecodeArray, bytecode_array, 0);
@@ -160,6 +171,8 @@ RUNTIME_FUNCTION(Runtime_InterpreterTraceBytecodeExit) {
return isolate->heap()->undefined_value();
}
+#endif
+
RUNTIME_FUNCTION(Runtime_InterpreterAdvanceBytecodeOffset) {
SealHandleScope shs(isolate);
DCHECK_EQ(2, args.length());
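Wrapping the bytecode-tracing runtime functions in #ifdef V8_TRACE_IGNITION plus an early FLAG_trace_ignition check gives two gates: builds without the define compile no tracing bodies at all, and builds with it still no-op cheaply when the flag is off. Moving the OFStream construction inside the offset match also keeps stdout untouched on the fast path. A sketch of the double gate, with hypothetical flag and macro names:

    #include <cstdio>

    // Hypothetical stand-ins for the sketch.
    #define TOY_TRACE_SUPPORTED 1
    static bool g_flag_trace = false;  // think FLAG_trace_ignition

    #if TOY_TRACE_SUPPORTED
    void TraceEntry(int offset) {
      if (!g_flag_trace) return;           // runtime gate: cheap no-op
      std::printf("trace @%d\n", offset);  // stream work only when on
    }
    #else
    void TraceEntry(int) {}                // compile-time gate: no body
    #endif

    int main() {
      TraceEntry(0);  // silent: flag off
      g_flag_trace = true;
      TraceEntry(8);  // prints
      return 0;
    }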
diff --git a/deps/v8/src/runtime/runtime-i18n.cc b/deps/v8/src/runtime/runtime-intl.cc
index 80c8a9cd01..623fe05fe8 100644
--- a/deps/v8/src/runtime/runtime-i18n.cc
+++ b/deps/v8/src/runtime/runtime-intl.cc
@@ -2,8 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#ifndef V8_INTL_SUPPORT
+#error Internationalization is expected to be enabled.
+#endif // V8_INTL_SUPPORT
-#ifdef V8_I18N_SUPPORT
#include "src/runtime/runtime-utils.h"
#include <memory>
@@ -12,9 +14,10 @@
#include "src/api.h"
#include "src/arguments.h"
#include "src/factory.h"
-#include "src/i18n.h"
+#include "src/intl.h"
#include "src/isolate-inl.h"
#include "src/messages.h"
+#include "src/objects/intl-objects.h"
#include "src/utils.h"
#include "unicode/brkiter.h"
@@ -41,9 +44,12 @@
#include "unicode/uloc.h"
#include "unicode/unistr.h"
#include "unicode/unum.h"
-#include "unicode/ustring.h"
+#include "unicode/uvernum.h"
#include "unicode/uversion.h"
+#if U_ICU_VERSION_MAJOR_NUM >= 59
+#include "unicode/char16ptr.h"
+#endif
namespace v8 {
namespace internal {
@@ -85,7 +91,6 @@ RUNTIME_FUNCTION(Runtime_CanonicalizeLanguageTag) {
return *factory->NewStringFromAsciiChecked(result);
}
-
RUNTIME_FUNCTION(Runtime_AvailableLocalesOf) {
HandleScope scope(isolate);
Factory* factory = isolate->factory();
@@ -130,7 +135,6 @@ RUNTIME_FUNCTION(Runtime_AvailableLocalesOf) {
return *locales;
}
-
RUNTIME_FUNCTION(Runtime_GetDefaultICULocale) {
HandleScope scope(isolate);
Factory* factory = isolate->factory();
@@ -151,7 +155,6 @@ RUNTIME_FUNCTION(Runtime_GetDefaultICULocale) {
return *factory->NewStringFromStaticChars("und");
}
-
RUNTIME_FUNCTION(Runtime_GetLanguageTagVariants) {
HandleScope scope(isolate);
Factory* factory = isolate->factory();
@@ -236,7 +239,6 @@ RUNTIME_FUNCTION(Runtime_GetLanguageTagVariants) {
return *result;
}
-
RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) {
HandleScope scope(isolate);
@@ -252,7 +254,6 @@ RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) {
return isolate->heap()->ToBoolean(!tag->IsUndefined(isolate));
}
-
RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
HandleScope scope(isolate);
@@ -270,7 +271,6 @@ RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
String::cast(*tag)->Equals(*expected_type));
}
-
RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) {
HandleScope scope(isolate);
@@ -285,7 +285,6 @@ RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) {
return isolate->heap()->undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
HandleScope scope(isolate);
@@ -318,7 +317,6 @@ RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
return *local_object;
}
-
RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
HandleScope scope(isolate);
@@ -406,9 +404,10 @@ bool AddElement(Handle<JSArray> array, int index, int32_t field_id,
icu::UnicodeString field(formatted.tempSubStringBetween(begin, end));
ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, value, factory->NewStringFromTwoByte(Vector<const uint16_t>(
- reinterpret_cast<const uint16_t*>(field.getBuffer()),
- field.length())),
+ isolate, value,
+ factory->NewStringFromTwoByte(Vector<const uint16_t>(
+ reinterpret_cast<const uint16_t*>(field.getBuffer()),
+ field.length())),
false);
JSObject::AddProperty(element, factory->value_string(), value, NONE);
@@ -507,7 +506,6 @@ RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
return *local_object;
}
-
RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
HandleScope scope(isolate);
@@ -587,7 +585,6 @@ RUNTIME_FUNCTION(Runtime_CreateCollator) {
return *local_object;
}
-
RUNTIME_FUNCTION(Runtime_InternalCompare) {
HandleScope scope(isolate);
@@ -624,7 +621,6 @@ RUNTIME_FUNCTION(Runtime_InternalCompare) {
return *isolate->factory()->NewNumberFromInt(result);
}
-
RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
HandleScope scope(isolate);
@@ -660,7 +656,6 @@ RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
return *local_object;
}
-
RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
HandleScope scope(isolate);
@@ -691,7 +686,6 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
return isolate->heap()->undefined_value();
}
-
RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
HandleScope scope(isolate);
@@ -706,7 +700,6 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
return *isolate->factory()->NewNumberFromInt(break_iterator->first());
}
-
RUNTIME_FUNCTION(Runtime_BreakIteratorNext) {
HandleScope scope(isolate);
@@ -721,7 +714,6 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorNext) {
return *isolate->factory()->NewNumberFromInt(break_iterator->next());
}
-
RUNTIME_FUNCTION(Runtime_BreakIteratorCurrent) {
HandleScope scope(isolate);
@@ -736,7 +728,6 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorCurrent) {
return *isolate->factory()->NewNumberFromInt(break_iterator->current());
}
-
RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
HandleScope scope(isolate);
@@ -768,7 +759,7 @@ RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
}
}
-RUNTIME_FUNCTION(Runtime_StringToLowerCaseI18N) {
+RUNTIME_FUNCTION(Runtime_StringToLowerCaseIntl) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
@@ -776,7 +767,7 @@ RUNTIME_FUNCTION(Runtime_StringToLowerCaseI18N) {
return ConvertToLower(s, isolate);
}
-RUNTIME_FUNCTION(Runtime_StringToUpperCaseI18N) {
+RUNTIME_FUNCTION(Runtime_StringToUpperCaseIntl) {
HandleScope scope(isolate);
DCHECK_EQ(args.length(), 1);
CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
@@ -798,7 +789,10 @@ RUNTIME_FUNCTION(Runtime_StringLocaleConvertCase) {
s = String::Flatten(s);
// All the languages requiring special-handling have two-letter codes.
- if (V8_UNLIKELY(lang_arg->length() > 2))
+ // Note that we have to check for '!= 2' here because private-use language
+ // tags (x-foo) or grandfathered irregular tags (e.g. i-enochian) would have
+ // only 'x' or 'i' when they get here.
+ if (V8_UNLIKELY(lang_arg->length() != 2))
return ConvertCase(s, is_upper, isolate);
char c1, c2;
@@ -843,5 +837,3 @@ RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
} // namespace internal
} // namespace v8
-
-#endif // V8_I18N_SUPPORT
diff --git a/deps/v8/src/runtime/runtime-literals.cc b/deps/v8/src/runtime/runtime-literals.cc
index 7beadf5e0b..1a2b1f584e 100644
--- a/deps/v8/src/runtime/runtime-literals.cc
+++ b/deps/v8/src/runtime/runtime-literals.cc
@@ -14,65 +14,56 @@
namespace v8 {
namespace internal {
-static Handle<Map> ComputeObjectLiteralMap(
- Handle<Context> context,
- Handle<BoilerplateDescription> boilerplate_description,
- bool* is_result_from_cache) {
- int number_of_properties = boilerplate_description->backing_store_size();
- Isolate* isolate = context->GetIsolate();
- return isolate->factory()->ObjectLiteralMapFromCache(
- context, number_of_properties, is_result_from_cache);
-}
-
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
Isolate* isolate, Handle<FeedbackVector> vector,
- Handle<BoilerplateDescription> boilerplate_description);
+ Handle<FixedArray> compile_time_value);
MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
Isolate* isolate, Handle<FeedbackVector> vector,
Handle<BoilerplateDescription> boilerplate_description,
- bool should_have_fast_elements) {
- Handle<Context> context = isolate->native_context();
+ bool use_fast_elements, bool has_null_prototype) {
+ Handle<Context> native_context = isolate->native_context();
// In case we have function literals, we want the object to be in
// slow properties mode for now. We don't go in the map cache because
// maps with constant functions can't be shared if the functions are
// not the same (which is the common case).
- bool is_result_from_cache = false;
- Handle<Map> map = ComputeObjectLiteralMap(context, boilerplate_description,
- &is_result_from_cache);
+ int number_of_properties = boilerplate_description->backing_store_size();
+
+ // Ignore number_of_properties to force a dictionary map with __proto__:null.
+ Handle<Map> map =
+ has_null_prototype
+ ? handle(native_context->slow_object_with_null_prototype_map(),
+ isolate)
+ : isolate->factory()->ObjectLiteralMapFromCache(native_context,
+ number_of_properties);
PretenureFlag pretenure_flag =
isolate->heap()->InNewSpace(*vector) ? NOT_TENURED : TENURED;
Handle<JSObject> boilerplate =
- isolate->factory()->NewJSObjectFromMap(map, pretenure_flag);
+ map->is_dictionary_map()
+ ? isolate->factory()->NewSlowJSObjectFromMap(
+ map, number_of_properties, pretenure_flag)
+ : isolate->factory()->NewJSObjectFromMap(map, pretenure_flag);
// Normalize the elements of the boilerplate to save space if needed.
- if (!should_have_fast_elements) JSObject::NormalizeElements(boilerplate);
+ if (!use_fast_elements) JSObject::NormalizeElements(boilerplate);
// Add the constant properties to the boilerplate.
int length = boilerplate_description->size();
- bool should_transform =
- !is_result_from_cache && boilerplate->HasFastProperties();
- bool should_normalize = should_transform;
- if (should_normalize) {
- // TODO(verwaest): We might not want to ever normalize here.
- JSObject::NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES, length,
- "Boilerplate");
- }
// TODO(verwaest): Support tracking representations in the boilerplate.
for (int index = 0; index < length; index++) {
Handle<Object> key(boilerplate_description->name(index), isolate);
Handle<Object> value(boilerplate_description->value(index), isolate);
- if (value->IsBoilerplateDescription()) {
- // The value contains the boilerplate properties of a
- // simple object or array literal.
- Handle<BoilerplateDescription> boilerplate =
- Handle<BoilerplateDescription>::cast(value);
+ if (value->IsFixedArray()) {
+ // The value contains the CompileTimeValue with the boilerplate properties
+ // of a simple object or array literal.
+ Handle<FixedArray> compile_time_value = Handle<FixedArray>::cast(value);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, value,
- CreateLiteralBoilerplate(isolate, vector, boilerplate), Object);
+ CreateLiteralBoilerplate(isolate, vector, compile_time_value),
+ Object);
}
MaybeHandle<Object> maybe_result;
uint32_t element_index = 0;
@@ -92,11 +83,9 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
RETURN_ON_EXCEPTION(isolate, maybe_result, Object);
}
- // Transform to fast properties if necessary. For object literals with
- // containing function literals we defer this operation until after all
- // computed properties have been assigned so that we can generate
- // constant function properties.
- if (should_transform) {
+ if (map->is_dictionary_map() && !has_null_prototype) {
+ // TODO(cbruni): avoid making the boilerplate fast again; the clone stub
+ // supports dict-mode objects directly.
JSObject::MigrateSlowToFast(boilerplate,
boilerplate->map()->unused_property_fields(),
"FastLiteral");
@@ -154,15 +143,16 @@ static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
copied_elements_values = fixed_array_values_copy;
FOR_WITH_HANDLE_SCOPE(
isolate, int, i = 0, i, i < fixed_array_values->length(), i++, {
- if (fixed_array_values->get(i)->IsBoilerplateDescription()) {
- // The value contains the boilerplate properties of a
- // simple object or array literal.
- Handle<BoilerplateDescription> boilerplate(
- BoilerplateDescription::cast(fixed_array_values->get(i)));
+ if (fixed_array_values->get(i)->IsFixedArray()) {
+ // The value contains the CompileTimeValue with the
+ // boilerplate description of a simple object or
+ // array literal.
+ Handle<FixedArray> compile_time_value(
+ FixedArray::cast(fixed_array_values->get(i)));
Handle<Object> result;
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
- CreateLiteralBoilerplate(isolate, vector, boilerplate),
+ CreateLiteralBoilerplate(isolate, vector, compile_time_value),
Object);
fixed_array_values_copy->set(i, *result);
}
@@ -178,28 +168,21 @@ static MaybeHandle<Object> CreateArrayLiteralBoilerplate(
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
Isolate* isolate, Handle<FeedbackVector> vector,
- Handle<BoilerplateDescription> array) {
- Handle<HeapObject> elements = CompileTimeValue::GetElements(array);
- switch (CompileTimeValue::GetLiteralType(array)) {
- case CompileTimeValue::OBJECT_LITERAL_FAST_ELEMENTS: {
- Handle<BoilerplateDescription> props =
- Handle<BoilerplateDescription>::cast(elements);
- return CreateObjectLiteralBoilerplate(isolate, vector, props, true);
- }
- case CompileTimeValue::OBJECT_LITERAL_SLOW_ELEMENTS: {
- Handle<BoilerplateDescription> props =
- Handle<BoilerplateDescription>::cast(elements);
- return CreateObjectLiteralBoilerplate(isolate, vector, props, false);
- }
- case CompileTimeValue::ARRAY_LITERAL: {
- Handle<ConstantElementsPair> elems =
- Handle<ConstantElementsPair>::cast(elements);
- return CreateArrayLiteralBoilerplate(isolate, vector, elems);
- }
- default:
- UNREACHABLE();
- return MaybeHandle<Object>();
+ Handle<FixedArray> compile_time_value) {
+ Handle<HeapObject> elements =
+ CompileTimeValue::GetElements(compile_time_value);
+ int flags = CompileTimeValue::GetLiteralTypeFlags(compile_time_value);
+ if (flags == CompileTimeValue::kArrayLiteralFlag) {
+ Handle<ConstantElementsPair> elems =
+ Handle<ConstantElementsPair>::cast(elements);
+ return CreateArrayLiteralBoilerplate(isolate, vector, elems);
}
+ Handle<BoilerplateDescription> props =
+ Handle<BoilerplateDescription>::cast(elements);
+ bool use_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
+ bool has_null_prototype = (flags & ObjectLiteral::kHasNullPrototype) != 0;
+ return CreateObjectLiteralBoilerplate(isolate, vector, props,
+ use_fast_elements, has_null_prototype);
}
@@ -233,8 +216,9 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
2);
CONVERT_SMI_ARG_CHECKED(flags, 3);
Handle<FeedbackVector> vector(closure->feedback_vector(), isolate);
- bool should_have_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
+ bool use_fast_elements = (flags & ObjectLiteral::kFastElements) != 0;
bool enable_mementos = (flags & ObjectLiteral::kDisableMementos) == 0;
+ bool has_null_prototype = (flags & ObjectLiteral::kHasNullPrototype) != 0;
FeedbackSlot literals_slot(FeedbackVector::ToSlot(literals_index));
CHECK(literals_slot.ToInt() < vector->slot_count());
@@ -248,7 +232,7 @@ RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, raw_boilerplate,
CreateObjectLiteralBoilerplate(isolate, vector, boilerplate_description,
- should_have_fast_elements));
+ use_fast_elements, has_null_prototype));
boilerplate = Handle<JSObject>::cast(raw_boilerplate);
AllocationSiteCreationContext creation_context(isolate);
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 4c34b0e563..eef2e6616a 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -126,11 +126,72 @@ static MaybeHandle<Object> KeyedGetObjectProperty(Isolate* isolate,
return Runtime::GetObjectProperty(isolate, receiver_obj, key_obj);
}
+namespace {
+
+bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
+ Handle<Object> raw_key) {
+ DisallowHeapAllocation no_allocation;
+  // This implements a special case for fast property deletion: when the
+  // last property in an object is deleted, instead of normalizing the
+  // properties we can undo the last map transition, given a few
+  // prerequisites:
+ // (1) The receiver must be a regular object and the key a unique name.
+ Map* map = receiver->map();
+ if (map->IsSpecialReceiverMap()) return false;
+ if (!raw_key->IsUniqueName()) return false;
+ Handle<Name> key = Handle<Name>::cast(raw_key);
+ // (2) The property to be deleted must be the last property.
+ int nof = map->NumberOfOwnDescriptors();
+ if (nof == 0) return false;
+ int descriptor = nof - 1;
+ DescriptorArray* descriptors = map->instance_descriptors();
+ if (descriptors->GetKey(descriptor) != *key) return false;
+ // (3) The property to be deleted must be deletable.
+ PropertyDetails details = descriptors->GetDetails(descriptor);
+ if (!details.IsConfigurable()) return false;
+ // (4) The map must have a back pointer.
+ Object* backpointer = map->GetBackPointer();
+ if (!backpointer->IsMap()) return false;
+ // (5) The last transition must have been caused by adding a property
+ // (and not any kind of special transition).
+ if (Map::cast(backpointer)->NumberOfOwnDescriptors() != nof - 1) return false;
+
+ // Preconditions successful. No more bailouts after this point.
+
+ // Zap the property to avoid keeping objects alive. Zapping is not necessary
+ // for properties stored in the descriptor array.
+ if (details.location() == kField) {
+ isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation);
+ Object* filler = isolate->heap()->one_pointer_filler_map();
+ FieldIndex index = FieldIndex::ForPropertyIndex(map, details.field_index());
+ JSObject::cast(*receiver)->RawFastPropertyAtPut(index, filler);
+ // We must clear any recorded slot for the deleted property, because
+ // subsequent object modifications might put a raw double there.
+ // Slot clearing is the reason why this entire function cannot currently
+ // be implemented in the DeleteProperty stub.
+ if (index.is_inobject() && !map->IsUnboxedDoubleField(index)) {
+ isolate->heap()->ClearRecordedSlot(
+ *receiver, HeapObject::RawField(*receiver, index.offset()));
+ }
+ }
+ // If the map was marked stable before, then there could be optimized code
+ // that depends on the assumption that no object that reached this map
+ // transitions away from it without triggering the "deoptimize dependent
+ // code" mechanism.
+ map->NotifyLeafMapLayoutChange();
+ // Finally, perform the map rollback.
+ receiver->synchronized_set_map(Map::cast(backpointer));
+ return true;
+}
+
+} // namespace
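
[Editor's note] The comments in DeleteObjectPropertyFast spell out the algorithm: if the deleted key is the most recently added property and the map chain is intact, the object can be rolled back to its parent map instead of being normalized into dictionary mode. A self-contained sketch of that rollback with invented stand-in types (none of this is V8 code):

#include <cassert>
#include <cstdio>
#include <string>
#include <vector>

struct Shape {
  const Shape* parent = nullptr;  // back pointer, like Map::GetBackPointer()
  std::vector<std::string> keys;  // descriptor keys, in insertion order
};

struct Object {
  const Shape* shape;
  std::vector<int> fields;  // one slot per key
};

// Returns true if the delete could be handled by rolling back the shape.
bool FastDelete(Object& obj, const std::string& key) {
  const Shape* s = obj.shape;
  // (2) must be the last property; (4) must have a back pointer;
  // (5) the parent must differ by exactly one descriptor.
  if (s->keys.empty() || s->keys.back() != key) return false;
  if (s->parent == nullptr) return false;
  if (s->parent->keys.size() != s->keys.size() - 1) return false;
  obj.fields.pop_back();  // "zap" the dropped field
  obj.shape = s->parent;  // the map rollback
  return true;
}

int main() {
  Shape root;
  Shape with_x{&root, {"x"}};
  Shape with_xy{&with_x, {"x", "y"}};
  Object o{&with_xy, {1, 2}};
  assert(FastDelete(o, "y"));  // last property: fast path applies
  assert(o.shape == &with_x);
  assert(!FastDelete(o, "q"));  // absent key: bail out to the slow path
  std::printf("rolled back; %zu key(s) remain\n", o.shape->keys.size());
}
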
Maybe<bool> Runtime::DeleteObjectProperty(Isolate* isolate,
Handle<JSReceiver> receiver,
Handle<Object> key,
LanguageMode language_mode) {
+ if (DeleteObjectPropertyFast(isolate, receiver, key)) return Just(true);
+
bool success = false;
LookupIterator it = LookupIterator::PropertyOrElement(
isolate, receiver, key, &success, LookupIterator::OWN);
@@ -139,6 +200,26 @@ Maybe<bool> Runtime::DeleteObjectProperty(Isolate* isolate,
return JSReceiver::DeleteProperty(&it, language_mode);
}
+// ES #sec-object.keys
+RUNTIME_FUNCTION(Runtime_ObjectKeys) {
+ HandleScope scope(isolate);
+ Handle<Object> object = args.at(0);
+
+ // Convert the {object} to a proper {receiver}.
+ Handle<JSReceiver> receiver;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, receiver,
+ Object::ToObject(isolate, object));
+
+ // Collect the own keys for the {receiver}.
+ Handle<FixedArray> keys;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, keys,
+ KeyAccumulator::GetKeys(receiver, KeyCollectionMode::kOwnOnly,
+ ENUMERABLE_STRINGS,
+ GetKeysConversion::kConvertToString));
+ return *keys;
+}
+
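[Editor's note] Runtime_ObjectKeys collects own keys with the ENUMERABLE_STRINGS filter, i.e. own, enumerable, string-keyed properties only. A standalone sketch of that filtering contract over an invented property record:

#include <cstdio>
#include <string>
#include <vector>

struct Property {
  std::string name;
  bool enumerable;
  bool is_symbol;  // stands in for symbol-keyed properties
};

// Own, enumerable, string-keyed names only -- the ENUMERABLE_STRINGS filter.
std::vector<std::string> ObjectKeys(const std::vector<Property>& own_props) {
  std::vector<std::string> keys;
  for (const Property& p : own_props) {
    if (!p.enumerable || p.is_symbol) continue;
    keys.push_back(p.name);
  }
  return keys;
}

int main() {
  std::vector<Property> props = {
      {"a", true, false}, {"hidden", false, false}, {"sym", true, true}};
  for (const std::string& k : ObjectKeys(props)) std::puts(k.c_str());  // a
}
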
// ES6 19.1.3.2
RUNTIME_FUNCTION(Runtime_ObjectHasOwnProperty) {
HandleScope scope(isolate);
@@ -251,18 +332,12 @@ RUNTIME_FUNCTION(Runtime_ObjectCreate) {
Handle<Map> map =
Map::GetObjectCreateMap(Handle<HeapObject>::cast(prototype));
- bool is_dictionary_map = map->is_dictionary_map();
- Handle<FixedArray> object_properties;
- if (is_dictionary_map) {
-    // Allocate the actual properties dictionary up front to avoid invalid object
-    // state.
- object_properties =
- NameDictionary::New(isolate, NameDictionary::kInitialCapacity);
- }
// Actually allocate the object.
- Handle<JSObject> object = isolate->factory()->NewJSObjectFromMap(map);
- if (is_dictionary_map) {
- object->set_properties(*object_properties);
+ Handle<JSObject> object;
+ if (map->is_dictionary_map()) {
+ object = isolate->factory()->NewSlowJSObjectFromMap(map);
+ } else {
+ object = isolate->factory()->NewJSObjectFromMap(map);
}
// Define the properties if properties was specified and is not undefined.
@@ -459,25 +534,28 @@ Object* DeleteProperty(Isolate* isolate, Handle<Object> object,
} // namespace
-
-RUNTIME_FUNCTION(Runtime_DeleteProperty_Sloppy) {
+RUNTIME_FUNCTION(Runtime_DeleteProperty) {
HandleScope scope(isolate);
- DCHECK_EQ(2, args.length());
+ DCHECK_EQ(3, args.length());
CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- return DeleteProperty(isolate, object, key, SLOPPY);
+ CONVERT_SMI_ARG_CHECKED(language_mode, 2);
+ return DeleteProperty(isolate, object, key,
+ static_cast<LanguageMode>(language_mode));
}
-
-RUNTIME_FUNCTION(Runtime_DeleteProperty_Strict) {
+RUNTIME_FUNCTION(Runtime_ShrinkPropertyDictionary) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
- CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
- return DeleteProperty(isolate, object, key, STRICT);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
+ CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
+ Handle<NameDictionary> dictionary(receiver->property_dictionary(), isolate);
+ Handle<NameDictionary> new_properties =
+ NameDictionary::Shrink(dictionary, key);
+ receiver->set_properties(*new_properties);
+ return Smi::kZero;
}
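
[Editor's note] Runtime_ShrinkPropertyDictionary gives deletes a way to release backing-store memory: after removing an entry, the dictionary may be reallocated at a smaller capacity. A standalone sketch of such a shrink heuristic; the thresholds here are invented, and NameDictionary::Shrink has its own:

#include <cstdio>
#include <vector>

struct Dict {
  std::vector<int> slots;  // capacity == slots.size(); rehashing elided
  size_t used = 0;
};

void MaybeShrink(Dict& d) {
  // Invented heuristic: halve the capacity once utilization drops below
  // a quarter, but never below a minimum size.
  if (d.slots.size() > 16 && d.used < d.slots.size() / 4) {
    d.slots.assign(d.slots.size() / 2, 0);
    std::printf("shrunk to %zu slots\n", d.slots.size());
  }
}

int main() {
  Dict d{std::vector<int>(64), 10};
  MaybeShrink(d);  // 10 < 64/4, so the table shrinks to 32 slots
}
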
-
// ES6 section 12.9.3, operator in.
RUNTIME_FUNCTION(Runtime_HasProperty) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/runtime/runtime-regexp.cc b/deps/v8/src/runtime/runtime-regexp.cc
index c9f201c11d..8803deff0f 100644
--- a/deps/v8/src/runtime/runtime-regexp.cc
+++ b/deps/v8/src/runtime/runtime-regexp.cc
@@ -1637,10 +1637,10 @@ RUNTIME_FUNCTION(Runtime_RegExpSplit) {
argv[0] = recv;
argv[1] = new_flags;
- Handle<JSFunction> ctor_fun = Handle<JSFunction>::cast(ctor);
Handle<Object> splitter_obj;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
- isolate, splitter_obj, Execution::New(ctor_fun, argc, argv.start()));
+ isolate, splitter_obj,
+ Execution::New(isolate, ctor, argc, argv.start()));
splitter = Handle<JSReceiver>::cast(splitter_obj);
}
diff --git a/deps/v8/src/runtime/runtime-strings.cc b/deps/v8/src/runtime/runtime-strings.cc
index 1ef04ed467..99fbf2d475 100644
--- a/deps/v8/src/runtime/runtime-strings.cc
+++ b/deps/v8/src/runtime/runtime-strings.cc
@@ -17,11 +17,12 @@ namespace internal {
RUNTIME_FUNCTION(Runtime_GetSubstitution) {
HandleScope scope(isolate);
- DCHECK_EQ(4, args.length());
+ DCHECK_EQ(5, args.length());
CONVERT_ARG_HANDLE_CHECKED(String, matched, 0);
CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
CONVERT_SMI_ARG_CHECKED(position, 2);
CONVERT_ARG_HANDLE_CHECKED(String, replacement, 3);
+ CONVERT_SMI_ARG_CHECKED(start_index, 4);
// A simple match without captures.
class SimpleMatch : public String::Match {
@@ -58,7 +59,8 @@ RUNTIME_FUNCTION(Runtime_GetSubstitution) {
SimpleMatch match(matched, prefix, suffix);
RETURN_RESULT_OR_FAILURE(
- isolate, String::GetSubstitution(isolate, &match, replacement));
+ isolate,
+ String::GetSubstitution(isolate, &match, replacement, start_index));
}
// This may return an empty MaybeHandle if an exception is thrown or
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 4574b5103e..6e1d09f6ad 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -111,6 +111,21 @@ bool WasmInstantiateOverride(const v8::FunctionCallbackInfo<v8::Value>& args) {
return true;
}
+bool GetWasmFromArray(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ CHECK(args.Length() == 1);
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ v8::Local<v8::Value> module =
+ v8::Local<v8::Object>::Cast(args[0])->Get(context, 0).ToLocalChecked();
+
+ v8::Local<v8::Promise::Resolver> resolver =
+ v8::Promise::Resolver::New(context).ToLocalChecked();
+ args.GetReturnValue().Set(resolver->GetPromise());
+ USE(resolver->Resolve(context, module));
+ return true;
+}
+
+bool NoExtension(const v8::FunctionCallbackInfo<v8::Value>&) { return false; }
+
} // namespace
namespace v8 {
@@ -150,6 +165,7 @@ RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
return isolate->heap()->undefined_value();
}
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
+ function->shared()->set_marked_for_tier_up(false);
// If the function is not optimized, just return.
if (!function->IsOptimized()) return isolate->heap()->undefined_value();
@@ -264,6 +280,11 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
if (function->IsOptimized()) return isolate->heap()->undefined_value();
function->MarkForOptimization();
+ if (FLAG_trace_opt) {
+ PrintF("[manually marking ");
+ function->ShortPrint();
+ PrintF(" for optimization]\n");
+ }
if (args.length() == 2) {
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
@@ -319,7 +340,7 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
DCHECK(args.length() == 1 || args.length() == 2);
int status = 0;
- if (!isolate->use_crankshaft()) {
+ if (!isolate->use_optimizer()) {
status |= static_cast<int>(OptimizationStatus::kNeverOptimize);
}
if (FLAG_always_opt || FLAG_prepare_always_opt) {
@@ -452,6 +473,16 @@ RUNTIME_FUNCTION(Runtime_ClearFunctionFeedback) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_SetWasmCompileFromPromiseOverload) {
+ isolate->set_wasm_compile_callback(GetWasmFromArray);
+ return isolate->heap()->undefined_value();
+}
+
+RUNTIME_FUNCTION(Runtime_ResetWasmOverloads) {
+ isolate->set_wasm_compile_callback(NoExtension);
+ return isolate->heap()->undefined_value();
+}
+
RUNTIME_FUNCTION(Runtime_CheckWasmWrapperElision) {
// This only supports the case where the function being exported
// calls an intermediate function, and the intermediate function
@@ -677,16 +708,6 @@ RUNTIME_FUNCTION(Runtime_NativeScriptsCount) {
return Smi::FromInt(Natives::GetBuiltinsCount());
}
-// TODO(5510): remove this.
-RUNTIME_FUNCTION(Runtime_GetV8Version) {
- HandleScope scope(isolate);
- DCHECK_EQ(0, args.length());
-
- const char* version_string = v8::V8::GetVersion();
-
- return *isolate->factory()->NewStringFromAsciiChecked(version_string);
-}
-
RUNTIME_FUNCTION(Runtime_DisassembleFunction) {
HandleScope scope(isolate);
@@ -998,14 +1019,5 @@ RUNTIME_FUNCTION(Runtime_RedirectToWasmInterpreter) {
return isolate->heap()->undefined_value();
}
-RUNTIME_FUNCTION(Runtime_IncrementWaitCount) {
- isolate->IncrementWaitCountForTesting();
- return isolate->heap()->undefined_value();
-}
-
-RUNTIME_FUNCTION(Runtime_DecrementWaitCount) {
- isolate->DecrementWaitCountForTesting();
- return isolate->heap()->undefined_value();
-}
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index eeaa40e5ea..aa87c921eb 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -50,6 +50,22 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
return isolate->heap()->undefined_value();
}
+RUNTIME_FUNCTION(Runtime_TypedArrayCopyElements) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(3, args.length());
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, destination, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, source, 1);
+ CONVERT_NUMBER_ARG_HANDLE_CHECKED(length_obj, 2);
+
+ size_t length;
+ CHECK(TryNumberToSize(*length_obj, &length));
+
+ Handle<JSTypedArray> destination_ta = Handle<JSTypedArray>::cast(destination);
+
+ ElementsAccessor* accessor = destination_ta->GetElementsAccessor();
+ return accessor->CopyElements(source, destination, length);
+}
+
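[Editor's note] The ElementsAccessor::CopyElements call above performs an element-wise copy from an arbitrary source into the typed destination, converting each value to the destination's element type on store. A standalone sketch of that conversion behavior for an invented uint8 destination:

#include <cstdint>
#include <cstdio>
#include <vector>

// Copy |length| elements into a uint8 backing store, converting on store.
void CopyElements(const std::vector<double>& source, uint8_t* destination,
                  size_t length) {
  for (size_t i = 0; i < length && i < source.size(); ++i) {
    // Truncate toward zero, then wrap modulo 256, as a uint8 store would.
    destination[i] = static_cast<uint8_t>(static_cast<int64_t>(source[i]));
  }
}

int main() {
  std::vector<double> src = {1.9, 300.0, -1.0};
  uint8_t dst[3] = {0};
  CopyElements(src, dst, 3);
  std::printf("%d %d %d\n", dst[0], dst[1], dst[2]);  // 1 44 255
}
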
#define BUFFER_VIEW_GETTER(Type, getter, accessor) \
RUNTIME_FUNCTION(Runtime_##Type##Get##getter) { \
HandleScope scope(isolate); \
@@ -64,6 +80,12 @@ BUFFER_VIEW_GETTER(TypedArray, Length, length)
#undef BUFFER_VIEW_GETTER
+RUNTIME_FUNCTION(Runtime_ArrayBufferViewWasNeutered) {
+ HandleScope scope(isolate);
+ DCHECK_EQ(1, args.length());
+ return isolate->heap()->ToBoolean(JSTypedArray::cast(args[0])->WasNeutered());
+}
+
RUNTIME_FUNCTION(Runtime_TypedArrayGetBuffer) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -223,7 +245,6 @@ RUNTIME_FUNCTION(Runtime_IsTypedArray) {
return isolate->heap()->ToBoolean(args[0]->IsJSTypedArray());
}
-
RUNTIME_FUNCTION(Runtime_IsSharedTypedArray) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
@@ -260,5 +281,21 @@ RUNTIME_FUNCTION(Runtime_IsSharedInteger32TypedArray) {
obj->type() == kExternalInt32Array);
}
+RUNTIME_FUNCTION(Runtime_TypedArraySpeciesCreateByLength) {
+ HandleScope scope(isolate);
+ DCHECK(args.length() == 2);
+ Handle<JSTypedArray> exemplar = args.at<JSTypedArray>(0);
+ Handle<Object> length = args.at(1);
+ int argc = 1;
+ ScopedVector<Handle<Object>> argv(argc);
+ argv[0] = length;
+ Handle<JSTypedArray> result_array;
+ // TODO(tebbi): Pass correct method name.
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result_array,
+ JSTypedArray::SpeciesCreate(isolate, exemplar, argc, argv.start(), ""));
+ return *result_array;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-wasm.cc b/deps/v8/src/runtime/runtime-wasm.cc
index 090d5b4a40..bb5360abe9 100644
--- a/deps/v8/src/runtime/runtime-wasm.cc
+++ b/deps/v8/src/runtime/runtime-wasm.cc
@@ -99,9 +99,9 @@ Object* ThrowRuntimeError(Isolate* isolate, int message_id, int byte_offset,
// properties).
Handle<Object> detailed_stack_trace_obj = JSReceiver::GetDataProperty(
error, isolate->factory()->detailed_stack_trace_symbol());
- if (detailed_stack_trace_obj->IsJSArray()) {
+ if (detailed_stack_trace_obj->IsFixedArray()) {
Handle<FixedArray> stack_elements(
- FixedArray::cast(JSArray::cast(*detailed_stack_trace_obj)->elements()));
+ FixedArray::cast(*detailed_stack_trace_obj));
DCHECK_GE(stack_elements->length(), 1);
Handle<StackFrameInfo> top_frame(
StackFrameInfo::cast(stack_elements->get(0)));
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index eb5f09db9b..386b1a8108 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -118,12 +118,19 @@ namespace internal {
F(WeakCollectionHas, 3, 1) \
F(WeakCollectionDelete, 3, 1) \
F(WeakCollectionSet, 4, 1) \
- F(GetWeakSetValues, 2, 1)
+ F(GetWeakSetValues, 2, 1) \
+ F(IsJSMap, 1, 1) \
+ F(IsJSSet, 1, 1) \
+ F(IsJSMapIterator, 1, 1) \
+ F(IsJSSetIterator, 1, 1) \
+ F(IsJSWeakMap, 1, 1) \
+ F(IsJSWeakSet, 1, 1)
#define FOR_EACH_INTRINSIC_COMPILER(F) \
F(CompileLazy, 1, 1) \
F(CompileOptimized_Concurrent, 1, 1) \
F(CompileOptimized_NotConcurrent, 1, 1) \
+ F(EvictOptimizedCodeSlot, 1, 1) \
F(NotifyStubFailure, 0, 1) \
F(NotifyDeoptimized, 1, 1) \
F(CompileForOnStackReplacement, 1, 1) \
@@ -209,10 +216,17 @@ namespace internal {
F(ForInFilter, 2, 1) \
F(ForInHasProperty, 2, 1)
+#ifdef V8_TRACE_IGNITION
+#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
+ F(InterpreterTraceBytecodeEntry, 3, 1) \
+ F(InterpreterTraceBytecodeExit, 3, 1)
+#else
+#define FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F)
+#endif
+
#define FOR_EACH_INTRINSIC_INTERPRETER(F) \
+ FOR_EACH_INTRINSIC_INTERPRETER_TRACE(F) \
F(InterpreterNewClosure, 4, 1) \
- F(InterpreterTraceBytecodeEntry, 3, 1) \
- F(InterpreterTraceBytecodeExit, 3, 1) \
F(InterpreterAdvanceBytecodeOffset, 2, 1)
#define FOR_EACH_INTRINSIC_FUNCTION(F) \
@@ -251,8 +265,8 @@ namespace internal {
F(GeneratorGetSourcePosition, 1, 1) \
F(GeneratorGetResumeMode, 1, 1)
-#ifdef V8_I18N_SUPPORT
-#define FOR_EACH_INTRINSIC_I18N(F) \
+#ifdef V8_INTL_SUPPORT
+#define FOR_EACH_INTRINSIC_INTL(F) \
F(CanonicalizeLanguageTag, 1, 1) \
F(AvailableLocalesOf, 1, 1) \
F(GetDefaultICULocale, 0, 1) \
@@ -274,63 +288,62 @@ namespace internal {
F(BreakIteratorNext, 1, 1) \
F(BreakIteratorCurrent, 1, 1) \
F(BreakIteratorBreakType, 1, 1) \
- F(StringToLowerCaseI18N, 1, 1) \
- F(StringToUpperCaseI18N, 1, 1) \
+ F(StringToLowerCaseIntl, 1, 1) \
+ F(StringToUpperCaseIntl, 1, 1) \
F(StringLocaleConvertCase, 3, 1) \
F(DateCacheVersion, 0, 1)
#else
-#define FOR_EACH_INTRINSIC_I18N(F)
+#define FOR_EACH_INTRINSIC_INTL(F)
#endif
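
[Editor's note] These FOR_EACH_INTRINSIC_* lists are X-macros: every entry is F(name, number_of_args, result_size), and the runtime expands F differently to generate enums, tables, and declarations from one list. A minimal sketch of the pattern with invented entries:

#include <cstdio>

// Each entry is F(name, number_of_args, result_size).
#define FOR_EACH_DEMO_INTRINSIC(F) \
  F(Add, 2, 1)                     \
  F(Negate, 1, 1)

// Expansion 1: an enum of intrinsic ids.
enum DemoIntrinsic {
#define DECL(Name, nargs, ressize) kDemo##Name,
  FOR_EACH_DEMO_INTRINSIC(DECL)
#undef DECL
};

// Expansion 2: a parallel table of argument counts.
const int kDemoArgCounts[] = {
#define COUNT(Name, nargs, ressize) nargs,
    FOR_EACH_DEMO_INTRINSIC(COUNT)
#undef COUNT
};

int main() {
  std::printf("Negate takes %d arg(s)\n", kDemoArgCounts[kDemoNegate]);  // 1
}
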
-#define FOR_EACH_INTRINSIC_INTERNAL(F) \
- F(AllocateInNewSpace, 1, 1) \
- F(AllocateInTargetSpace, 2, 1) \
- F(AllocateSeqOneByteString, 1, 1) \
- F(AllocateSeqTwoByteString, 1, 1) \
- F(CheckIsBootstrapping, 0, 1) \
- F(CreateAsyncFromSyncIterator, 1, 1) \
- F(CreateListFromArrayLike, 1, 1) \
- F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
- F(ExportFromRuntime, 1, 1) \
- F(IncrementUseCounter, 1, 1) \
- F(InstallToContext, 1, 1) \
- F(Interrupt, 0, 1) \
- F(IS_VAR, 1, 1) \
- F(NewReferenceError, 2, 1) \
- F(NewSyntaxError, 2, 1) \
- F(NewTypeError, 2, 1) \
- F(OrdinaryHasInstance, 2, 1) \
- F(PromoteScheduledException, 0, 1) \
- F(ReThrow, 1, 1) \
- F(RunMicrotasks, 0, 1) \
- F(StackGuard, 0, 1) \
- F(Throw, 1, 1) \
- F(ThrowApplyNonFunction, 1, 1) \
- F(ThrowCannotConvertToPrimitive, 0, 1) \
- F(ThrowCalledNonCallable, 1, 1) \
- F(ThrowCalledOnNullOrUndefined, 1, 1) \
- F(ThrowConstructedNonConstructable, 1, 1) \
- F(ThrowDerivedConstructorReturnedNonObject, 0, 1) \
- F(ThrowGeneratorRunning, 0, 1) \
- F(ThrowIllegalInvocation, 0, 1) \
- F(ThrowIncompatibleMethodReceiver, 2, 1) \
- F(ThrowInvalidHint, 1, 1) \
- F(ThrowInvalidStringLength, 0, 1) \
- F(ThrowInvalidTypedArrayAlignment, 2, 1) \
- F(ThrowIteratorResultNotAnObject, 1, 1) \
- F(ThrowSymbolIteratorInvalid, 0, 1) \
- F(ThrowNonCallableInInstanceOfCheck, 0, 1) \
- F(ThrowNonObjectInInstanceOfCheck, 0, 1) \
- F(ThrowNotConstructor, 1, 1) \
- F(ThrowNotGeneric, 1, 1) \
- F(ThrowRangeError, -1 /* >= 1 */, 1) \
- F(ThrowReferenceError, 1, 1) \
- F(ThrowStackOverflow, 0, 1) \
- F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
- F(ThrowTypeError, -1 /* >= 1 */, 1) \
- F(ThrowUndefinedOrNullToObject, 1, 1) \
- F(Typeof, 1, 1) \
- F(UnwindAndFindExceptionHandler, 0, 1) \
+#define FOR_EACH_INTRINSIC_INTERNAL(F) \
+ F(AllocateInNewSpace, 1, 1) \
+ F(AllocateInTargetSpace, 2, 1) \
+ F(AllocateSeqOneByteString, 1, 1) \
+ F(AllocateSeqTwoByteString, 1, 1) \
+ F(CheckIsBootstrapping, 0, 1) \
+ F(CreateAsyncFromSyncIterator, 1, 1) \
+ F(CreateListFromArrayLike, 1, 1) \
+ F(GetAndResetRuntimeCallStats, -1 /* <= 2 */, 1) \
+ F(ExportFromRuntime, 1, 1) \
+ F(IncrementUseCounter, 1, 1) \
+ F(InstallToContext, 1, 1) \
+ F(Interrupt, 0, 1) \
+ F(IS_VAR, 1, 1) \
+ F(NewReferenceError, 2, 1) \
+ F(NewSyntaxError, 2, 1) \
+ F(NewTypeError, 2, 1) \
+ F(OrdinaryHasInstance, 2, 1) \
+ F(PromoteScheduledException, 0, 1) \
+ F(ReThrow, 1, 1) \
+ F(RunMicrotasks, 0, 1) \
+ F(StackGuard, 0, 1) \
+ F(Throw, 1, 1) \
+ F(ThrowApplyNonFunction, 1, 1) \
+ F(ThrowCannotConvertToPrimitive, 0, 1) \
+ F(ThrowCalledNonCallable, 1, 1) \
+ F(ThrowCalledOnNullOrUndefined, 1, 1) \
+ F(ThrowConstructedNonConstructable, 1, 1) \
+ F(ThrowConstructorReturnedNonObject, 0, 1) \
+ F(ThrowGeneratorRunning, 0, 1) \
+ F(ThrowIllegalInvocation, 0, 1) \
+ F(ThrowIncompatibleMethodReceiver, 2, 1) \
+ F(ThrowInvalidHint, 1, 1) \
+ F(ThrowInvalidStringLength, 0, 1) \
+ F(ThrowInvalidTypedArrayAlignment, 2, 1) \
+ F(ThrowIteratorResultNotAnObject, 1, 1) \
+ F(ThrowSymbolIteratorInvalid, 0, 1) \
+ F(ThrowNonCallableInInstanceOfCheck, 0, 1) \
+ F(ThrowNonObjectInInstanceOfCheck, 0, 1) \
+ F(ThrowNotConstructor, 1, 1) \
+ F(ThrowRangeError, -1 /* >= 1 */, 1) \
+ F(ThrowReferenceError, 1, 1) \
+ F(ThrowStackOverflow, 0, 1) \
+ F(ThrowSymbolAsyncIteratorInvalid, 0, 1) \
+ F(ThrowTypeError, -1 /* >= 1 */, 1) \
+ F(ThrowUndefinedOrNullToObject, 1, 1) \
+ F(Typeof, 1, 1) \
+ F(UnwindAndFindExceptionHandler, 0, 1) \
F(AllowDynamicFunction, 1, 1)
#define FOR_EACH_INTRINSIC_LITERALS(F) \
@@ -378,6 +391,7 @@ namespace internal {
#define FOR_EACH_INTRINSIC_OBJECT(F) \
F(AddDictionaryProperty, 3, 1) \
F(GetPrototype, 1, 1) \
+ F(ObjectKeys, 1, 1) \
F(ObjectHasOwnProperty, 2, 1) \
F(ObjectCreate, 2, 1) \
F(InternalSetPrototype, 2, 1) \
@@ -388,8 +402,8 @@ namespace internal {
F(SetProperty, 4, 1) \
F(AddElement, 3, 1) \
F(AppendElement, 2, 1) \
- F(DeleteProperty_Sloppy, 2, 1) \
- F(DeleteProperty_Strict, 2, 1) \
+ F(DeleteProperty, 3, 1) \
+ F(ShrinkPropertyDictionary, 2, 1) \
F(HasProperty, 2, 1) \
F(GetOwnPropertyKeys, 2, 1) \
F(GetInterceptorInfo, 1, 1) \
@@ -466,9 +480,7 @@ namespace internal {
F(PromiseRevokeReject, 1, 1) \
F(PromiseResult, 1, 1) \
F(PromiseStatus, 1, 1) \
- F(ReportPromiseReject, 2, 1) \
- F(IncrementWaitCount, 0, 1) \
- F(DecrementWaitCount, 0, 1)
+ F(ReportPromiseReject, 2, 1)
#define FOR_EACH_INTRINSIC_PROXY(F) \
F(IsJSProxy, 1, 1) \
@@ -519,7 +531,7 @@ namespace internal {
F(StoreLookupSlot_Strict, 2, 1)
#define FOR_EACH_INTRINSIC_STRINGS(F) \
- F(GetSubstitution, 4, 1) \
+ F(GetSubstitution, 5, 1) \
F(StringReplaceOneCharWithString, 3, 1) \
F(StringIndexOf, 3, 1) \
F(StringIndexOfUnchecked, 3, 1) \
@@ -581,7 +593,6 @@ namespace internal {
F(Abort, 1, 1) \
F(AbortJS, 1, 1) \
F(NativeScriptsCount, 0, 1) \
- F(GetV8Version, 0, 1) \
F(DisassembleFunction, 1, 1) \
F(TraceEnter, 0, 1) \
F(TraceExit, 1, 1) \
@@ -617,24 +628,29 @@ namespace internal {
F(ValidateWasmOrphanedInstance, 1, 1) \
F(SetWasmCompileControls, 2, 1) \
F(SetWasmInstantiateControls, 0, 1) \
+ F(SetWasmCompileFromPromiseOverload, 0, 1) \
+ F(ResetWasmOverloads, 0, 1) \
F(HeapObjectVerify, 1, 1) \
F(WasmNumInterpretedCalls, 1, 1) \
F(RedirectToWasmInterpreter, 2, 1)
-#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
- F(ArrayBufferGetByteLength, 1, 1) \
- F(ArrayBufferNeuter, 1, 1) \
- F(ArrayBufferViewGetByteLength, 1, 1) \
- F(ArrayBufferViewGetByteOffset, 1, 1) \
- F(TypedArrayGetLength, 1, 1) \
- F(TypedArrayGetBuffer, 1, 1) \
- F(TypedArraySetFastCases, 3, 1) \
- F(TypedArraySortFast, 1, 1) \
- F(TypedArrayMaxSizeInHeap, 0, 1) \
- F(IsTypedArray, 1, 1) \
- F(IsSharedTypedArray, 1, 1) \
- F(IsSharedIntegerTypedArray, 1, 1) \
- F(IsSharedInteger32TypedArray, 1, 1)
+#define FOR_EACH_INTRINSIC_TYPEDARRAY(F) \
+ F(ArrayBufferGetByteLength, 1, 1) \
+ F(ArrayBufferNeuter, 1, 1) \
+ F(TypedArrayCopyElements, 3, 1) \
+ F(ArrayBufferViewGetByteLength, 1, 1) \
+ F(ArrayBufferViewGetByteOffset, 1, 1) \
+ F(ArrayBufferViewWasNeutered, 1, 1) \
+ F(TypedArrayGetLength, 1, 1) \
+ F(TypedArrayGetBuffer, 1, 1) \
+ F(TypedArraySetFastCases, 3, 1) \
+ F(TypedArraySortFast, 1, 1) \
+ F(TypedArrayMaxSizeInHeap, 0, 1) \
+ F(IsTypedArray, 1, 1) \
+ F(IsSharedTypedArray, 1, 1) \
+ F(IsSharedIntegerTypedArray, 1, 1) \
+ F(IsSharedInteger32TypedArray, 1, 1) \
+ F(TypedArraySpeciesCreateByLength, 2, 1)
#define FOR_EACH_INTRINSIC_WASM(F) \
F(WasmGrowMemory, 1, 1) \
@@ -692,7 +708,7 @@ namespace internal {
FOR_EACH_INTRINSIC_INTERPRETER(F) \
FOR_EACH_INTRINSIC_FUNCTION(F) \
FOR_EACH_INTRINSIC_GENERATOR(F) \
- FOR_EACH_INTRINSIC_I18N(F) \
+ FOR_EACH_INTRINSIC_INTL(F) \
FOR_EACH_INTRINSIC_INTERNAL(F) \
FOR_EACH_INTRINSIC_LITERALS(F) \
FOR_EACH_INTRINSIC_LIVEEDIT(F) \
diff --git a/deps/v8/src/s390/assembler-s390-inl.h b/deps/v8/src/s390/assembler-s390-inl.h
index 12b607165d..6024232f9b 100644
--- a/deps/v8/src/s390/assembler-s390-inl.h
+++ b/deps/v8/src/s390/assembler-s390-inl.h
@@ -300,22 +300,22 @@ template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
+ visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
+ visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
+ visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(this);
+ visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
+ visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(this);
+ visitor->VisitDebugTarget(host(), this);
} else if (IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
+ visitor->VisitRuntimeEntry(host(), this);
}
}
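
[Editor's note] The hunk above reflects a visitor-API change: every RelocInfo callback now also receives the host Code object, so a visitor can attribute each visited slot to the object that contains it. A standalone sketch of the new callback shape with invented stand-in types:

#include <cstdio>

struct Code {};  // invented stand-in for v8::internal::Code
struct RelocInfo {
  Code* host;
};

struct ObjectVisitor {
  // New-style callback: (host, reloc info) instead of just (reloc info).
  virtual void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) = 0;
  virtual ~ObjectVisitor() = default;
};

struct PrintingVisitor : ObjectVisitor {
  void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
    (void)rinfo;
    std::printf("pointer slot hosted by code object %p\n",
                static_cast<void*>(host));
  }
};

int main() {
  Code code;
  RelocInfo ri{&code};
  PrintingVisitor v;
  v.VisitEmbeddedPointer(ri.host, &ri);
}
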
diff --git a/deps/v8/src/s390/assembler-s390.cc b/deps/v8/src/s390/assembler-s390.cc
index 246ab118f8..35305fc074 100644
--- a/deps/v8/src/s390/assembler-s390.cc
+++ b/deps/v8/src/s390/assembler-s390.cc
@@ -2101,7 +2101,14 @@ void Assembler::GrowBuffer(int needed) {
if (space < needed) {
desc.buffer_size += needed - space;
}
- CHECK_GT(desc.buffer_size, 0); // no overflow
+
+  // Some internal data structures overflow for very large buffers, so
+  // kMaximalBufferSize must be kept small enough to prevent that.
+ if (desc.buffer_size > kMaximalBufferSize ||
+ static_cast<size_t>(desc.buffer_size) >
+ isolate_data().max_old_generation_size_) {
+ V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
+ }
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
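
[Editor's note] The guard added to GrowBuffer fails fast when the requested buffer size exceeds a hard cap, rather than letting later displacement arithmetic overflow. A standalone sketch of the idea; the growth policy and cap here are illustrative, not V8's exact logic:

#include <cstdio>
#include <cstdlib>

constexpr int kMB = 1024 * 1024;
constexpr int kMaximalBufferSize = 512 * kMB;  // mirrors the new constant

// Compute the grown size, failing fast instead of overflowing later.
int GrowBufferSize(int current_size, int needed) {
  int new_size = current_size + current_size / 2;  // grow by ~1.5x
  if (new_size - current_size < needed) new_size = current_size + needed;
  if (new_size > kMaximalBufferSize) {
    std::fputs("FatalProcessOutOfMemory: Assembler::GrowBuffer\n", stderr);
    std::abort();
  }
  return new_size;
}

int main() { std::printf("%d\n", GrowBufferSize(4096, 100)); }  // 6144
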
diff --git a/deps/v8/src/s390/assembler-s390.h b/deps/v8/src/s390/assembler-s390.h
index 311ecfcab0..bee7452aaa 100644
--- a/deps/v8/src/s390/assembler-s390.h
+++ b/deps/v8/src/s390/assembler-s390.h
@@ -1366,6 +1366,9 @@ class Assembler : public AssemblerBase {
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
// Code generation
// The relocation writer's position is at least kGap bytes below the end of
// the generated instructions. This is so that multi-instruction sequences do
diff --git a/deps/v8/src/s390/code-stubs-s390.cc b/deps/v8/src/s390/code-stubs-s390.cc
index 15d54751d0..e47cb3e903 100644
--- a/deps/v8/src/s390/code-stubs-s390.cc
+++ b/deps/v8/src/s390/code-stubs-s390.cc
@@ -1205,87 +1205,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ b(r14);
}
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- // This case is handled prior to the RegExpExecStub call.
- __ Abort(kUnexpectedRegExpExecCall);
-#else // V8_INTERPRETED_REGEXP
- __ CleanseP(r14);
-
- // Isolates: note we add an additional parameter here (isolate pointer).
- const int kRegExpExecuteArguments = 10;
- const int kParameterRegisters = 5;
- __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
-
- // Stack pointer now points to cell where return address is to be written.
- // Arguments are before that on the stack or in registers.
-
- // Argument 10 (in stack parameter area): Pass current isolate address.
- __ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
- __ StoreP(r6, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
- 4 * kPointerSize));
-
- // Argument 9 is a dummy that reserves the space used for
- // the return address added by the ExitFrame in native calls.
- __ mov(r6, Operand::Zero());
- __ StoreP(r6, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
- 3 * kPointerSize));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ mov(r6, Operand(1));
- __ StoreP(r6, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
- 2 * kPointerSize));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ mov(r6, Operand(address_of_regexp_stack_memory_address));
- __ LoadP(r6, MemOperand(r6, 0));
- __ mov(r1, Operand(address_of_regexp_stack_memory_size));
- __ LoadP(r1, MemOperand(r1, 0));
- __ AddP(r6, r1);
- __ StoreP(r6, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
- 1 * kPointerSize));
-
- // Argument 6: Set the number of capture registers to zero to force
-  // global regexps to behave as non-global. This does not affect non-global
- // regexps.
- __ mov(r6, Operand::Zero());
- __ StoreP(r6, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize +
- 0 * kPointerSize));
-
- // Argument 5 (r6): static offsets vector buffer.
- __ mov(
- r6,
- Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
-
- // Argument 4, r5: End of string data
- // Argument 3, r4: Start of string data
- CHECK(r5.is(RegExpExecDescriptor::StringEndRegister()));
- CHECK(r4.is(RegExpExecDescriptor::StringStartRegister()));
-
- // Argument 2 (r3): Previous index.
- CHECK(r3.is(RegExpExecDescriptor::LastIndexRegister()));
-
- // Argument 1 (r2): Subject string.
- CHECK(r2.is(RegExpExecDescriptor::StringRegister()));
-
- // Locate the code entry and call it.
- Register code_reg = RegExpExecDescriptor::CodeRegister();
- __ AddP(code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- DirectCEntryStub stub(isolate());
- stub.GenerateCall(masm, code_reg);
-
- __ LeaveExitFrame(false, no_reg, true);
-
- // Return the smi-tagged result.
- __ SmiTag(r2);
- __ Ret();
-#endif // V8_INTERPRETED_REGEXP
-}
static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) {
// r2 : number of arguments to the construct function
@@ -2956,9 +2875,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ push(call_data);
Register scratch = call_data;
- if (!call_data_undefined()) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- }
+ __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
// return value
__ push(scratch);
// return value default
diff --git a/deps/v8/src/s390/codegen-s390.cc b/deps/v8/src/s390/codegen-s390.cc
index 6b84200510..04dc77129c 100644
--- a/deps/v8/src/s390/codegen-s390.cc
+++ b/deps/v8/src/s390/codegen-s390.cc
@@ -36,7 +36,8 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(&desc);
- DCHECK(ABI_USES_FUNCTION_DESCRIPTORS || !RelocInfo::RequiresRelocation(desc));
+ DCHECK(ABI_USES_FUNCTION_DESCRIPTORS ||
+ !RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
diff --git a/deps/v8/src/s390/deoptimizer-s390.cc b/deps/v8/src/s390/deoptimizer-s390.cc
index a1c46dc2db..a0ca01849e 100644
--- a/deps/v8/src/s390/deoptimizer-s390.cc
+++ b/deps/v8/src/s390/deoptimizer-s390.cc
@@ -35,25 +35,22 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- if (FLAG_zap_code_space) {
- // Fail hard and early if we enter this code object again.
- byte* pointer = code->FindCodeAgeSequence();
- if (pointer != NULL) {
- pointer += kNoCodeAgeSequenceLength;
- } else {
- pointer = code->instruction_start();
- }
- CodePatcher patcher(isolate, pointer, 2);
- patcher.masm()->bkpt(0);
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int osr_offset = data->OsrPcOffset()->value();
- if (osr_offset > 0) {
- CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
- 2);
- osr_patcher.masm()->bkpt(0);
- }
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(isolate, pointer, 2);
+ patcher.masm()->bkpt(0);
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 2);
+ osr_patcher.masm()->bkpt(0);
}
DeoptimizationInputData* deopt_data =
diff --git a/deps/v8/src/s390/interface-descriptors-s390.cc b/deps/v8/src/s390/interface-descriptors-s390.cc
index 091a64583a..0028b9578b 100644
--- a/deps/v8/src/s390/interface-descriptors-s390.cc
+++ b/deps/v8/src/s390/interface-descriptors-s390.cc
@@ -56,12 +56,6 @@ const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
-const Register RegExpExecDescriptor::StringRegister() { return r2; }
-const Register RegExpExecDescriptor::LastIndexRegister() { return r3; }
-const Register RegExpExecDescriptor::StringStartRegister() { return r4; }
-const Register RegExpExecDescriptor::StringEndRegister() { return r5; }
-const Register RegExpExecDescriptor::CodeRegister() { return r9; }
-
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r2; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r5; }
@@ -151,9 +145,20 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // r2 : number of arguments
// r4 : start index (to support rest parameters)
// r3 : the target to call
- Register registers[] = {r3, r4};
+ Register registers[] = {r3, r2, r4};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // r2 : number of arguments
+ // r5 : the new target
+ // r4 : start index (to support rest parameters)
+ // r3 : the target to call
+ Register registers[] = {r3, r5, r2, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/s390/macro-assembler-s390.cc b/deps/v8/src/s390/macro-assembler-s390.cc
index bd6f962a84..d0e3ea022a 100644
--- a/deps/v8/src/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/s390/macro-assembler-s390.cc
@@ -276,7 +276,7 @@ void MacroAssembler::RecordWriteField(
lay(dst, MemOperand(object, offset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
+ AndP(r0, dst, Operand(kPointerSize - 1));
beq(&ok, Label::kNear);
stop("Unaligned cell in write barrier");
bind(&ok);
@@ -329,7 +329,7 @@ void MacroAssembler::RecordWriteForMap(Register object, Register map,
lay(dst, MemOperand(object, HeapObject::kMapOffset - kHeapObjectTag));
if (emit_debug_code()) {
Label ok;
- AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
+ AndP(r0, dst, Operand(kPointerSize - 1));
beq(&ok, Label::kNear);
stop("Unaligned cell in write barrier");
bind(&ok);
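
[Editor's note] Both hunks replace the mask (1 << kPointerSizeLog2) - 1 with the equivalent kPointerSize - 1; since kPointerSize is defined as 1 << kPointerSizeLog2, the masks are identical and the new spelling merely avoids the shift. A compile-time check of the equivalence (constants assume a 64-bit target):

#include <cstdint>

constexpr int kPointerSizeLog2 = 3;  // assumes a 64-bit target
constexpr int kPointerSize = 1 << kPointerSizeLog2;

static_assert(((1 << kPointerSizeLog2) - 1) == kPointerSize - 1,
              "old and new spellings produce the same mask");

// A cell is pointer-aligned iff the low bits of its address are zero.
constexpr bool IsCellAligned(uintptr_t addr) {
  return (addr & (kPointerSize - 1)) == 0;
}
static_assert(IsCellAligned(16) && !IsCellAligned(12), "sanity check");

int main() {}
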
@@ -2712,6 +2712,7 @@ void MacroAssembler::CallCFunction(Register function, int num_arguments) {
void MacroAssembler::CallCFunctionHelper(Register function,
int num_reg_arguments,
int num_double_arguments) {
+ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
DCHECK(has_frame());
// Just call directly. The function called cannot cause a GC, or
diff --git a/deps/v8/src/s390/simulator-s390.cc b/deps/v8/src/s390/simulator-s390.cc
index b0fa0917ad..9f41bace2a 100644
--- a/deps/v8/src/s390/simulator-s390.cc
+++ b/deps/v8/src/s390/simulator-s390.cc
@@ -1644,6 +1644,8 @@ void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
+ base::LockGuard<base::Mutex> lock_guard(
+ isolate->simulator_redirection_mutex());
Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address();
}
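
[Editor's note] The LockGuard added above serializes redirection lookup: redirections are created lazily on first use and may be requested from multiple threads, so the get-or-create step needs a mutex. A standalone sketch of that pattern with invented types:

#include <map>
#include <mutex>

struct Redirection {
  void* address;
};

class RedirectionRegistry {
 public:
  // Thread-safe lazy get-or-create, mirroring RedirectExternalReference.
  Redirection* Get(void* external_function) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = table_.find(external_function);
    if (it == table_.end()) {
      it = table_.emplace(external_function, Redirection{external_function})
               .first;
    }
    return &it->second;
  }

 private:
  std::mutex mutex_;
  std::map<void*, Redirection> table_;
};

int main() {
  RedirectionRegistry registry;
  int fn = 0;
  // The same external reference always maps to the same redirection record.
  return registry.Get(&fn) == registry.Get(&fn) ? 0 : 1;
}
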
@@ -1934,7 +1936,9 @@ static void decodeObjectPair(ObjectPair* pair, intptr_t* x, intptr_t* y) {
// Calls into the V8 runtime.
typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
intptr_t arg2, intptr_t arg3,
- intptr_t arg4, intptr_t arg5);
+ intptr_t arg4, intptr_t arg5,
+ intptr_t arg6, intptr_t arg7,
+ intptr_t arg8);
typedef ObjectPair (*SimulatorRuntimePairCall)(intptr_t arg0, intptr_t arg1,
intptr_t arg2, intptr_t arg3,
intptr_t arg4, intptr_t arg5);
@@ -1971,7 +1975,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
(get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
0;
Redirection* redirection = Redirection::FromSwiInstruction(instr);
- const int kArgCount = 6;
+ const int kArgCount = 9;
+ const int kRegisterArgCount = 5;
int arg0_regnum = 2;
intptr_t result_buffer = 0;
bool uses_result_buffer =
@@ -1983,11 +1988,18 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
arg0_regnum++;
}
intptr_t arg[kArgCount];
- for (int i = 0; i < kArgCount - 1; i++) {
+ // First 5 arguments in registers r2-r6.
+ for (int i = 0; i < kRegisterArgCount; i++) {
arg[i] = get_register(arg0_regnum + i);
}
+ // Remaining arguments on stack
intptr_t* stack_pointer = reinterpret_cast<intptr_t*>(get_register(sp));
- arg[5] = stack_pointer[kCalleeRegisterSaveAreaSize / kPointerSize];
+ for (int i = kRegisterArgCount; i < kArgCount; i++) {
+ arg[i] = stack_pointer[(kCalleeRegisterSaveAreaSize / kPointerSize) +
+ (i - kRegisterArgCount)];
+ }
+ STATIC_ASSERT(kArgCount == kRegisterArgCount + 4);
+ STATIC_ASSERT(kMaxCParameters == 9);
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
@@ -2165,9 +2177,10 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF(
"Call to host function at %p,\n"
"\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+ ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
static_cast<void*>(FUNCTION_ADDR(target)), arg[0], arg[1], arg[2],
- arg[3], arg[4], arg[5]);
+ arg[3], arg[4], arg[5], arg[6], arg[7], arg[8]);
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
static_cast<intptr_t>(get_register(sp)));
@@ -2214,8 +2227,8 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
- intptr_t result =
- target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+ intptr_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4],
+ arg[5], arg[6], arg[7], arg[8]);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %08" V8PRIxPTR "\n", result);
}
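
[Editor's note] The simulator changes above widen runtime calls from six to nine arguments: the s390 ABI passes the first five integer arguments in registers r2-r6 and the rest in the caller's stack frame, so the simulator gathers from both before making the call. A standalone sketch of that gathering (layout constants invented):

#include <array>
#include <cstdio>

constexpr int kArgCount = 9;          // matches the widened kArgCount
constexpr int kRegisterArgCount = 5;  // r2..r6 on s390

std::array<long, kArgCount> GatherArgs(const long* regs /* r2..r6 */,
                                       const long* stack_args) {
  std::array<long, kArgCount> arg{};
  for (int i = 0; i < kRegisterArgCount; i++) arg[i] = regs[i];
  for (int i = kRegisterArgCount; i < kArgCount; i++)
    arg[i] = stack_args[i - kRegisterArgCount];  // spilled past the 5th
  return arg;
}

int main() {
  long regs[5] = {1, 2, 3, 4, 5};
  long stack[4] = {6, 7, 8, 9};
  for (long a : GatherArgs(regs, stack)) std::printf("%ld ", a);
  std::printf("\n");  // 1 2 3 4 5 6 7 8 9
}
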
diff --git a/deps/v8/src/s390/simulator-s390.h b/deps/v8/src/s390/simulator-s390.h
index 95a7d05772..a214b198df 100644
--- a/deps/v8/src/s390/simulator-s390.h
+++ b/deps/v8/src/s390/simulator-s390.h
@@ -25,16 +25,14 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*s390_regexp_matcher)(String*, int, const byte*, const byte*, int*,
- int, Address, int, void*, Isolate*);
+ int, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type s390_regexp_matcher.
-// The ninth argument is a dummy that reserves the space used for
-// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
(FUNCTION_CAST<s390_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, \
- NULL, p8))
+ p8))
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on s390 uses the C stack, we
@@ -442,7 +440,7 @@ class Simulator {
static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page);
- // Runtime call support.
+ // Runtime call support. Uses the isolate in a thread-safe way.
static void* RedirectExternalReference(
Isolate* isolate, void* external_function,
v8::internal::ExternalReference::Type type);
@@ -1259,10 +1257,9 @@ class Simulator {
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
p7, p8) \
- Simulator::current(isolate)->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, \
- (intptr_t)p2, (intptr_t)p3, (intptr_t)p4, \
- (intptr_t)p5, (intptr_t)p6, (intptr_t)p7, \
- (intptr_t)NULL, (intptr_t)p8)
+ Simulator::current(isolate)->Call( \
+ entry, 9, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2, (intptr_t)p3, \
+ (intptr_t)p4, (intptr_t)p5, (intptr_t)p6, (intptr_t)p7, (intptr_t)p8)
// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. The JS-based limit normally points near the end of
diff --git a/deps/v8/src/safepoint-table.h b/deps/v8/src/safepoint-table.h
index a368d0e77d..393def7e8a 100644
--- a/deps/v8/src/safepoint-table.h
+++ b/deps/v8/src/safepoint-table.h
@@ -6,7 +6,8 @@
#define V8_SAFEPOINT_TABLE_H_
#include "src/allocation.h"
-#include "src/heap/heap.h"
+#include "src/assert-scope.h"
+#include "src/utils.h"
#include "src/v8memory.h"
#include "src/zone/zone.h"
diff --git a/deps/v8/src/setup-isolate-deserialize.cc b/deps/v8/src/setup-isolate-deserialize.cc
index ce14c83d17..a01bb5a3f8 100644
--- a/deps/v8/src/setup-isolate-deserialize.cc
+++ b/deps/v8/src/setup-isolate-deserialize.cc
@@ -16,19 +16,10 @@ void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate,
bool create_heap_objects) {
DCHECK(!create_heap_objects);
// No actual work to be done; builtins will be deserialized from the snapshot.
- isolate->builtins()->MarkInitialized();
}
void SetupIsolateDelegate::SetupInterpreter(
interpreter::Interpreter* interpreter, bool create_heap_objects) {
-#ifdef V8_USE_SNAPSHOT
- if (FLAG_trace_ignition || FLAG_trace_ignition_codegen ||
- FLAG_trace_ignition_dispatches) {
- OFStream os(stdout);
- os << "Warning: --trace-ignition-* flags must be passed at mksnapshot "
- << "time or used with nosnapshot builds." << std::endl;
- }
-#endif
DCHECK(interpreter->IsDispatchTableInitialized());
}
diff --git a/deps/v8/src/setup-isolate-full.cc b/deps/v8/src/setup-isolate-full.cc
index 007d4f7bf3..9dd7e64d29 100644
--- a/deps/v8/src/setup-isolate-full.cc
+++ b/deps/v8/src/setup-isolate-full.cc
@@ -5,6 +5,7 @@
#include "src/setup-isolate.h"
#include "src/base/logging.h"
+#include "src/interpreter/interpreter.h"
#include "src/interpreter/setup-interpreter.h"
#include "src/isolate.h"
@@ -13,30 +14,20 @@ namespace internal {
void SetupIsolateDelegate::SetupBuiltins(Isolate* isolate,
bool create_heap_objects) {
-#ifdef V8_GYP_BUILD
- // Compatibility hack to keep the deprecated GYP build working.
if (create_heap_objects) {
SetupBuiltinsInternal(isolate);
} else {
- isolate->builtins()->MarkInitialized();
+ DCHECK(isolate->snapshot_available());
}
- return;
-#endif
- DCHECK(create_heap_objects);
- SetupBuiltinsInternal(isolate);
}
void SetupIsolateDelegate::SetupInterpreter(
interpreter::Interpreter* interpreter, bool create_heap_objects) {
-#ifdef V8_GYP_BUILD
- // Compatibility hack to keep the deprecated GYP build working.
if (create_heap_objects) {
interpreter::SetupInterpreter::InstallBytecodeHandlers(interpreter);
+ } else {
+ DCHECK(interpreter->IsDispatchTableInitialized());
}
- return;
-#endif
- DCHECK(create_heap_objects);
- interpreter::SetupInterpreter::InstallBytecodeHandlers(interpreter);
}
} // namespace internal
diff --git a/deps/v8/src/setup-isolate.h b/deps/v8/src/setup-isolate.h
index 2e56eaec64..1cdc9c9e76 100644
--- a/deps/v8/src/setup-isolate.h
+++ b/deps/v8/src/setup-isolate.h
@@ -16,11 +16,12 @@ class Interpreter;
// This class is an abstraction layer around initialization of components
// that are either deserialized from the snapshot or generated from scratch.
// Currently this includes builtins and interpreter bytecode handlers.
-// There are three implementations to choose from (at link time):
+// There are two implementations to choose from at link time:
// - setup-isolate-deserialize.cc: always loads things from snapshot.
-// - setup-isolate-full.cc: always generates things.
-// - setup-isolate-for-tests.cc: does the one or the other, controlled by
-// the |create_heap_objects| flag.
+// - setup-isolate-full.cc: loads from snapshot or bootstraps from scratch,
+// controlled by the |create_heap_objects| flag.
+// For testing, the implementation in setup-isolate-for-tests.cc can be chosen
+// to force the behavior of setup-isolate-full.cc at runtime.
//
// The actual implementations of generation of builtins and handlers is in
// setup-builtins-internal.cc and setup-interpreter-internal.cc, and is
diff --git a/deps/v8/src/snapshot/code-serializer.cc b/deps/v8/src/snapshot/code-serializer.cc
index 48813f5c61..cd8b1f1a0e 100644
--- a/deps/v8/src/snapshot/code-serializer.cc
+++ b/deps/v8/src/snapshot/code-serializer.cc
@@ -14,6 +14,7 @@
#include "src/snapshot/deserializer.h"
#include "src/snapshot/snapshot.h"
#include "src/version.h"
+#include "src/visitors.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
@@ -50,7 +51,7 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
ScriptData* CodeSerializer::Serialize(Handle<HeapObject> obj) {
DisallowHeapAllocation no_gc;
- VisitPointer(Handle<Object>::cast(obj).location());
+ VisitRootPointer(Root::kHandleScope, Handle<Object>::cast(obj).location());
SerializeDeferredObjects();
Pad();
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index 4b1bd45ecb..c76e4eca54 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -22,11 +22,11 @@ namespace internal {
void Deserializer::DecodeReservation(
Vector<const SerializedData::Reservation> res) {
- DCHECK_EQ(0, reservations_[NEW_SPACE].length());
+ DCHECK_EQ(0, reservations_[NEW_SPACE].size());
STATIC_ASSERT(NEW_SPACE == 0);
int current_space = NEW_SPACE;
for (auto& r : res) {
- reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
+ reservations_[current_space].push_back({r.chunk_size(), NULL, NULL});
if (r.is_last()) current_space++;
}
DCHECK_EQ(kNumberOfSpaces, current_space);
@@ -57,7 +57,7 @@ void Deserializer::FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects() {
bool Deserializer::ReserveSpace() {
#ifdef DEBUG
for (int i = NEW_SPACE; i < kNumberOfSpaces; ++i) {
- CHECK(reservations_[i].length() > 0);
+ CHECK(reservations_[i].size() > 0);
}
#endif // DEBUG
DCHECK(allocated_maps_.is_empty());
@@ -92,6 +92,8 @@ void Deserializer::Deserialize(Isolate* isolate) {
DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
// Partial snapshot cache is not yet populated.
DCHECK(isolate_->partial_snapshot_cache()->is_empty());
+ // Builtins are not yet created.
+ DCHECK(!isolate_->builtins()->is_initialized());
{
DisallowHeapAllocation no_gc;
@@ -122,7 +124,7 @@ void Deserializer::Deserialize(Isolate* isolate) {
LOG_CODE_EVENT(isolate_, LogBytecodeHandlers());
LOG_CODE_EVENT(isolate_, LogCompiledFunctions());
- if (FLAG_rehash_snapshot && can_rehash_) Rehash();
+ isolate_->builtins()->MarkInitialized();
}
MaybeHandle<Object> Deserializer::DeserializePartial(
@@ -142,7 +144,7 @@ MaybeHandle<Object> Deserializer::DeserializePartial(
OldSpace* code_space = isolate_->heap()->code_space();
Address start_address = code_space->top();
Object* root;
- VisitPointer(&root);
+ VisitRootPointer(Root::kPartialSnapshotCache, &root);
DeserializeDeferredObjects();
DeserializeEmbedderFields(embedder_fields_deserializer);
@@ -153,9 +155,6 @@ MaybeHandle<Object> Deserializer::DeserializePartial(
// changed and logging should be added to notify the profiler et al of the
// new code, which also has to be flushed from instruction cache.
CHECK_EQ(start_address, code_space->top());
-
- if (FLAG_rehash_snapshot && can_rehash_) RehashContext(Context::cast(root));
-
return Handle<Object>(root, isolate);
}
@@ -170,7 +169,7 @@ MaybeHandle<HeapObject> Deserializer::DeserializeObject(Isolate* isolate) {
{
DisallowHeapAllocation no_gc;
Object* root;
- VisitPointer(&root);
+ VisitRootPointer(Root::kPartialSnapshotCache, &root);
DeserializeDeferredObjects();
FlushICacheForNewCodeObjectsAndRecordEmbeddedObjects();
result = Handle<HeapObject>(HeapObject::cast(root));
@@ -182,63 +181,6 @@ MaybeHandle<HeapObject> Deserializer::DeserializeObject(Isolate* isolate) {
}
}
-// We only really just need HashForObject here.
-class StringRehashKey : public HashTableKey {
- public:
- uint32_t HashForObject(Object* other) override {
- return String::cast(other)->Hash();
- }
-
- static uint32_t StringHash(Object* obj) {
- UNREACHABLE();
- return String::cast(obj)->Hash();
- }
-
- bool IsMatch(Object* string) override {
- UNREACHABLE();
- return false;
- }
-
- uint32_t Hash() override {
- UNREACHABLE();
- return 0;
- }
-
- Handle<Object> AsHandle(Isolate* isolate) override {
- UNREACHABLE();
- return isolate->factory()->empty_string();
- }
-};
-
-void Deserializer::Rehash() {
- DCHECK(can_rehash_);
- isolate_->heap()->InitializeHashSeed();
- if (FLAG_profile_deserialization) {
- PrintF("Re-initializing hash seed to %x\n",
- isolate_->heap()->hash_seed()->value());
- }
- StringRehashKey string_rehash_key;
- isolate_->heap()->string_table()->Rehash(&string_rehash_key);
- SortMapDescriptors();
-}
-
-void Deserializer::RehashContext(Context* context) {
- DCHECK(can_rehash_);
- for (const auto& array : transition_arrays_) array->Sort();
- Handle<Name> dummy = isolate_->factory()->empty_string();
- context->global_object()->global_dictionary()->Rehash(dummy);
- SortMapDescriptors();
-}
-
-void Deserializer::SortMapDescriptors() {
- for (const auto& address : allocated_maps_) {
- Map* map = Map::cast(HeapObject::FromAddress(address));
- if (map->instance_descriptors()->number_of_descriptors() > 1) {
- map->instance_descriptors()->Sort();
- }
- }
-}
-
Deserializer::~Deserializer() {
#ifdef DEBUG
// Do not perform checks if we aborted deserialization.
@@ -247,7 +189,7 @@ Deserializer::~Deserializer() {
while (source_.HasMore()) CHECK_EQ(kNop, source_.Get());
for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
int chunk_index = current_chunk_[space];
- CHECK_EQ(reservations_[space].length(), chunk_index + 1);
+ CHECK_EQ(reservations_[space].size(), chunk_index + 1);
CHECK_EQ(reservations_[space][chunk_index].end, high_water_[space]);
}
CHECK_EQ(allocated_maps_.length(), next_map_index_);
@@ -256,7 +198,7 @@ Deserializer::~Deserializer() {
// This is called on the roots. It is the driver of the deserialization
// process. It is also called on the body of each function.
-void Deserializer::VisitPointers(Object** start, Object** end) {
+void Deserializer::VisitRootPointers(Root root, Object** start, Object** end) {
// The space must be new space. Any other space would cause ReadChunk to try
// to update the remembered set using NULL as the address.
ReadData(start, end, NEW_SPACE, NULL);
@@ -429,16 +371,6 @@ HeapObject* Deserializer::PostProcessNewObject(HeapObject* obj, int space) {
string->resource()));
isolate_->heap()->RegisterExternalString(string);
}
- if (FLAG_rehash_snapshot && can_rehash_ && !deserializing_user_code()) {
- if (obj->IsString()) {
- // Uninitialize hash field as we are going to reinitialize the hash seed.
- String* string = String::cast(obj);
- string->set_hash_field(String::kEmptyHashField);
- } else if (obj->IsTransitionArray() &&
- TransitionArray::cast(obj)->number_of_entries() > 1) {
- transition_arrays_.Add(TransitionArray::cast(obj));
- }
- }
// Check alignment.
DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(), obj->RequiredAlignment()));
return obj;
@@ -863,7 +795,7 @@ bool Deserializer::ReadData(Object** current, Object** limit, int source_space,
CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
// Move to next reserved chunk.
chunk_index = ++current_chunk_[space];
- CHECK_LT(chunk_index, reservation.length());
+ CHECK_LT(chunk_index, reservation.size());
high_water_[space] = reservation[chunk_index].start;
break;
}
diff --git a/deps/v8/src/snapshot/deserializer.h b/deps/v8/src/snapshot/deserializer.h
index f3a60b89c3..a56adb67d4 100644
--- a/deps/v8/src/snapshot/deserializer.h
+++ b/deps/v8/src/snapshot/deserializer.h
@@ -39,8 +39,7 @@ class Deserializer : public SerializerDeserializer {
external_reference_table_(NULL),
deserialized_large_objects_(0),
deserializing_user_code_(deserializing_user_code),
- next_alignment_(kWordAligned),
- can_rehash_(false) {
+ next_alignment_(kWordAligned) {
DecodeReservation(data->Reservations());
}
@@ -63,15 +62,11 @@ class Deserializer : public SerializerDeserializer {
attached_objects_.Add(attached_object);
}
- void SetRehashability(bool v) { can_rehash_ = v; }
-
private:
- void VisitPointers(Object** start, Object** end) override;
+ void VisitRootPointers(Root root, Object** start, Object** end) override;
void Synchronize(VisitorSynchronization::SyncTag tag) override;
- void VisitRuntimeEntry(RelocInfo* rinfo) override { UNREACHABLE(); }
-
void Initialize(Isolate* isolate);
bool deserializing_user_code() { return deserializing_user_code_; }
@@ -120,15 +115,6 @@ class Deserializer : public SerializerDeserializer {
// snapshot by chunk index and offset.
HeapObject* GetBackReferencedObject(int space);
- // Rehash after deserializing an isolate.
- void Rehash();
-
- // Rehash after deserializing a context.
- void RehashContext(Context* context);
-
- // Sort descriptors of deserialized maps using new string hashes.
- void SortMapDescriptors();
-
// Cached current isolate.
Isolate* isolate_;
@@ -156,15 +142,11 @@ class Deserializer : public SerializerDeserializer {
List<AccessorInfo*> accessor_infos_;
List<Handle<String> > new_internalized_strings_;
List<Handle<Script> > new_scripts_;
- List<TransitionArray*> transition_arrays_;
bool deserializing_user_code_;
AllocationAlignment next_alignment_;
- // TODO(6593): generalize rehashing, and remove this flag.
- bool can_rehash_;
-
DISALLOW_COPY_AND_ASSIGN(Deserializer);
};
diff --git a/deps/v8/src/snapshot/partial-serializer.cc b/deps/v8/src/snapshot/partial-serializer.cc
index 894fea7dba..d3e60e0e4e 100644
--- a/deps/v8/src/snapshot/partial-serializer.cc
+++ b/deps/v8/src/snapshot/partial-serializer.cc
@@ -15,9 +15,7 @@ PartialSerializer::PartialSerializer(
v8::SerializeEmbedderFieldsCallback callback)
: Serializer(isolate),
startup_serializer_(startup_serializer),
- serialize_embedder_fields_(callback),
- rehashable_global_dictionary_(nullptr),
- can_be_rehashed_(true) {
+ serialize_embedder_fields_(callback) {
InitializeCodeAddressMap();
}
@@ -26,7 +24,7 @@ PartialSerializer::~PartialSerializer() {
}
void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
- if ((*o)->IsNativeContext()) {
+ if ((*o)->IsContext()) {
Context* context = Context::cast(*o);
reference_map()->AddAttachedReference(context->global_proxy());
// The bootstrap snapshot has a code-stub context. When serializing the
@@ -34,20 +32,16 @@ void PartialSerializer::Serialize(Object** o, bool include_global_proxy) {
// and its next context pointer may point to the code-stub context. Clear
// it before serializing; it will get re-added to the context list
// explicitly when it's loaded.
- context->set(Context::NEXT_CONTEXT_LINK,
- isolate_->heap()->undefined_value());
- DCHECK(!context->global_object()->IsUndefined(context->GetIsolate()));
- // Reset math random cache to get fresh random numbers.
- context->set_math_random_index(Smi::kZero);
- context->set_math_random_cache(isolate_->heap()->undefined_value());
- DCHECK_NULL(rehashable_global_dictionary_);
- rehashable_global_dictionary_ =
- context->global_object()->global_dictionary();
- } else {
- // We only do rehashing for native contexts.
- can_be_rehashed_ = false;
+ if (context->IsNativeContext()) {
+ context->set(Context::NEXT_CONTEXT_LINK,
+ isolate_->heap()->undefined_value());
+ DCHECK(!context->global_object()->IsUndefined(context->GetIsolate()));
+ // Reset math random cache to get fresh random numbers.
+ context->set_math_random_index(Smi::kZero);
+ context->set_math_random_cache(isolate_->heap()->undefined_value());
+ }
}
- VisitPointer(o);
+ VisitRootPointer(Root::kPartialSnapshotCache, o);
SerializeDeferredObjects();
SerializeEmbedderFields();
Pad();
@@ -110,8 +104,6 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
}
}
- if (obj->IsHashTable()) CheckRehashability(obj);
-
// Object has not yet been serialized. Serialize it here.
ObjectSerializer serializer(this, obj, &sink_, how_to_code, where_to_point);
serializer.Serialize();
@@ -160,14 +152,5 @@ void PartialSerializer::SerializeEmbedderFields() {
sink_.Put(kSynchronize, "Finished with embedder fields data");
}
-void PartialSerializer::CheckRehashability(HeapObject* table) {
- DCHECK(table->IsHashTable());
- if (!can_be_rehashed_) return;
- // We can only correctly rehash if the global dictionary is the only hash
- // table that we deserialize.
- if (table == rehashable_global_dictionary_) return;
- can_be_rehashed_ = false;
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/snapshot/partial-serializer.h b/deps/v8/src/snapshot/partial-serializer.h
index 4b3035f9e5..313a800042 100644
--- a/deps/v8/src/snapshot/partial-serializer.h
+++ b/deps/v8/src/snapshot/partial-serializer.h
@@ -23,8 +23,6 @@ class PartialSerializer : public Serializer {
// Serialize the objects reachable from a single object pointer.
void Serialize(Object** o, bool include_global_proxy);
- bool can_be_rehashed() const { return can_be_rehashed_; }
-
private:
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
@@ -33,15 +31,9 @@ class PartialSerializer : public Serializer {
void SerializeEmbedderFields();
- void CheckRehashability(HeapObject* table);
-
StartupSerializer* startup_serializer_;
List<JSObject*> embedder_field_holders_;
v8::SerializeEmbedderFieldsCallback serialize_embedder_fields_;
- GlobalDictionary* rehashable_global_dictionary_;
- // Indicates whether we only serialized hash tables that we can rehash.
- // TODO(yangguo): generalize rehashing, and remove this flag.
- bool can_be_rehashed_;
DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};
diff --git a/deps/v8/src/snapshot/serializer-common.cc b/deps/v8/src/snapshot/serializer-common.cc
index 89aabdf263..05a18ab727 100644
--- a/deps/v8/src/snapshot/serializer-common.cc
+++ b/deps/v8/src/snapshot/serializer-common.cc
@@ -67,14 +67,14 @@ void SerializedData::AllocateData(int size) {
// - during normal GC to keep its content alive.
// - not during serialization. The partial serializer adds to it explicitly.
DISABLE_CFI_PERF
-void SerializerDeserializer::Iterate(Isolate* isolate, ObjectVisitor* visitor) {
+void SerializerDeserializer::Iterate(Isolate* isolate, RootVisitor* visitor) {
List<Object*>* cache = isolate->partial_snapshot_cache();
for (int i = 0;; ++i) {
// Extend the array so it is ready to receive a value when deserializing.
if (cache->length() <= i) cache->Add(Smi::kZero);
// During deserialization, the visitor populates the partial snapshot cache
// and eventually terminates the cache with undefined.
- visitor->VisitPointer(&cache->at(i));
+ visitor->VisitRootPointer(Root::kPartialSnapshotCache, &cache->at(i));
if (cache->at(i)->IsUndefined(isolate)) break;
}
}
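
The loop above relies on a sentinel-terminated cache: entries are visited until one of them turns out to be undefined. A minimal standalone sketch of the same pattern, using plain C++ containers and an integer sentinel instead of V8's List and RootVisitor (all names here are illustrative, not V8 API):

  #include <cstdio>
  #include <functional>
  #include <vector>

  // Grows the cache on demand and stops at the first sentinel entry,
  // mirroring how the partial snapshot cache is iterated above.
  void IterateSentinelTerminated(std::vector<int>* cache,
                                 const std::function<void(int*)>& visit,
                                 int sentinel) {
    for (size_t i = 0;; ++i) {
      if (cache->size() <= i) cache->push_back(0);  // extend, ready for a value
      visit(&(*cache)[i]);  // the visitor may populate the slot
      if ((*cache)[i] == sentinel) break;  // terminator reached
    }
  }

  int main() {
    std::vector<int> cache;
    int next = 1;
    // Fake "deserializer": writes 1, 2, 3, then the sentinel (-1).
    IterateSentinelTerminated(
        &cache, [&](int* slot) { *slot = (next <= 3) ? next++ : -1; }, -1);
    std::printf("cache holds %zu entries\n", cache.size());  // 4, incl. sentinel
    return 0;
  }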
diff --git a/deps/v8/src/snapshot/serializer-common.h b/deps/v8/src/snapshot/serializer-common.h
index 8605c43f3b..d445cb95c9 100644
--- a/deps/v8/src/snapshot/serializer-common.h
+++ b/deps/v8/src/snapshot/serializer-common.h
@@ -8,6 +8,7 @@
#include "src/address-map.h"
#include "src/external-reference-table.h"
#include "src/globals.h"
+#include "src/visitors.h"
namespace v8 {
namespace internal {
@@ -73,9 +74,9 @@ class HotObjectsList {
// The Serializer/Deserializer class is a common superclass for Serializer and
// Deserializer that stores common constants and methods used by
// both.
-class SerializerDeserializer : public ObjectVisitor {
+class SerializerDeserializer : public RootVisitor {
public:
- static void Iterate(Isolate* isolate, ObjectVisitor* visitor);
+ static void Iterate(Isolate* isolate, RootVisitor* visitor);
// No reservation for large object space necessary.
// We also handle map space differently.
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 625958812f..a63d888d11 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -76,6 +76,7 @@ void Serializer::OutputStatistics(const char* name) {
for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
PrintF("%16" PRIuS, s);
}
+ PrintF("%16d", num_maps_ * Map::kSize);
PrintF("%16d\n", large_objects_total_size_);
#ifdef OBJECT_PRINT
PrintF(" Instance types (count and bytes):\n");
@@ -99,7 +100,7 @@ void Serializer::SerializeDeferredObjects() {
sink_.Put(kSynchronize, "Finished with deferred objects");
}
-void Serializer::VisitPointers(Object** start, Object** end) {
+void Serializer::VisitRootPointers(Root root, Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsSmi()) {
PutSmi(Smi::cast(*current));
@@ -598,7 +599,8 @@ void Serializer::ObjectSerializer::SerializeDeferred() {
OutputRawData(object_->address() + size);
}
-void Serializer::ObjectSerializer::VisitPointers(Object** start, Object** end) {
+void Serializer::ObjectSerializer::VisitPointers(HeapObject* host,
+ Object** start, Object** end) {
Object** current = start;
while (current < end) {
while (current < end && (*current)->IsSmi()) current++;
@@ -636,7 +638,8 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start, Object** end) {
}
}
-void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code* host,
+ RelocInfo* rinfo) {
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
@@ -646,7 +649,8 @@ void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
bytes_processed_so_far_ += rinfo->target_address_size();
}
-void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
+void Serializer::ObjectSerializer::VisitExternalReference(Foreign* host,
+ Address* p) {
int skip = OutputRawData(reinterpret_cast<Address>(p),
kCanReturnSkipInsteadOfSkipping);
Address target = *p;
@@ -658,7 +662,8 @@ void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
bytes_processed_so_far_ += kPointerSize;
}
-void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitExternalReference(Code* host,
+ RelocInfo* rinfo) {
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
@@ -673,7 +678,8 @@ void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
bytes_processed_so_far_ += rinfo->target_address_size();
}
-void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitInternalReference(Code* host,
+ RelocInfo* rinfo) {
// We can only reference internal references of code that has been output.
DCHECK(object_->IsCode() && code_has_been_output_);
// We do not use skip from last patched pc to find the pc to patch, since
@@ -697,7 +703,8 @@ void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
}
-void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitRuntimeEntry(Code* host,
+ RelocInfo* rinfo) {
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
@@ -711,7 +718,8 @@ void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
bytes_processed_so_far_ += rinfo->target_address_size();
}
-void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitCodeTarget(Code* host,
+ RelocInfo* rinfo) {
int skip = OutputRawData(rinfo->target_address_address(),
kCanReturnSkipInsteadOfSkipping);
Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -719,14 +727,16 @@ void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
bytes_processed_so_far_ += rinfo->target_address_size();
}
-void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
+void Serializer::ObjectSerializer::VisitCodeEntry(JSFunction* host,
+ Address entry_address) {
int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
bytes_processed_so_far_ += kPointerSize;
}
-void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
+void Serializer::ObjectSerializer::VisitCellPointer(Code* host,
+ RelocInfo* rinfo) {
int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
Cell* object = Cell::cast(rinfo->target_cell());
serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
diff --git a/deps/v8/src/snapshot/serializer.h b/deps/v8/src/snapshot/serializer.h
index 1af259db57..cc4d30bfc5 100644
--- a/deps/v8/src/snapshot/serializer.h
+++ b/deps/v8/src/snapshot/serializer.h
@@ -156,7 +156,7 @@ class Serializer : public SerializerDeserializer {
virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) = 0;
- void VisitPointers(Object** start, Object** end) override;
+ void VisitRootPointers(Root root, Object** start, Object** end) override;
void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
int skip);
@@ -282,15 +282,15 @@ class Serializer::ObjectSerializer : public ObjectVisitor {
void Serialize();
void SerializeContent();
void SerializeDeferred();
- void VisitPointers(Object** start, Object** end) override;
- void VisitEmbeddedPointer(RelocInfo* target) override;
- void VisitExternalReference(Address* p) override;
- void VisitExternalReference(RelocInfo* rinfo) override;
- void VisitInternalReference(RelocInfo* rinfo) override;
- void VisitCodeTarget(RelocInfo* target) override;
- void VisitCodeEntry(Address entry_address) override;
- void VisitCell(RelocInfo* rinfo) override;
- void VisitRuntimeEntry(RelocInfo* reloc) override;
+ void VisitPointers(HeapObject* host, Object** start, Object** end) override;
+ void VisitEmbeddedPointer(Code* host, RelocInfo* target) override;
+ void VisitExternalReference(Foreign* host, Address* p) override;
+ void VisitExternalReference(Code* host, RelocInfo* rinfo) override;
+ void VisitInternalReference(Code* host, RelocInfo* rinfo) override;
+ void VisitCodeTarget(Code* host, RelocInfo* target) override;
+ void VisitCodeEntry(JSFunction* host, Address entry_address) override;
+ void VisitCellPointer(Code* host, RelocInfo* rinfo) override;
+ void VisitRuntimeEntry(Code* host, RelocInfo* reloc) override;
private:
bool TryEncodeDeoptimizationEntry(HowToCode how_to_code, Address target,
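
The hunk above captures the visitor split this patch applies throughout the snapshot code: object-graph visits now carry the host object holding the visited slots, while root visits carry a Root tag instead. A simplified, self-contained sketch of the two interfaces (illustrative only; the real declarations live in src/visitors.h, and the real Root enum has more members):

  #include <cstdio>

  class Object;
  class HeapObject;

  // Roots are tagged with their origin instead of a host object.
  enum class Root { kPartialSnapshotCache, kStringTable };

  class RootVisitor {
   public:
    virtual ~RootVisitor() = default;
    virtual void VisitRootPointers(Root root, Object** start,
                                   Object** end) = 0;
    void VisitRootPointer(Root root, Object** p) {
      VisitRootPointers(root, p, p + 1);
    }
  };

  class ObjectVisitor {
   public:
    virtual ~ObjectVisitor() = default;
    // Heap visits now identify the object that holds the visited slots.
    virtual void VisitPointers(HeapObject* host, Object** start,
                               Object** end) = 0;
  };

  // Tiny demo: count how many root slots are visited.
  class CountingRootVisitor : public RootVisitor {
   public:
    void VisitRootPointers(Root, Object** start, Object** end) override {
      count_ += static_cast<int>(end - start);
    }
    int count_ = 0;
  };

  int main() {
    Object* slots[3] = {nullptr, nullptr, nullptr};
    CountingRootVisitor v;
    v.VisitRootPointers(Root::kPartialSnapshotCache, slots, slots + 3);
    std::printf("visited %d root slots\n", v.count_);
    return 0;
  }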
diff --git a/deps/v8/src/snapshot/snapshot-common.cc b/deps/v8/src/snapshot/snapshot-common.cc
index 9f299e697e..9350ec6b54 100644
--- a/deps/v8/src/snapshot/snapshot-common.cc
+++ b/deps/v8/src/snapshot/snapshot-common.cc
@@ -41,7 +41,6 @@ bool Snapshot::Initialize(Isolate* isolate) {
Vector<const byte> startup_data = ExtractStartupData(blob);
SnapshotData snapshot_data(startup_data);
Deserializer deserializer(&snapshot_data);
- deserializer.SetRehashability(ExtractRehashability(blob));
bool success = isolate->Init(&deserializer);
if (FLAG_profile_deserialization) {
double ms = timer.Elapsed().InMillisecondsF();
@@ -63,7 +62,6 @@ MaybeHandle<Context> Snapshot::NewContextFromSnapshot(
ExtractContextData(blob, static_cast<int>(context_index));
SnapshotData snapshot_data(context_data);
Deserializer deserializer(&snapshot_data);
- deserializer.SetRehashability(ExtractRehashability(blob));
MaybeHandle<Object> maybe_context = deserializer.DeserializePartial(
isolate, global_proxy, embedder_fields_deserializer);
@@ -100,7 +98,7 @@ void ProfileDeserialization(const SnapshotData* startup_snapshot,
v8::StartupData Snapshot::CreateSnapshotBlob(
const SnapshotData* startup_snapshot,
- const List<SnapshotData*>* context_snapshots, bool can_be_rehashed) {
+ const List<SnapshotData*>* context_snapshots) {
int num_contexts = context_snapshots->length();
int startup_snapshot_offset = StartupSnapshotOffset(num_contexts);
int total_length = startup_snapshot_offset;
@@ -113,8 +111,6 @@ v8::StartupData Snapshot::CreateSnapshotBlob(
char* data = new char[total_length];
memcpy(data + kNumberOfContextsOffset, &num_contexts, kInt32Size);
- int rehashability = can_be_rehashed ? 1 : 0;
- memcpy(data + kRehashabilityOffset, &rehashability, kInt32Size);
int payload_offset = StartupSnapshotOffset(num_contexts);
int payload_length = startup_snapshot->RawData().length();
memcpy(data + payload_offset, startup_snapshot->RawData().start(),
@@ -147,13 +143,6 @@ int Snapshot::ExtractNumContexts(const v8::StartupData* data) {
return num_contexts;
}
-bool Snapshot::ExtractRehashability(const v8::StartupData* data) {
- CHECK_LT(kRehashabilityOffset, data->raw_size);
- int rehashability;
- memcpy(&rehashability, data->data + kRehashabilityOffset, kInt32Size);
- return rehashability != 0;
-}
-
Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) {
int num_contexts = ExtractNumContexts(data);
int startup_offset = StartupSnapshotOffset(num_contexts);
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index e0fd226e62..7d9082e6d2 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -62,8 +62,6 @@ class Snapshot : public AllStatic {
size_t context_index,
v8::DeserializeEmbedderFieldsCallback embedder_fields_deserializer);
- static bool HaveASnapshotToStartFrom(Isolate* isolate);
-
static bool HasContextSnapshot(Isolate* isolate, size_t index);
static bool EmbedsScript(Isolate* isolate);
@@ -73,7 +71,7 @@ class Snapshot : public AllStatic {
static v8::StartupData CreateSnapshotBlob(
const SnapshotData* startup_snapshot,
- const List<SnapshotData*>* context_snapshots, bool can_be_rehashed);
+ const List<SnapshotData*>* context_snapshots);
#ifdef DEBUG
static bool SnapshotIsValid(v8::StartupData* snapshot_blob);
@@ -81,16 +79,14 @@ class Snapshot : public AllStatic {
private:
static int ExtractNumContexts(const v8::StartupData* data);
- static bool ExtractRehashability(const v8::StartupData* data);
static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
static Vector<const byte> ExtractContextData(const v8::StartupData* data,
int index);
// Snapshot blob layout:
// [0] number of contexts N
- // [1] rehashability
- // [2] offset to context 0
- // [3] offset to context 1
+ // [1] offset to context 0
+ // [2] offset to context 1
// ...
// ... offset to context N - 1
// ... startup snapshot data
@@ -98,10 +94,8 @@ class Snapshot : public AllStatic {
// ... context 1 snapshot data
static const int kNumberOfContextsOffset = 0;
- // TODO(yangguo): generalize rehashing, and remove this flag.
- static const int kRehashabilityOffset = kNumberOfContextsOffset + kInt32Size;
static const int kFirstContextOffsetOffset =
- kRehashabilityOffset + kInt32Size;
+ kNumberOfContextsOffset + kInt32Size;
static int StartupSnapshotOffset(int num_contexts) {
return kFirstContextOffsetOffset + num_contexts * kInt32Size;
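
With the rehashability word gone, the blob header shrinks by one int32: for N contexts the startup payload now begins at (1 + N) * kInt32Size. A small standalone check of that arithmetic, assuming kInt32Size == 4 as in the constants above:

  #include <cassert>

  int main() {
    const int kInt32Size = 4;
    const int kNumberOfContextsOffset = 0;
    const int kFirstContextOffsetOffset = kNumberOfContextsOffset + kInt32Size;
    // Blob layout: [0] context count, then one offset word per context.
    auto StartupSnapshotOffset = [&](int num_contexts) {
      return kFirstContextOffsetOffset + num_contexts * kInt32Size;
    };
    assert(StartupSnapshotOffset(0) == 4);   // header only
    assert(StartupSnapshotOffset(2) == 12);  // count + two context offsets
    return 0;
  }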
diff --git a/deps/v8/src/snapshot/startup-serializer.cc b/deps/v8/src/snapshot/startup-serializer.cc
index b63474ec75..dfc02036d8 100644
--- a/deps/v8/src/snapshot/startup-serializer.cc
+++ b/deps/v8/src/snapshot/startup-serializer.cc
@@ -16,8 +16,7 @@ StartupSerializer::StartupSerializer(
: Serializer(isolate),
clear_function_code_(function_code_handling ==
v8::SnapshotCreator::FunctionCodeHandling::kClear),
- serializing_builtins_(false),
- can_be_rehashed_(true) {
+ serializing_builtins_(false) {
InitializeCodeAddressMap();
}
@@ -74,10 +73,11 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
Address original_address = Foreign::cast(info->getter())->foreign_address();
Foreign::cast(info->js_getter())->set_foreign_address(original_address);
accessor_infos_.Add(info);
+ } else if (obj->IsScript() && Script::cast(obj)->IsUserJavaScript()) {
+ Script::cast(obj)->set_context_data(
+ isolate_->heap()->uninitialized_symbol());
}
- if (obj->IsHashTable()) CheckRehashability(obj);
-
// Object has not yet been serialized. Serialize it here.
ObjectSerializer object_serializer(this, obj, &sink_, how_to_code,
where_to_point);
@@ -98,7 +98,7 @@ void StartupSerializer::SerializeWeakReferencesAndDeferred() {
// add entries to the partial snapshot cache of the startup snapshot. Add
// one entry with 'undefined' to terminate the partial snapshot cache.
Object* undefined = isolate()->heap()->undefined_value();
- VisitPointer(&undefined);
+ VisitRootPointer(Root::kPartialSnapshotCache, &undefined);
isolate()->heap()->IterateWeakRoots(this, VISIT_ALL);
SerializeDeferredObjects();
Pad();
@@ -110,7 +110,8 @@ int StartupSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
// This object is not part of the partial snapshot cache yet. Add it to the
// startup snapshot so we can refer to it via partial snapshot index from
// the partial snapshot.
- VisitPointer(reinterpret_cast<Object**>(&heap_object));
+ VisitRootPointer(Root::kPartialSnapshotCache,
+ reinterpret_cast<Object**>(&heap_object));
}
return index;
}
@@ -147,7 +148,8 @@ void StartupSerializer::SerializeStrongReferences() {
VISIT_ONLY_STRONG_FOR_SERIALIZATION);
}
-void StartupSerializer::VisitPointers(Object** start, Object** end) {
+void StartupSerializer::VisitRootPointers(Root root, Object** start,
+ Object** end) {
if (start == isolate()->heap()->roots_array_start()) {
// Serializing the root list needs special handling:
// - The first pass over the root list only serializes immortal immovables.
@@ -174,7 +176,7 @@ void StartupSerializer::VisitPointers(Object** start, Object** end) {
}
FlushSkip(skip);
} else {
- Serializer::VisitPointers(start, end);
+ Serializer::VisitRootPointers(root, start, end);
}
}
@@ -187,17 +189,5 @@ bool StartupSerializer::RootShouldBeSkipped(int root_index) {
serializing_immortal_immovables_roots_;
}
-void StartupSerializer::CheckRehashability(HeapObject* table) {
- DCHECK(table->IsHashTable());
- if (!can_be_rehashed_) return;
- // We can only correctly rehash if the four hash tables below are the only
- // ones that we deserialize.
- if (table == isolate_->heap()->empty_slow_element_dictionary()) return;
- if (table == isolate_->heap()->empty_properties_dictionary()) return;
- if (table == isolate_->heap()->weak_object_to_code_table()) return;
- if (table == isolate_->heap()->string_table()) return;
- can_be_rehashed_ = false;
-}
-
} // namespace internal
} // namespace v8
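
The two-pass scheme referenced above — immortal immovable roots first, everything else second — can be sketched as follows. This is a minimal illustration of the skip logic, not the actual V8 control flow; the root count and flag values are made up:

  #include <bitset>
  #include <cstdio>

  // Two passes over a fixed root list, mirroring RootShouldBeSkipped above:
  // pass one serializes only immortal-immovable roots, pass two the rest.
  constexpr size_t kRootCount = 6;

  void SerializeRoots(const std::bitset<kRootCount>& immortal_immovable) {
    for (int pass = 0; pass < 2; ++pass) {
      const bool first_pass = (pass == 0);
      for (size_t i = 0; i < kRootCount; ++i) {
        // Skip roots that do not belong to the current pass.
        if (immortal_immovable[i] != first_pass) continue;
        std::printf("pass %d: root %zu\n", pass + 1, i);
      }
    }
  }

  int main() {
    std::bitset<kRootCount> immortal_immovable("001011");  // indexes 0, 1, 3
    SerializeRoots(immortal_immovable);
    return 0;
  }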
diff --git a/deps/v8/src/snapshot/startup-serializer.h b/deps/v8/src/snapshot/startup-serializer.h
index 5f76808cd0..223e1c7bff 100644
--- a/deps/v8/src/snapshot/startup-serializer.h
+++ b/deps/v8/src/snapshot/startup-serializer.h
@@ -29,8 +29,6 @@ class StartupSerializer : public Serializer {
int PartialSnapshotCacheIndex(HeapObject* o);
- bool can_be_rehashed() const { return can_be_rehashed_; }
-
private:
class PartialCacheIndexMap {
public:
@@ -59,7 +57,7 @@ class StartupSerializer : public Serializer {
// The StartupSerializer has to serialize the root array, which is slightly
// different.
- void VisitPointers(Object** start, Object** end) override;
+ void VisitRootPointers(Root root, Object** start, Object** end) override;
void SerializeObject(HeapObject* o, HowToCode how_to_code,
WhereToPoint where_to_point, int skip) override;
void Synchronize(VisitorSynchronization::SyncTag tag) override;
@@ -70,18 +68,12 @@ class StartupSerializer : public Serializer {
// roots. In the second pass, we serialize the rest.
bool RootShouldBeSkipped(int root_index);
- void CheckRehashability(HeapObject* hashtable);
-
bool clear_function_code_;
bool serializing_builtins_;
bool serializing_immortal_immovables_roots_;
std::bitset<Heap::kStrongRootListLength> root_has_been_serialized_;
PartialCacheIndexMap partial_cache_index_map_;
List<AccessorInfo*> accessor_infos_;
- // Indicates whether we only serialized hash tables that we can rehash.
- // TODO(yangguo): generalize rehashing, and remove this flag.
- bool can_be_rehashed_;
-
DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
};
diff --git a/deps/v8/src/source-position-table.cc b/deps/v8/src/source-position-table.cc
index 35d8e7c2f6..4babd4c0eb 100644
--- a/deps/v8/src/source-position-table.cc
+++ b/deps/v8/src/source-position-table.cc
@@ -168,18 +168,27 @@ Handle<ByteArray> SourcePositionTableBuilder::ToSourcePositionTable(
}
SourcePositionTableIterator::SourcePositionTableIterator(ByteArray* byte_array)
- : table_(byte_array), index_(0), current_() {
+ : raw_table_(byte_array) {
Advance();
}
+SourcePositionTableIterator::SourcePositionTableIterator(
+ Handle<ByteArray> byte_array)
+ : table_(byte_array) {
+ Advance();
+ // We can enable allocation because we keep the table in a handle.
+ no_gc.Release();
+}
+
void SourcePositionTableIterator::Advance() {
+ ByteArray* table = raw_table_ ? raw_table_ : *table_;
DCHECK(!done());
- DCHECK(index_ >= 0 && index_ <= table_->length());
- if (index_ >= table_->length()) {
+ DCHECK(index_ >= 0 && index_ <= table->length());
+ if (index_ >= table->length()) {
index_ = kDone;
} else {
PositionTableEntry tmp;
- DecodeEntry(table_, &index_, &tmp);
+ DecodeEntry(table, &index_, &tmp);
AddAndSetEntry(current_, tmp);
}
}
diff --git a/deps/v8/src/source-position-table.h b/deps/v8/src/source-position-table.h
index 756838d1e5..c77c1ef26e 100644
--- a/deps/v8/src/source-position-table.h
+++ b/deps/v8/src/source-position-table.h
@@ -61,6 +61,16 @@ class V8_EXPORT_PRIVATE SourcePositionTableBuilder {
class V8_EXPORT_PRIVATE SourcePositionTableIterator {
public:
+ // We expose two flavours of the iterator, depending on the argument passed
+ // to the constructor:
+
+ // Handlified iterator allows allocation, but it needs a handle (and thus
+ // a handle scope). This is the preferred version.
+ explicit SourcePositionTableIterator(Handle<ByteArray> byte_array);
+
+ // Non-handlified iterator does not need a handle scope, but it disallows
+ // allocation during its lifetime. This is useful if there is no handle
+ // scope around.
explicit SourcePositionTableIterator(ByteArray* byte_array);
void Advance();
@@ -82,8 +92,9 @@ class V8_EXPORT_PRIVATE SourcePositionTableIterator {
private:
static const int kDone = -1;
- ByteArray* table_;
- int index_;
+ ByteArray* raw_table_ = nullptr;
+ Handle<ByteArray> table_;
+ int index_ = 0;
PositionTableEntry current_;
DisallowHeapAllocation no_gc;
};
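
The raw/handlified split above is a recurring V8 pattern: the raw constructor forbids allocation so the table cannot move under the iterator, while the handle constructor re-reads through the handle on every step, so the table may be relocated by GC. A self-contained sketch of the same dual-backing idea in plain C++, with a shared_ptr standing in for a handle (no V8 types; callers are assumed to check done() before current()):

  #include <cstdio>
  #include <memory>
  #include <vector>

  // Dual-backing iterator: it either borrows a raw pointer (the caller must
  // keep the table alive and in place) or shares ownership, re-resolving the
  // backing store on every step just as Advance() above does with
  // `raw_table_ ? raw_table_ : *table_`.
  class TableIterator {
   public:
    explicit TableIterator(const std::vector<int>* raw) : raw_table_(raw) {}
    explicit TableIterator(std::shared_ptr<const std::vector<int>> handle)
        : handle_table_(std::move(handle)) {}

    bool done() const { return index_ == kDone; }
    int current() const { return (*table())[index_]; }  // requires !done()
    void Advance() {
      if (static_cast<size_t>(index_ + 1) >= table()->size()) {
        index_ = kDone;
      } else {
        ++index_;
      }
    }

   private:
    static const int kDone = -1;
    const std::vector<int>* table() const {
      return raw_table_ ? raw_table_ : handle_table_.get();
    }
    const std::vector<int>* raw_table_ = nullptr;
    std::shared_ptr<const std::vector<int>> handle_table_;
    int index_ = 0;  // valid immediately if the table is non-empty
  };

  int main() {
    auto table = std::make_shared<const std::vector<int>>(
        std::vector<int>{10, 20, 30});
    for (TableIterator it(table); !it.done(); it.Advance()) {
      std::printf("%d\n", it.current());
    }
    return 0;
  }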
diff --git a/deps/v8/src/string-hasher-inl.h b/deps/v8/src/string-hasher-inl.h
new file mode 100644
index 0000000000..c4e353f1ef
--- /dev/null
+++ b/deps/v8/src/string-hasher-inl.h
@@ -0,0 +1,147 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STRING_HASHER_INL_H_
+#define V8_STRING_HASHER_INL_H_
+
+#include "src/objects.h"
+#include "src/string-hasher.h"
+
+namespace v8 {
+namespace internal {
+
+StringHasher::StringHasher(int length, uint32_t seed)
+ : length_(length),
+ raw_running_hash_(seed),
+ array_index_(0),
+ is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
+ is_first_char_(true) {
+ DCHECK(FLAG_randomize_hashes || raw_running_hash_ == 0);
+}
+
+bool StringHasher::has_trivial_hash() {
+ return length_ > String::kMaxHashCalcLength;
+}
+
+uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint16_t c) {
+ running_hash += c;
+ running_hash += (running_hash << 10);
+ running_hash ^= (running_hash >> 6);
+ return running_hash;
+}
+
+uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
+ running_hash += (running_hash << 3);
+ running_hash ^= (running_hash >> 11);
+ running_hash += (running_hash << 15);
+ if ((running_hash & String::kHashBitMask) == 0) {
+ return kZeroHash;
+ }
+ return running_hash;
+}
+
+uint32_t StringHasher::ComputeRunningHash(uint32_t running_hash,
+ const uc16* chars, int length) {
+ DCHECK_NOT_NULL(chars);
+ DCHECK(length >= 0);
+ for (int i = 0; i < length; ++i) {
+ running_hash = AddCharacterCore(running_hash, *chars++);
+ }
+ return running_hash;
+}
+
+uint32_t StringHasher::ComputeRunningHashOneByte(uint32_t running_hash,
+ const char* chars,
+ int length) {
+ DCHECK_NOT_NULL(chars);
+ DCHECK(length >= 0);
+ for (int i = 0; i < length; ++i) {
+ uint16_t c = static_cast<uint16_t>(*chars++);
+ running_hash = AddCharacterCore(running_hash, c);
+ }
+ return running_hash;
+}
+
+void StringHasher::AddCharacter(uint16_t c) {
+ // Use the Jenkins one-at-a-time hash function to update the hash
+ // for the given character.
+ raw_running_hash_ = AddCharacterCore(raw_running_hash_, c);
+}
+
+bool StringHasher::UpdateIndex(uint16_t c) {
+ DCHECK(is_array_index_);
+ if (c < '0' || c > '9') {
+ is_array_index_ = false;
+ return false;
+ }
+ int d = c - '0';
+ if (is_first_char_) {
+ is_first_char_ = false;
+ if (c == '0' && length_ > 1) {
+ is_array_index_ = false;
+ return false;
+ }
+ }
+ if (array_index_ > 429496729U - ((d + 3) >> 3)) {
+ is_array_index_ = false;
+ return false;
+ }
+ array_index_ = array_index_ * 10 + d;
+ return true;
+}
+
+template <typename Char>
+inline void StringHasher::AddCharacters(const Char* chars, int length) {
+ DCHECK(sizeof(Char) == 1 || sizeof(Char) == 2);
+ int i = 0;
+ if (is_array_index_) {
+ for (; i < length; i++) {
+ AddCharacter(chars[i]);
+ if (!UpdateIndex(chars[i])) {
+ i++;
+ break;
+ }
+ }
+ }
+ for (; i < length; i++) {
+ DCHECK(!is_array_index_);
+ AddCharacter(chars[i]);
+ }
+}
+
+template <typename schar>
+uint32_t StringHasher::HashSequentialString(const schar* chars, int length,
+ uint32_t seed) {
+ StringHasher hasher(length, seed);
+ if (!hasher.has_trivial_hash()) hasher.AddCharacters(chars, length);
+ return hasher.GetHashField();
+}
+
+IteratingStringHasher::IteratingStringHasher(int len, uint32_t seed)
+ : StringHasher(len, seed) {}
+
+uint32_t IteratingStringHasher::Hash(String* string, uint32_t seed) {
+ IteratingStringHasher hasher(string->length(), seed);
+ // Nothing to do.
+ if (hasher.has_trivial_hash()) return hasher.GetHashField();
+ ConsString* cons_string = String::VisitFlat(&hasher, string);
+ if (cons_string == nullptr) return hasher.GetHashField();
+ hasher.VisitConsString(cons_string);
+ return hasher.GetHashField();
+}
+
+void IteratingStringHasher::VisitOneByteString(const uint8_t* chars,
+ int length) {
+ AddCharacters(chars, length);
+}
+
+void IteratingStringHasher::VisitTwoByteString(const uint16_t* chars,
+ int length) {
+ AddCharacters(chars, length);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_STRING_HASHER_INL_H_
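
AddCharacterCore and GetHashCore above are the standard Jenkins one-at-a-time hash, split into its per-character mixing step and its finalization step; the DCHECK in the constructor means a seed of 0 corresponds to running with hash randomization disabled. (Relatedly, the 429496729U bound in UpdateIndex is floor((2^32 - 1) / 10), keeping array_index_ * 10 + d at or below 2^32 - 2, the largest valid array index.) A standalone demonstration of the hash arithmetic — the bit mask and kZeroHash below are simplified stand-ins for the String constants:

  #include <cstdint>
  #include <cstdio>

  // Jenkins one-at-a-time: per-character mixing step.
  uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c) {
    running_hash += c;
    running_hash += (running_hash << 10);
    running_hash ^= (running_hash >> 6);
    return running_hash;
  }

  // Finalization step; a zero result is remapped because V8 reserves hash 0.
  uint32_t GetHashCore(uint32_t running_hash) {
    const uint32_t kHashBitMask = 0x3fffffff;  // stand-in for the real mask
    const uint32_t kZeroHash = 27;
    running_hash += (running_hash << 3);
    running_hash ^= (running_hash >> 11);
    running_hash += (running_hash << 15);
    if ((running_hash & kHashBitMask) == 0) return kZeroHash;
    return running_hash;
  }

  int main() {
    uint32_t hash = 0;  // seed 0, i.e. hash randomization disabled
    for (const char* p = "foo"; *p != '\0'; ++p) {
      hash = AddCharacterCore(hash, static_cast<uint16_t>(*p));
    }
    std::printf("hash(\"foo\") = %u\n", GetHashCore(hash));
    return 0;
  }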
diff --git a/deps/v8/src/string-hasher.h b/deps/v8/src/string-hasher.h
new file mode 100644
index 0000000000..867a480a41
--- /dev/null
+++ b/deps/v8/src/string-hasher.h
@@ -0,0 +1,90 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STRING_HASHER_H_
+#define V8_STRING_HASHER_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class ConsString;
+class String;
+
+template <typename T>
+class Vector;
+
+class V8_EXPORT_PRIVATE StringHasher {
+ public:
+ explicit inline StringHasher(int length, uint32_t seed);
+
+ template <typename schar>
+ static inline uint32_t HashSequentialString(const schar* chars, int length,
+ uint32_t seed);
+
+ // Reads all the data, even for long strings, and computes the utf16 length.
+ static uint32_t ComputeUtf8Hash(Vector<const char> chars, uint32_t seed,
+ int* utf16_length_out);
+
+ // Calculates the hash value for a string consisting of 1 to
+ // String::kMaxArrayIndexSize digits with no leading zeros (except "0").
+ // The value is the decimal number the string represents.
+ static uint32_t MakeArrayIndexHash(uint32_t value, int length);
+
+ // No string is allowed to have a hash of zero. That value is reserved
+ // for internal properties. If the hash calculation yields zero then we
+ // use 27 instead.
+ static const int kZeroHash = 27;
+
+ // Reusable parts of the hashing algorithm.
+ INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c));
+ INLINE(static uint32_t GetHashCore(uint32_t running_hash));
+ INLINE(static uint32_t ComputeRunningHash(uint32_t running_hash,
+ const uc16* chars, int length));
+ INLINE(static uint32_t ComputeRunningHashOneByte(uint32_t running_hash,
+ const char* chars,
+ int length));
+
+ protected:
+ // Returns the value to store in the hash field of a string with
+ // the given length and contents.
+ uint32_t GetHashField();
+ // Returns true if the hash of this string can be computed without
+ // looking at the contents.
+ inline bool has_trivial_hash();
+ // Adds a block of characters to the hash.
+ template <typename Char>
+ inline void AddCharacters(const Char* chars, int len);
+
+ private:
+ // Add a character to the hash.
+ inline void AddCharacter(uint16_t c);
+ // Update index. Returns true if string is still an index.
+ inline bool UpdateIndex(uint16_t c);
+
+ int length_;
+ uint32_t raw_running_hash_;
+ uint32_t array_index_;
+ bool is_array_index_;
+ bool is_first_char_;
+ DISALLOW_COPY_AND_ASSIGN(StringHasher);
+};
+
+class IteratingStringHasher : public StringHasher {
+ public:
+ static inline uint32_t Hash(String* string, uint32_t seed);
+ inline void VisitOneByteString(const uint8_t* chars, int length);
+ inline void VisitTwoByteString(const uint16_t* chars, int length);
+
+ private:
+ inline IteratingStringHasher(int len, uint32_t seed);
+ void VisitConsString(ConsString* cons_string);
+ DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_STRING_HASHER_H_
diff --git a/deps/v8/src/string-search.h b/deps/v8/src/string-search.h
index 7db09934f5..637d000c56 100644
--- a/deps/v8/src/string-search.h
+++ b/deps/v8/src/string-search.h
@@ -563,6 +563,19 @@ int SearchString(Isolate* isolate,
return search.Search(subject, start_index);
}
+// A wrapper function around SearchString that wraps raw pointers to the subject
+// and pattern as vectors before calling SearchString. Used from the
+// StringIndexOf builtin.
+template <typename SubjectChar, typename PatternChar>
+int SearchStringRaw(Isolate* isolate, const SubjectChar* subject_ptr,
+ int subject_length, const PatternChar* pattern_ptr,
+ int pattern_length, int start_index) {
+ DisallowHeapAllocation no_gc;
+ Vector<const SubjectChar> subject(subject_ptr, subject_length);
+ Vector<const PatternChar> pattern(pattern_ptr, pattern_length);
+ return SearchString(isolate, subject, pattern, start_index);
+}
+
} // namespace internal
} // namespace v8
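
SearchStringRaw above is a thin adapter: it packages raw pointers as length-carrying vectors and delegates to the vector-based search. A self-contained sketch of the same wrap-then-delegate shape, with a minimal Span type and a naive scan standing in for V8's Vector and SearchString:

  #include <cassert>
  #include <cstddef>

  // Minimal stand-in for v8::internal::Vector<const Char>.
  template <typename T>
  struct Span {
    const T* data;
    size_t length;
  };

  // Vector-based search (stand-in for SearchString): naive scan.
  template <typename SubjectChar, typename PatternChar>
  int Search(Span<const SubjectChar> subject, Span<const PatternChar> pattern,
             int start_index) {
    for (size_t i = start_index; i + pattern.length <= subject.length; ++i) {
      size_t j = 0;
      while (j < pattern.length && subject.data[i + j] == pattern.data[j]) ++j;
      if (j == pattern.length) return static_cast<int>(i);
    }
    return -1;
  }

  // Raw-pointer wrapper, shaped like SearchStringRaw: wrap, then delegate.
  template <typename SubjectChar, typename PatternChar>
  int SearchRaw(const SubjectChar* subject_ptr, int subject_length,
                const PatternChar* pattern_ptr, int pattern_length,
                int start_index) {
    Span<const SubjectChar> subject{subject_ptr,
                                    static_cast<size_t>(subject_length)};
    Span<const PatternChar> pattern{pattern_ptr,
                                    static_cast<size_t>(pattern_length)};
    return Search(subject, pattern, start_index);
  }

  int main() {
    const char* haystack = "string-search";
    assert(SearchRaw(haystack, 13, "search", 6, 0) == 7);
    return 0;
  }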
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index 650b3cf93a..28cc44a220 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -190,15 +190,15 @@ void StringStream::PrintObject(Object* o) {
HeapObject* ho = HeapObject::cast(o);
DebugObjectCache* debug_object_cache = ho->GetIsolate()->
string_stream_debug_object_cache();
- for (int i = 0; i < debug_object_cache->length(); i++) {
+ for (size_t i = 0; i < debug_object_cache->size(); i++) {
if ((*debug_object_cache)[i] == o) {
- Add("#%d#", i);
+ Add("#%d#", static_cast<int>(i));
return;
}
}
- if (debug_object_cache->length() < kMentionedObjectCacheMaxSize) {
- Add("#%d#", debug_object_cache->length());
- debug_object_cache->Add(HeapObject::cast(o));
+ if (debug_object_cache->size() < kMentionedObjectCacheMaxSize) {
+ Add("#%d#", static_cast<int>(debug_object_cache->size()));
+ debug_object_cache->push_back(HeapObject::cast(o));
} else {
Add("@%p", o);
}
@@ -244,16 +244,16 @@ Handle<String> StringStream::ToString(Isolate* isolate) {
void StringStream::ClearMentionedObjectCache(Isolate* isolate) {
isolate->set_string_stream_current_security_token(NULL);
if (isolate->string_stream_debug_object_cache() == NULL) {
- isolate->set_string_stream_debug_object_cache(new DebugObjectCache(0));
+ isolate->set_string_stream_debug_object_cache(new DebugObjectCache());
}
- isolate->string_stream_debug_object_cache()->Clear();
+ isolate->string_stream_debug_object_cache()->clear();
}
#ifdef DEBUG
bool StringStream::IsMentionedObjectCacheClear(Isolate* isolate) {
return object_print_mode_ == kPrintObjectConcise ||
- isolate->string_stream_debug_object_cache()->length() == 0;
+ isolate->string_stream_debug_object_cache()->size() == 0;
}
#endif
@@ -377,9 +377,9 @@ void StringStream::PrintMentionedObjectCache(Isolate* isolate) {
DebugObjectCache* debug_object_cache =
isolate->string_stream_debug_object_cache();
Add("==== Key ============================================\n\n");
- for (int i = 0; i < debug_object_cache->length(); i++) {
+ for (size_t i = 0; i < debug_object_cache->size(); i++) {
HeapObject* printee = (*debug_object_cache)[i];
- Add(" #%d# %p: ", i, printee);
+ Add(" #%d# %p: ", static_cast<int>(i), printee);
printee->ShortPrint(this);
Add("\n");
if (printee->IsJSObject()) {
diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h
index 694674dc0f..df28c2c991 100644
--- a/deps/v8/src/transitions-inl.h
+++ b/deps/v8/src/transitions-inl.h
@@ -106,6 +106,7 @@ int TransitionArray::SearchName(Name* name, int* out_insertion_index) {
}
+#ifdef DEBUG
bool TransitionArray::IsSpecialTransition(Name* name) {
if (!name->IsSymbol()) return false;
Heap* heap = name->GetHeap();
@@ -114,6 +115,7 @@ bool TransitionArray::IsSpecialTransition(Name* name) {
name == heap->elements_transition_symbol() ||
name == heap->strict_function_transition_symbol();
}
+#endif
int TransitionArray::CompareKeys(Name* key1, uint32_t hash1, PropertyKind kind1,
diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc
index 42d1c89507..5333fa6e25 100644
--- a/deps/v8/src/transitions.cc
+++ b/deps/v8/src/transitions.cc
@@ -551,47 +551,5 @@ int TransitionArray::Search(PropertyKind kind, Name* name,
if (transition == kNotFound) return kNotFound;
return SearchDetails(transition, kind, attributes, out_insertion_index);
}
-
-void TransitionArray::Sort() {
- DisallowHeapAllocation no_gc;
- // In-place insertion sort.
- int length = number_of_transitions();
- for (int i = 1; i < length; i++) {
- Name* key = GetKey(i);
- Map* target = GetTarget(i);
- PropertyKind kind = kData;
- PropertyAttributes attributes = NONE;
- if (!IsSpecialTransition(key)) {
- PropertyDetails details = GetTargetDetails(key, target);
- kind = details.kind();
- attributes = details.attributes();
- }
- int j;
- for (j = i - 1; j >= 0; j--) {
- Name* temp_key = GetKey(j);
- Map* temp_target = GetTarget(j);
- PropertyKind temp_kind = kData;
- PropertyAttributes temp_attributes = NONE;
- if (!IsSpecialTransition(temp_key)) {
- PropertyDetails details = GetTargetDetails(temp_key, temp_target);
- temp_kind = details.kind();
- temp_attributes = details.attributes();
- }
- int cmp =
- CompareKeys(temp_key, temp_key->Hash(), temp_kind, temp_attributes,
- key, key->Hash(), kind, attributes);
- if (cmp > 0) {
- SetKey(j + 1, temp_key);
- SetTarget(j + 1, temp_target);
- } else {
- break;
- }
- }
- SetKey(j + 1, key);
- SetTarget(j + 1, target);
- }
- DCHECK(IsSortedNoDuplicates());
-}
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h
index 839888cb3a..e553a05ce4 100644
--- a/deps/v8/src/transitions.h
+++ b/deps/v8/src/transitions.h
@@ -7,10 +7,10 @@
#include "src/checks.h"
#include "src/elements-kind.h"
-#include "src/heap/heap.h"
#include "src/isolate.h"
#include "src/objects.h"
#include "src/objects/descriptor-array.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
@@ -190,17 +190,15 @@ class TransitionArray: public FixedArray {
void TransitionArrayVerify();
#endif
- void Sort();
-
#ifdef DEBUG
bool IsSortedNoDuplicates(int valid_entries = -1);
static bool IsSortedNoDuplicates(Map* map);
static bool IsConsistentWithBackPointers(Map* map);
-#endif
// Returns true for non-property transitions like elements kind, observed,
// or frozen transitions.
static inline bool IsSpecialTransition(Name* name);
+#endif
// Constant for denoting key was not found.
static const int kNotFound = -1;
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 4bb971e8cd..0b8dd147bd 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -10,6 +10,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/objects-inl.h"
+#include "src/objects/map.h"
namespace v8 {
namespace internal {
@@ -388,8 +389,7 @@ void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackSlot slot,
receiver_types->Clear();
if (!slot.IsInvalid()) {
LoadICNexus nexus(feedback_vector_, slot);
- CollectReceiverTypes(isolate()->load_stub_cache(), &nexus, name,
- receiver_types);
+ CollectReceiverTypes(&nexus, receiver_types);
}
}
@@ -412,8 +412,8 @@ void TypeFeedbackOracle::AssignmentReceiverTypes(FeedbackSlot slot,
Handle<Name> name,
SmallMapList* receiver_types) {
receiver_types->Clear();
- CollectReceiverTypes(isolate()->store_stub_cache(), slot, name,
- receiver_types);
+ StoreICNexus nexus(feedback_vector_, slot);
+ CollectReceiverTypes(&nexus, receiver_types);
}
void TypeFeedbackOracle::KeyedAssignmentReceiverTypes(
@@ -430,27 +430,6 @@ void TypeFeedbackOracle::CountReceiverTypes(FeedbackSlot slot,
if (!slot.IsInvalid()) CollectReceiverTypes(slot, receiver_types);
}
-void TypeFeedbackOracle::CollectReceiverTypes(StubCache* stub_cache,
- FeedbackSlot slot,
- Handle<Name> name,
- SmallMapList* types) {
- StoreICNexus nexus(feedback_vector_, slot);
- CollectReceiverTypes(stub_cache, &nexus, name, types);
-}
-
-void TypeFeedbackOracle::CollectReceiverTypes(StubCache* stub_cache,
- FeedbackNexus* nexus,
- Handle<Name> name,
- SmallMapList* types) {
- if (FLAG_collect_megamorphic_maps_from_stub_cache &&
- nexus->ic_state() == MEGAMORPHIC) {
- types->Reserve(4, zone());
- stub_cache->CollectMatchingMaps(types, name, native_context_, zone());
- } else {
- CollectReceiverTypes(nexus, types);
- }
-}
-
void TypeFeedbackOracle::CollectReceiverTypes(FeedbackSlot slot,
SmallMapList* types) {
FeedbackSlotKind kind = feedback_vector_->GetKind(slot);
@@ -467,21 +446,14 @@ void TypeFeedbackOracle::CollectReceiverTypes(FeedbackSlot slot,
void TypeFeedbackOracle::CollectReceiverTypes(FeedbackNexus* nexus,
SmallMapList* types) {
- MapHandleList maps;
- if (nexus->ic_state() == MONOMORPHIC) {
- Map* map = nexus->FindFirstMap();
- if (map != NULL) maps.Add(handle(map));
- } else if (nexus->ic_state() == POLYMORPHIC) {
- nexus->FindAllMaps(&maps);
- } else {
+ MapHandles maps;
+ if (nexus->ExtractMaps(&maps) == 0) {
return;
}
- types->Reserve(maps.length(), zone());
- for (int i = 0; i < maps.length(); i++) {
- Handle<Map> map(maps.at(i));
- if (IsRelevantFeedback(*map, *native_context_)) {
- types->AddMapIfMissing(maps.at(i), zone());
- }
+
+ types->Reserve(static_cast<int>(maps.size()), zone());
+ for (Handle<Map> map : maps) {
+ types->AddMapIfMissing(map, zone());
}
}
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index c8e35564ab..d767a297c6 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -58,13 +58,6 @@ class TypeFeedbackOracle: public ZoneObject {
void CollectReceiverTypes(FeedbackSlot slot, SmallMapList* types);
void CollectReceiverTypes(FeedbackNexus* nexus, SmallMapList* types);
- static bool IsRelevantFeedback(Map* map, Context* native_context) {
- Object* constructor = map->GetConstructor();
- return !constructor->IsJSFunction() ||
- JSFunction::cast(constructor)->context()->native_context() ==
- native_context;
- }
-
Handle<JSFunction> GetCallTarget(FeedbackSlot slot);
Handle<AllocationSite> GetCallAllocationSite(FeedbackSlot slot);
Handle<JSFunction> GetCallNewTarget(FeedbackSlot slot);
@@ -91,11 +84,6 @@ class TypeFeedbackOracle: public ZoneObject {
Isolate* isolate() const { return isolate_; }
private:
- void CollectReceiverTypes(StubCache* stub_cache, FeedbackSlot slot,
- Handle<Name> name, SmallMapList* types);
- void CollectReceiverTypes(StubCache* stub_cache, FeedbackNexus* nexus,
- Handle<Name> name, SmallMapList* types);
-
// Returns true if there is at least one string map and if
// all maps are string maps.
bool HasOnlyStringMaps(SmallMapList* receiver_types);
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index 887988a488..6c8dadbd71 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -161,13 +161,6 @@ int Compare(const T& a, const T& b) {
return 1;
}
-
-template <typename T>
-int PointerValueCompare(const T* a, const T* b) {
- return Compare<T>(*a, *b);
-}
-
-
// Compare function to compare the object pointer value of two
// handlified objects. The handles are passed as pointers to the
// handles.
@@ -313,7 +306,7 @@ class BitFieldBase {
static const T kMax = static_cast<T>((kOne << size) - 1);
// Tells whether the provided value fits into the bit field.
- static bool is_valid(T value) {
+ static constexpr bool is_valid(T value) {
return (static_cast<U>(value) & ~static_cast<U>(kMax)) == 0;
}
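
Making is_valid constexpr lets callers check bit-field capacity at compile time rather than through a runtime DCHECK. A minimal sketch of the idea with a simplified bit-field template (not the actual V8 BitFieldBase; the field name is made up):

  #include <cstdint>

  // Simplified bit field: `size` bits starting at `shift` within a uint32_t.
  template <int shift, int size>
  struct BitField {
    static const uint32_t kMax = (uint32_t{1} << size) - 1;
    static constexpr bool is_valid(uint32_t value) {
      return (value & ~kMax) == 0;
    }
    static constexpr uint32_t encode(uint32_t value) { return value << shift; }
  };

  using OpcodeField = BitField<0, 3>;  // 3 bits: values 0..7
  static_assert(OpcodeField::is_valid(7), "7 fits in 3 bits");
  static_assert(!OpcodeField::is_valid(8), "8 does not fit in 3 bits");

  int main() { return 0; }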
@@ -986,7 +979,7 @@ void PRINTF_FORMAT(2, 3) PrintIsolate(void* isolate, const char* format, ...);
// Safe formatting print. Ensures that str is always null-terminated.
// Returns the number of chars written, or -1 if output was truncated.
int PRINTF_FORMAT(2, 3) SNPrintF(Vector<char> str, const char* format, ...);
-int PRINTF_FORMAT(2, 0)
+V8_EXPORT_PRIVATE int PRINTF_FORMAT(2, 0)
VSNPrintF(Vector<char> str, const char* format, va_list args);
void StrNCpy(Vector<char> dest, const char* src, size_t n);
@@ -1049,11 +1042,8 @@ int WriteAsCFile(const char* filename, const char* varname,
template <typename T>
inline void CopyWords(T* dst, const T* src, size_t num_words) {
STATIC_ASSERT(sizeof(T) == kPointerSize);
- // TODO(mvstanton): disabled because mac builds are bogus failing on this
- // assert. They are doing a signed comparison. Investigate in
- // the morning.
- // DCHECK(Min(dst, const_cast<T*>(src)) + num_words <=
- // Max(dst, const_cast<T*>(src)));
+ DCHECK(Min(dst, const_cast<T*>(src)) + num_words <=
+ Max(dst, const_cast<T*>(src)));
DCHECK(num_words > 0);
// Use block copying MemCopy if the segment we're copying is
diff --git a/deps/v8/src/v8.gyp b/deps/v8/src/v8.gyp
index a8efcdcf3f..e7e19f5059 100644
--- a/deps/v8/src/v8.gyp
+++ b/deps/v8/src/v8.gyp
@@ -99,7 +99,7 @@
# The dependency on v8_base should come from a transitive
# dependency however the Android toolchain requires libv8_base.a
# to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base', 'v8_nosnapshot'],
+ 'dependencies': ['v8_base', 'v8_builtins_setup', 'v8_nosnapshot'],
}],
['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
# The dependency on v8_base should come from a transitive
@@ -133,6 +133,158 @@
]
},
{
+ 'target_name': 'v8_builtins_setup',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'v8_builtins_generators',
+ ],
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'include_dirs+': [
+ '..',
+ '../include',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'setup-isolate-full.cc',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'v8_builtins_generators',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'v8_base',
+ ],
+ 'variables': {
+ 'optimize': 'max',
+ },
+ 'include_dirs+': [
+ '..',
+ '../include',
+ ],
+ 'sources': [ ### gcmole(all) ###
+ 'builtins/builtins-arguments-gen.cc',
+ 'builtins/builtins-arguments-gen.h',
+ 'builtins/builtins-array-gen.cc',
+ 'builtins/builtins-async-function-gen.cc',
+ 'builtins/builtins-async-gen.cc',
+ 'builtins/builtins-async-gen.h',
+ 'builtins/builtins-async-generator-gen.cc',
+ 'builtins/builtins-async-iterator-gen.cc',
+ 'builtins/builtins-boolean-gen.cc',
+ 'builtins/builtins-call-gen.cc',
+ 'builtins/builtins-console-gen.cc',
+ 'builtins/builtins-constructor-gen.cc',
+ 'builtins/builtins-constructor-gen.h',
+ 'builtins/builtins-constructor.h',
+ 'builtins/builtins-conversion-gen.cc',
+ 'builtins/builtins-date-gen.cc',
+ 'builtins/builtins-forin-gen.cc',
+ 'builtins/builtins-forin-gen.h',
+ 'builtins/builtins-function-gen.cc',
+ 'builtins/builtins-generator-gen.cc',
+ 'builtins/builtins-global-gen.cc',
+ 'builtins/builtins-handler-gen.cc',
+ 'builtins/builtins-ic-gen.cc',
+ 'builtins/builtins-internal-gen.cc',
+ 'builtins/builtins-interpreter-gen.cc',
+ 'builtins/builtins-intl-gen.cc',
+ 'builtins/builtins-math-gen.cc',
+ 'builtins/builtins-number-gen.cc',
+ 'builtins/builtins-object-gen.cc',
+ 'builtins/builtins-promise-gen.cc',
+ 'builtins/builtins-promise-gen.h',
+ 'builtins/builtins-regexp-gen.cc',
+ 'builtins/builtins-regexp-gen.h',
+ 'builtins/builtins-sharedarraybuffer-gen.cc',
+ 'builtins/builtins-string-gen.cc',
+ 'builtins/builtins-string-gen.h',
+ 'builtins/builtins-symbol-gen.cc',
+ 'builtins/builtins-typedarray-gen.cc',
+ 'builtins/builtins-utils-gen.h',
+ 'builtins/builtins-wasm-gen.cc',
+ 'builtins/setup-builtins-internal.cc',
+ 'ic/accessor-assembler.cc',
+ 'ic/accessor-assembler.h',
+ 'ic/binary-op-assembler.cc',
+ 'ic/binary-op-assembler.h',
+ 'ic/keyed-store-generic.cc',
+ 'ic/keyed-store-generic.h',
+ 'interpreter/interpreter-assembler.cc',
+ 'interpreter/interpreter-assembler.h',
+ 'interpreter/interpreter-generator.cc',
+ 'interpreter/interpreter-generator.h',
+ 'interpreter/interpreter-intrinsics-generator.cc',
+ 'interpreter/interpreter-intrinsics-generator.h',
+ 'interpreter/setup-interpreter-internal.cc',
+ 'interpreter/setup-interpreter.h',
+ ],
+ 'conditions': [
+ ['want_separate_host_toolset==1', {
+ 'toolsets': ['host', 'target'],
+ }, {
+ 'toolsets': ['target'],
+ }],
+ ['v8_target_arch=="ia32"', {
+ 'sources': [ ### gcmole(arch:ia32) ###
+ 'builtins/ia32/builtins-ia32.cc',
+ ],
+ }],
+ ['v8_target_arch=="x64"', {
+ 'sources': [ ### gcmole(arch:x64) ###
+ 'builtins/x64/builtins-x64.cc',
+ ],
+ }],
+ ['v8_target_arch=="arm"', {
+ 'sources': [ ### gcmole(arch:arm) ###
+ 'builtins/arm/builtins-arm.cc',
+ ],
+ }],
+ ['v8_target_arch=="arm64"', {
+ 'sources': [ ### gcmole(arch:arm64) ###
+ 'builtins/arm64/builtins-arm64.cc',
+ ],
+ }],
+ ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
+ 'sources': [ ### gcmole(arch:mipsel) ###
+ 'builtins/mips/builtins-mips.cc',
+ ],
+ }],
+ ['v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
+ 'sources': [ ### gcmole(arch:mips64el) ###
+ 'builtins/mips64/builtins-mips64.cc',
+ ],
+ }],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'sources': [ ### gcmole(arch:ppc) ###
+ 'builtins/ppc/builtins-ppc.cc',
+ ],
+ }],
+ ['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
+ 'sources': [ ### gcmole(arch:s390) ###
+ 'builtins/s390/builtins-s390.cc',
+ ],
+ }],
+ ['v8_target_arch=="x87"', {
+ 'sources': [ ### gcmole(arch:x87) ###
+ 'builtins/x87/builtins-x87.cc',
+ ],
+ }],
+ ['v8_enable_i18n_support==0', {
+ 'sources!': [
+ 'builtins/builtins-intl-gen.cc',
+ ],
+ }],
+ ],
+ },
+ {
'target_name': 'v8_snapshot',
'type': 'static_library',
'conditions': [
@@ -172,6 +324,7 @@
'<(SHARED_INTERMEDIATE_DIR)/extras-libraries.cc',
'<(SHARED_INTERMEDIATE_DIR)/experimental-extras-libraries.cc',
'<(INTERMEDIATE_DIR)/snapshot.cc',
+ 'setup-isolate-deserialize.cc',
],
'actions': [
{
@@ -285,6 +438,7 @@
'<(DEPTH)',
],
'sources': [
+ 'setup-isolate-deserialize.cc',
'snapshot/natives-external.cc',
'snapshot/snapshot-external.cc',
],
@@ -369,9 +523,9 @@
'dependencies': [
'v8_libbase',
'v8_libsampler',
- 'inspector/inspector.gyp:protocol_generated_sources',
- 'inspector/inspector.gyp:inspector_injected_script',
- 'inspector/inspector.gyp:inspector_debugger_script',
+ 'inspector/inspector.gyp:protocol_generated_sources#target',
+ 'inspector/inspector.gyp:inspector_injected_script#target',
+ 'inspector/inspector.gyp:inspector_debugger_script#target',
],
'objs': ['foo.o'],
'variables': {
@@ -389,6 +543,7 @@
'../include/v8-profiler.h',
'../include/v8-testing.h',
'../include/v8-util.h',
+ '../include/v8-value-serializer-version.h',
'../include/v8-version-string.h',
'../include/v8-version.h',
'../include/v8.h',
@@ -417,12 +572,8 @@
'asmjs/asm-parser.h',
'asmjs/asm-scanner.cc',
'asmjs/asm-scanner.h',
- 'asmjs/asm-typer.cc',
- 'asmjs/asm-typer.h',
'asmjs/asm-types.cc',
'asmjs/asm-types.h',
- 'asmjs/asm-wasm-builder.cc',
- 'asmjs/asm-wasm-builder.h',
'asmjs/switch-logic.h',
'asmjs/switch-logic.cc',
'assembler.cc',
@@ -471,74 +622,38 @@
'bootstrapper.cc',
'bootstrapper.h',
'builtins/builtins-api.cc',
- 'builtins/builtins-arguments-gen.cc',
- 'builtins/builtins-arguments-gen.h',
'builtins/builtins-arraybuffer.cc',
'builtins/builtins-array.cc',
- 'builtins/builtins-array-gen.cc',
- 'builtins/builtins-async-function-gen.cc',
- 'builtins/builtins-async-generator-gen.cc',
- 'builtins/builtins-async-iterator-gen.cc',
- 'builtins/builtins-async-gen.cc',
- 'builtins/builtins-async-gen.h',
'builtins/builtins-boolean.cc',
- 'builtins/builtins-boolean-gen.cc',
'builtins/builtins-call.cc',
- 'builtins/builtins-call-gen.cc',
'builtins/builtins-callsite.cc',
- 'builtins/builtins-constructor-gen.cc',
- 'builtins/builtins-constructor-gen.h',
+ 'builtins/builtins-console.cc',
'builtins/builtins-constructor.h',
- 'builtins/builtins-conversion-gen.cc',
'builtins/builtins-dataview.cc',
'builtins/builtins-date.cc',
- 'builtins/builtins-date-gen.cc',
'builtins/builtins-debug.cc',
'builtins/builtins-definitions.h',
'builtins/builtins-descriptors.h',
'builtins/builtins-error.cc',
- 'builtins/builtins-forin-gen.cc',
- 'builtins/builtins-forin-gen.h',
'builtins/builtins-function.cc',
- 'builtins/builtins-function-gen.cc',
- 'builtins/builtins-generator-gen.cc',
'builtins/builtins-global.cc',
- 'builtins/builtins-global-gen.cc',
- 'builtins/builtins-handler-gen.cc',
- 'builtins/builtins-ic-gen.cc',
'builtins/builtins-internal.cc',
- 'builtins/builtins-internal-gen.cc',
'builtins/builtins-interpreter.cc',
- 'builtins/builtins-interpreter-gen.cc',
'builtins/builtins-json.cc',
'builtins/builtins-math.cc',
- 'builtins/builtins-math-gen.cc',
'builtins/builtins-number.cc',
- 'builtins/builtins-number-gen.cc',
'builtins/builtins-object.cc',
- 'builtins/builtins-object-gen.cc',
- 'builtins/builtins-promise-gen.cc',
- 'builtins/builtins-promise-gen.h',
'builtins/builtins-proxy.cc',
'builtins/builtins-reflect.cc',
'builtins/builtins-regexp.cc',
- 'builtins/builtins-regexp-gen.cc',
- 'builtins/builtins-regexp-gen.h',
'builtins/builtins-sharedarraybuffer.cc',
- 'builtins/builtins-sharedarraybuffer-gen.cc',
'builtins/builtins-string.cc',
- 'builtins/builtins-string-gen.cc',
'builtins/builtins-intl.cc',
'builtins/builtins-symbol.cc',
- 'builtins/builtins-symbol-gen.cc',
'builtins/builtins-typedarray.cc',
- 'builtins/builtins-typedarray-gen.cc',
'builtins/builtins-utils.h',
- 'builtins/builtins-utils-gen.h',
- 'builtins/builtins-wasm-gen.cc',
'builtins/builtins.cc',
'builtins/builtins.h',
- 'builtins/setup-builtins-internal.cc',
'cached-powers.cc',
'cached-powers.h',
'callable.h',
@@ -948,6 +1063,7 @@
'heap/array-buffer-tracker.h',
'heap/code-stats.cc',
'heap/code-stats.h',
+ 'heap/concurrent-marking-deque.h',
'heap/concurrent-marking.cc',
'heap/concurrent-marking.h',
'heap/embedder-tracing.cc',
@@ -966,6 +1082,7 @@
'heap/incremental-marking-job.h',
'heap/incremental-marking.cc',
'heap/incremental-marking.h',
+ 'heap/item-parallel-job.h',
'heap/mark-compact-inl.h',
'heap/mark-compact.cc',
'heap/mark-compact.h',
@@ -982,23 +1099,22 @@
'heap/scavenger-inl.h',
'heap/scavenger.cc',
'heap/scavenger.h',
+ 'heap/sequential-marking-deque.cc',
+ 'heap/sequential-marking-deque.h',
'heap/slot-set.h',
'heap/spaces-inl.h',
'heap/spaces.cc',
'heap/spaces.h',
'heap/store-buffer.cc',
'heap/store-buffer.h',
- 'i18n.cc',
- 'i18n.h',
+ 'heap/workstealing-marking-deque.h',
+ 'intl.cc',
+ 'intl.h',
'icu_util.cc',
'icu_util.h',
'ic/access-compiler-data.h',
'ic/access-compiler.cc',
'ic/access-compiler.h',
- 'ic/accessor-assembler.cc',
- 'ic/accessor-assembler.h',
- 'ic/binary-op-assembler.cc',
- 'ic/binary-op-assembler.h',
'ic/call-optimization.cc',
'ic/call-optimization.h',
'ic/handler-compiler.cc',
@@ -1012,8 +1128,6 @@
'ic/ic-stats.h',
'ic/ic.cc',
'ic/ic.h',
- 'ic/keyed-store-generic.cc',
- 'ic/keyed-store-generic.h',
'identity-map.cc',
'identity-map.h',
'interface-descriptors.cc',
@@ -1038,15 +1152,18 @@
'interpreter/bytecode-generator.h',
'interpreter/bytecode-label.cc',
'interpreter/bytecode-label.h',
+ 'interpreter/bytecode-node.cc',
+ 'interpreter/bytecode-node.h',
'interpreter/bytecode-operands.cc',
'interpreter/bytecode-operands.h',
- 'interpreter/bytecode-pipeline.cc',
- 'interpreter/bytecode-pipeline.h',
'interpreter/bytecode-register.cc',
'interpreter/bytecode-register.h',
'interpreter/bytecode-register-allocator.h',
'interpreter/bytecode-register-optimizer.cc',
'interpreter/bytecode-register-optimizer.h',
+ 'interpreter/bytecode-source-info.cc',
+ 'interpreter/bytecode-source-info.h',
+ 'interpreter/bytecode-jump-table.h',
'interpreter/bytecode-traits.h',
'interpreter/constant-array-builder.cc',
'interpreter/constant-array-builder.h',
@@ -1056,16 +1173,9 @@
'interpreter/handler-table-builder.h',
'interpreter/interpreter.cc',
'interpreter/interpreter.h',
- 'interpreter/interpreter-assembler.cc',
- 'interpreter/interpreter-assembler.h',
- 'interpreter/interpreter-generator.cc',
'interpreter/interpreter-generator.h',
'interpreter/interpreter-intrinsics.cc',
'interpreter/interpreter-intrinsics.h',
- 'interpreter/interpreter-intrinsics-generator.cc',
- 'interpreter/interpreter-intrinsics-generator.h',
- 'interpreter/setup-interpreter.h',
- 'interpreter/setup-interpreter-internal.cc',
'isolate-inl.h',
'isolate.cc',
'isolate.h',
@@ -1118,10 +1228,13 @@
'objects/dictionary.h',
'objects/frame-array.h',
'objects/frame-array-inl.h',
- 'objects/hash-table-inl.h',
'objects/hash-table.h',
+ 'objects/intl-objects.cc',
+ 'objects/intl-objects.h',
'objects/literal-objects.cc',
'objects/literal-objects.h',
+ 'objects/map-inl.h',
+ 'objects/map.h',
'objects/module-info.h',
'objects/object-macros.h',
'objects/object-macros-undef.h',
@@ -1234,7 +1347,7 @@
'runtime/runtime-error.cc',
'runtime/runtime-futex.cc',
'runtime/runtime-generator.cc',
- 'runtime/runtime-i18n.cc',
+ 'runtime/runtime-intl.cc',
'runtime/runtime-internal.cc',
'runtime/runtime-interpreter.cc',
'runtime/runtime-literals.cc',
@@ -1259,7 +1372,6 @@
'safepoint-table.cc',
'safepoint-table.h',
'setup-isolate.h',
- 'setup-isolate-full.cc',
'signature.h',
'simulator.h',
'small-pointer-list.h',
@@ -1293,6 +1405,8 @@
'string-builder.h',
'string-case.cc',
'string-case.h',
+ 'string-hasher-inl.h',
+ 'string-hasher.h',
'string-search.h',
'string-stream.cc',
'string-stream.h',
@@ -1339,6 +1453,8 @@
'vector.h',
'version.cc',
'version.h',
+ 'visitors.cc',
+ 'visitors.h',
'vm-state-inl.h',
'vm-state.h',
'wasm/decoder.h',
@@ -1346,10 +1462,14 @@
'wasm/function-body-decoder.h',
'wasm/function-body-decoder-impl.h',
'wasm/leb-helper.h',
+ 'wasm/local-decl-encoder.cc',
+ 'wasm/local-decl-encoder.h',
'wasm/module-decoder.cc',
'wasm/module-decoder.h',
'wasm/signature-map.cc',
'wasm/signature-map.h',
+ 'wasm/streaming-decoder.cc',
+ 'wasm/streaming-decoder.h',
'wasm/wasm-code-specialization.h',
'wasm/wasm-code-specialization.cc',
'wasm/wasm-debug.cc',
@@ -1358,7 +1478,6 @@
'wasm/wasm-js.cc',
'wasm/wasm-js.h',
'wasm/wasm-limits.h',
- 'wasm/wasm-macro-gen.h',
'wasm/wasm-module.cc',
'wasm/wasm-module.h',
'wasm/wasm-module-builder.cc',
@@ -1415,7 +1534,6 @@
'arm/simulator-arm.cc',
'arm/simulator-arm.h',
'arm/eh-frame-arm.cc',
- 'builtins/arm/builtins-arm.cc',
'compiler/arm/code-generator-arm.cc',
'compiler/arm/instruction-codes-arm.h',
'compiler/arm/instruction-scheduler-arm.cc',
@@ -1470,7 +1588,6 @@
'arm64/utils-arm64.cc',
'arm64/utils-arm64.h',
'arm64/eh-frame-arm64.cc',
- 'builtins/arm64/builtins-arm64.cc',
'compiler/arm64/code-generator-arm64.cc',
'compiler/arm64/instruction-codes-arm64.h',
'compiler/arm64/instruction-scheduler-arm64.cc',
@@ -1515,7 +1632,6 @@
'ia32/simulator-ia32.cc',
'ia32/simulator-ia32.h',
'ia32/sse-instr.h',
- 'builtins/ia32/builtins-ia32.cc',
'compiler/ia32/code-generator-ia32.cc',
'compiler/ia32/instruction-codes-ia32.h',
'compiler/ia32/instruction-scheduler-ia32.cc',
@@ -1554,7 +1670,6 @@
'x87/macro-assembler-x87.h',
'x87/simulator-x87.cc',
'x87/simulator-x87.h',
- 'builtins/x87/builtins-x87.cc',
'compiler/x87/code-generator-x87.cc',
'compiler/x87/instruction-codes-x87.h',
'compiler/x87/instruction-scheduler-x87.cc',
@@ -1595,7 +1710,6 @@
'mips/macro-assembler-mips.h',
'mips/simulator-mips.cc',
'mips/simulator-mips.h',
- 'builtins/mips/builtins-mips.cc',
'compiler/mips/code-generator-mips.cc',
'compiler/mips/instruction-codes-mips.h',
'compiler/mips/instruction-scheduler-mips.cc',
@@ -1636,7 +1750,6 @@
'mips64/macro-assembler-mips64.h',
'mips64/simulator-mips64.cc',
'mips64/simulator-mips64.h',
- 'builtins/mips64/builtins-mips64.cc',
'compiler/mips64/code-generator-mips64.cc',
'compiler/mips64/instruction-codes-mips64.h',
'compiler/mips64/instruction-scheduler-mips64.cc',
@@ -1658,7 +1771,6 @@
}],
['v8_target_arch=="x64"', {
'sources': [ ### gcmole(arch:x64) ###
- 'builtins/x64/builtins-x64.cc',
'compiler/x64/code-generator-x64.cc',
'compiler/x64/instruction-codes-x64.h',
'compiler/x64/instruction-scheduler-x64.cc',
@@ -1705,7 +1817,6 @@
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
'sources': [ ### gcmole(arch:ppc) ###
- 'builtins/ppc/builtins-ppc.cc',
'compiler/ppc/code-generator-ppc.cc',
'compiler/ppc/instruction-codes-ppc.h',
'compiler/ppc/instruction-scheduler-ppc.cc',
@@ -1746,7 +1857,6 @@
}],
['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
'sources': [ ### gcmole(arch:s390) ###
- 'builtins/s390/builtins-s390.cc',
'compiler/s390/code-generator-s390.cc',
'compiler/s390/instruction-codes-s390.h',
'compiler/s390/instruction-scheduler-s390.cc',
@@ -1794,6 +1904,13 @@
# limit. This breaks it into multiple pieces to avoid the limit.
# See http://crbug.com/485155.
'msvs_shard': 4,
+ # This prevents V8's .cc files from conflicting with the inspector's
+ # .cpp files in the same shard.
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'ObjectFile':'$(IntDir)%(Extension)\\',
+ },
+ },
}],
['component=="shared_library"', {
'defines': [
@@ -1825,8 +1942,12 @@
],
}, { # v8_enable_i18n_support==0
'sources!': [
- 'i18n.cc',
- 'i18n.h',
+ 'builtins/builtins-intl.cc',
+ 'intl.cc',
+ 'intl.h',
+ 'objects/intl-objects.cc',
+ 'objects/intl-objects.h',
+ 'runtime/runtime-intl.cc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
@@ -1863,6 +1984,7 @@
'base/division-by-constant.h',
'base/debug/stack_trace.cc',
'base/debug/stack_trace.h',
+ 'base/export-template.h',
'base/file-utils.cc',
'base/file-utils.h',
'base/flags.h',
@@ -2296,7 +2418,7 @@
'js/macros.py',
'messages.h',
'js/prologue.js',
- 'js/runtime.js',
+ 'js/max-min.js',
'js/v8natives.js',
'js/array.js',
'js/string.js',
@@ -2309,7 +2431,6 @@
'js/templates.js',
'js/spread.js',
'js/proxy.js',
- 'js/harmony-string-padding.js',
'debug/mirrors.js',
'debug/debug.js',
'debug/liveedit.js',
@@ -2319,7 +2440,7 @@
'libraries_experimental_extras_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental-extras.bin',
'conditions': [
['v8_enable_i18n_support==1', {
- 'library_files': ['js/i18n.js'],
+ 'library_files': ['js/intl.js'],
}],
],
},
@@ -2431,6 +2552,8 @@
'heapobject_files': [
'objects.h',
'objects-inl.h',
+ 'objects/map.h',
+ 'objects/map-inl.h',
],
},
'actions': [
@@ -2457,9 +2580,10 @@
'type': 'executable',
'dependencies': [
'v8_base',
+ 'v8_builtins_setup',
'v8_libbase',
+ 'v8_libplatform',
'v8_nosnapshot',
- 'v8_libplatform'
],
'include_dirs+': [
'..',
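For context on the ObjectFile setting above: MSBuild's %(Extension) item
metadata expands to the source file's extension including the leading dot, so
object files get routed into per-extension subdirectories of the intermediate
directory, roughly like this (illustrative paths, not from this patch):

    src/foo.cc  -> $(IntDir).cc\foo.obj
    src/foo.cpp -> $(IntDir).cpp\foo.obj

Two same-named sources with different extensions then no longer collide on a
single foo.obj within one shard.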
diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc
index 34d26ece7f..c0470c5b3c 100644
--- a/deps/v8/src/v8threads.cc
+++ b/deps/v8/src/v8threads.cc
@@ -10,6 +10,7 @@
#include "src/execution.h"
#include "src/isolate-inl.h"
#include "src/regexp/regexp-stack.h"
+#include "src/visitors.h"
namespace v8 {
@@ -288,7 +289,7 @@ void ThreadManager::EagerlyArchiveThread() {
state->LinkInto(ThreadState::IN_USE_LIST);
char* to = state->data();
// Ensure that data containing GC roots are archived first, and handle them
- // in ThreadManager::Iterate(ObjectVisitor*).
+ // in ThreadManager::Iterate(RootVisitor*).
to = isolate_->handle_scope_implementer()->ArchiveThread(to);
to = isolate_->ArchiveThread(to);
to = Relocatable::ArchiveState(isolate_, to);
@@ -320,8 +321,7 @@ bool ThreadManager::IsArchived() {
return data != NULL && data->thread_state() != NULL;
}
-
-void ThreadManager::Iterate(ObjectVisitor* v) {
+void ThreadManager::Iterate(RootVisitor* v) {
// Expecting no threads during serialization/deserialization
for (ThreadState* state = FirstThreadStateInUse();
state != NULL;
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index db0ed070fa..8fc6f0c62f 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -10,6 +10,8 @@
namespace v8 {
namespace internal {
+class RootVisitor;
+class ThreadLocalTop;
class ThreadState {
public:
@@ -51,11 +53,6 @@ class ThreadState {
friend class ThreadManager;
};
-
-// Defined in isolate.h.
-class ThreadLocalTop;
-
-
class ThreadVisitor {
public:
// ThreadLocalTop may be only available during this call.
@@ -65,7 +62,6 @@ class ThreadVisitor {
virtual ~ThreadVisitor() {}
};
-
class ThreadManager {
public:
void Lock();
@@ -76,7 +72,7 @@ class ThreadManager {
void FreeThreadResources();
bool IsArchived();
- void Iterate(ObjectVisitor* v);
+ void Iterate(RootVisitor* v);
void IterateArchivedThreads(ThreadVisitor* v);
bool IsLockedByCurrentThread() {
return mutex_owner_.Equals(ThreadId::Current());
diff --git a/deps/v8/src/value-serializer.cc b/deps/v8/src/value-serializer.cc
index 44cb8dc188..2ba06c170b 100644
--- a/deps/v8/src/value-serializer.cc
+++ b/deps/v8/src/value-serializer.cc
@@ -6,6 +6,7 @@
#include <type_traits>
+#include "include/v8-value-serializer-version.h"
#include "src/base/logging.h"
#include "src/conversions.h"
#include "src/factory.h"
@@ -29,7 +30,17 @@ namespace internal {
// Version 12: regexp and string objects share normal string encoding
// Version 13: host objects have an explicit tag (rather than handling all
// unknown tags)
+//
+// WARNING: Increasing this value is a change which cannot safely be rolled
+// back without breaking compatibility with data stored on disk. It is
+// strongly recommended that you do not make such changes near a release
+// milestone branch point.
+//
+// Recent changes are routinely reverted in preparation for branch, and this
+// has been the cause of at least one bug in the past.
static const uint32_t kLatestVersion = 13;
+static_assert(kLatestVersion == v8::CurrentValueSerializerFormatVersion(),
+ "Exported format version must match latest version.");
static const int kPretenureThreshold = 100 * KB;
@@ -154,11 +165,6 @@ enum class WasmEncodingTag : uint8_t {
} // namespace
-// static
-uint32_t ValueSerializer::GetCurrentDataFormatVersion() {
- return kLatestVersion;
-}
-
ValueSerializer::ValueSerializer(Isolate* isolate,
v8::ValueSerializer::Delegate* delegate)
: isolate_(isolate),
@@ -1457,11 +1463,22 @@ MaybeHandle<JSRegExp> ValueDeserializer::ReadJSRegExp() {
uint32_t raw_flags;
Handle<JSRegExp> regexp;
if (!ReadString().ToHandle(&pattern) ||
- !ReadVarint<uint32_t>().To(&raw_flags) ||
+ !ReadVarint<uint32_t>().To(&raw_flags)) {
+ return MaybeHandle<JSRegExp>();
+ }
+
+ // Ensure the deserialized flags are valid. The context behind this is that
+ // the JSRegExp::Flags enum statically includes kDotAll, but it is only valid
+ // to set kDotAll if FLAG_harmony_regexp_dotall is enabled. Fuzzers don't
+// know about this and happily set kDotAll anyway, leading to CHECK failures
+ // later on.
+ uint32_t flags_mask = static_cast<uint32_t>(-1) << JSRegExp::FlagCount();
+ if ((raw_flags & flags_mask) ||
!JSRegExp::New(pattern, static_cast<JSRegExp::Flags>(raw_flags))
.ToHandle(&regexp)) {
return MaybeHandle<JSRegExp>();
}
+
AddObjectWithID(id, regexp);
return regexp;
}
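The flags mask above is easiest to check with concrete numbers. A minimal
standalone sketch, assuming a hypothetical flag count of 6 (the real value
comes from JSRegExp::FlagCount()):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kFlagCount = 6;  // assumption for illustration only
      // All bits at or above kFlagCount are invalid: 0xFFFFFFC0 for 6 flags.
      uint32_t flags_mask = static_cast<uint32_t>(-1) << kFlagCount;
      uint32_t ok = 0x3F;      // every defined flag bit set -> accepted
      uint32_t bad = 1u << 7;  // an undefined high bit set  -> rejected
      std::printf("%u %u\n", ok & flags_mask, bad & flags_mask);  // 0 128
      return 0;
    }

Any nonzero result of the mask test makes ReadJSRegExp bail out before
JSRegExp::New ever sees the bogus flags.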
diff --git a/deps/v8/src/value-serializer.h b/deps/v8/src/value-serializer.h
index 47a0722835..ef424698d0 100644
--- a/deps/v8/src/value-serializer.h
+++ b/deps/v8/src/value-serializer.h
@@ -43,8 +43,6 @@ enum class SerializationTag : uint8_t;
*/
class ValueSerializer {
public:
- static uint32_t GetCurrentDataFormatVersion();
-
ValueSerializer(Isolate* isolate, v8::ValueSerializer::Delegate* delegate);
~ValueSerializer();
diff --git a/deps/v8/src/vector.h b/deps/v8/src/vector.h
index 03e5d6cb1e..7ae4f0eb04 100644
--- a/deps/v8/src/vector.h
+++ b/deps/v8/src/vector.h
@@ -20,8 +20,9 @@ template <typename T>
class Vector {
public:
Vector() : start_(NULL), length_(0) {}
- Vector(T* data, int length) : start_(data), length_(length) {
- DCHECK(length == 0 || (length > 0 && data != NULL));
+
+ Vector(T* data, size_t length) : start_(data), length_(length) {
+ DCHECK(length == 0 || data != NULL);
}
template <int N>
@@ -33,15 +34,20 @@ class Vector {
// Returns a vector using the same backing storage as this one,
// spanning from and including 'from', to but not including 'to'.
- Vector<T> SubVector(int from, int to) const {
- DCHECK(0 <= from);
- SLOW_DCHECK(from <= to);
- SLOW_DCHECK(static_cast<unsigned>(to) <= static_cast<unsigned>(length_));
+ Vector<T> SubVector(size_t from, size_t to) const {
+ DCHECK_LE(from, to);
+ DCHECK_LE(to, length_);
return Vector<T>(start() + from, to - from);
}
// Returns the length of the vector.
- int length() const { return length_; }
+ int length() const {
+ DCHECK(length_ <= static_cast<size_t>(std::numeric_limits<int>::max()));
+ return static_cast<int>(length_);
+ }
+
+ // Returns the length of the vector as a size_t.
+ size_t size() const { return length_; }
// Returns whether or not the vector is empty.
bool is_empty() const { return length_ == 0; }
@@ -50,13 +56,12 @@ class Vector {
T* start() const { return start_; }
// Access individual vector elements - checks bounds in debug mode.
- T& operator[](int index) const {
- DCHECK_LE(0, index);
+ T& operator[](size_t index) const {
DCHECK_LT(index, length_);
return start_[index];
}
- const T& at(int index) const { return operator[](index); }
+ const T& at(size_t index) const { return operator[](index); }
T& first() { return start_[0]; }
@@ -69,7 +74,7 @@ class Vector {
// Returns a clone of this vector with a new backing store.
Vector<T> Clone() const {
T* result = NewArray<T>(length_);
- for (int i = 0; i < length_; i++) result[i] = start_[i];
+ for (size_t i = 0; i < length_; i++) result[i] = start_[i];
return Vector<T>(result, length_);
}
@@ -101,7 +106,7 @@ class Vector {
void StableSort() { std::stable_sort(start(), start() + length()); }
- void Truncate(int length) {
+ void Truncate(size_t length) {
DCHECK(length <= length_);
length_ = length;
}
@@ -114,8 +119,8 @@ class Vector {
length_ = 0;
}
- inline Vector<T> operator+(int offset) {
- DCHECK(offset < length_);
+ inline Vector<T> operator+(size_t offset) {
+ DCHECK_LT(offset, length_);
return Vector<T>(start_ + offset, length_ - offset);
}
@@ -134,7 +139,7 @@ class Vector {
bool operator==(const Vector<T>& other) const {
if (length_ != other.length_) return false;
if (start_ == other.start_) return true;
- for (int i = 0; i < length_; ++i) {
+ for (size_t i = 0; i < length_; ++i) {
if (start_[i] != other.start_[i]) {
return false;
}
@@ -147,7 +152,7 @@ class Vector {
private:
T* start_;
- int length_;
+ size_t length_;
template <typename CookedComparer>
class RawComparer {
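The hunk above migrates Vector's length from int to size_t while keeping the
old int-returning accessor behind a narrowing check. A minimal sketch of that
pattern, with illustrative names:

    #include <cassert>
    #include <cstddef>
    #include <limits>

    template <typename T>
    class Span {
     public:
      Span(T* data, size_t length) : start_(data), length_(length) {}
      // New, lossless accessor.
      size_t size() const { return length_; }
      // Legacy accessor: checks that the narrowing conversion is safe.
      int length() const {
        assert(length_ <= static_cast<size_t>(std::numeric_limits<int>::max()));
        return static_cast<int>(length_);
      }

     private:
      T* start_;
      size_t length_;
    };

Callers that already use size_t indices migrate to size(); the checked
length() keeps existing int-based call sites working in the meantime.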
diff --git a/deps/v8/src/visitors.cc b/deps/v8/src/visitors.cc
new file mode 100644
index 0000000000..4f93c63f0d
--- /dev/null
+++ b/deps/v8/src/visitors.cc
@@ -0,0 +1,22 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/visitors.h"
+
+namespace v8 {
+namespace internal {
+
+#define DECLARE_TAG(ignore1, name, ignore2) name,
+const char* const
+ VisitorSynchronization::kTags[VisitorSynchronization::kNumberOfSyncTags] = {
+ ROOT_ID_LIST(DECLARE_TAG)};
+#undef DECLARE_TAG
+
+#define DECLARE_TAG(ignore1, ignore2, name) name,
+const char* const VisitorSynchronization::kTagNames
+ [VisitorSynchronization::kNumberOfSyncTags] = {ROOT_ID_LIST(DECLARE_TAG)};
+#undef DECLARE_TAG
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/visitors.h b/deps/v8/src/visitors.h
new file mode 100644
index 0000000000..0822d91690
--- /dev/null
+++ b/deps/v8/src/visitors.h
@@ -0,0 +1,79 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_VISITORS_H_
+#define V8_VISITORS_H_
+
+#include "src/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+class Object;
+
+#define ROOT_ID_LIST(V) \
+ V(kStringTable, "string_table", "(Internalized strings)") \
+ V(kExternalStringsTable, "external_strings_table", "(External strings)") \
+ V(kStrongRootList, "strong_root_list", "(Strong roots)") \
+ V(kSmiRootList, "smi_root_list", "(Smi roots)") \
+ V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
+ V(kTop, "top", "(Isolate)") \
+ V(kRelocatable, "relocatable", "(Relocatable)") \
+ V(kDebug, "debug", "(Debugger)") \
+ V(kCompilationCache, "compilationcache", "(Compilation cache)") \
+ V(kHandleScope, "handlescope", "(Handle scope)") \
+ V(kDispatchTable, "dispatchtable", "(Dispatch table)") \
+ V(kBuiltins, "builtins", "(Builtins)") \
+ V(kGlobalHandles, "globalhandles", "(Global handles)") \
+ V(kEternalHandles, "eternalhandles", "(Eternal handles)") \
+ V(kThreadManager, "threadmanager", "(Thread manager)") \
+ V(kStrongRoots, "strong roots", "(Strong roots)") \
+ V(kExtensions, "Extensions", "(Extensions)")
+
+class VisitorSynchronization : public AllStatic {
+ public:
+#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+ enum SyncTag { ROOT_ID_LIST(DECLARE_ENUM) kNumberOfSyncTags };
+#undef DECLARE_ENUM
+
+ static const char* const kTags[kNumberOfSyncTags];
+ static const char* const kTagNames[kNumberOfSyncTags];
+};
+
+enum class Root {
+#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+ ROOT_ID_LIST(DECLARE_ENUM)
+#undef DECLARE_ENUM
+ // TODO(ulan): Merge with the ROOT_ID_LIST.
+ kCodeFlusher,
+ kPartialSnapshotCache,
+ kWeakCollections
+};
+
+// Abstract base class for visiting, and optionally modifying, the
+// pointers contained in roots. Used in GC and serialization/deserialization.
+class RootVisitor BASE_EMBEDDED {
+ public:
+ virtual ~RootVisitor() {}
+
+ // Visits a contiguous array of pointers in the half-open range
+ // [start, end). Any or all of the values may be modified on return.
+ virtual void VisitRootPointers(Root root, Object** start, Object** end) = 0;
+
+ // Handy shorthand for visiting a single pointer.
+ virtual void VisitRootPointer(Root root, Object** p) {
+ VisitRootPointers(root, p, p + 1);
+ }
+
+ // Intended for serialization/deserialization checking: insert, or
+ // check for the presence of, a tag at this position in the stream.
+ // Also used for marking up GC roots in heap snapshots.
+ // TODO(ulan): Remove this.
+ virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_VISITORS_H_
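A minimal sketch of how a RootVisitor subclass is written against the
interface above (illustrative, not part of the patch); the Root tag tells the
visitor which root list a pointer range came from:

    class CountingVisitor : public RootVisitor {
     public:
      void VisitRootPointers(Root root, Object** start, Object** end) override {
        count_ += static_cast<size_t>(end - start);
      }
      size_t count() const { return count_; }

     private:
      size_t count_ = 0;
    };

GC and the serializer walk every root list with such a visitor; the
Synchronize() hook interleaves tags so the (de)serializer can verify that
both sides are at the same position in the stream.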
diff --git a/deps/v8/src/wasm/OWNERS b/deps/v8/src/wasm/OWNERS
index 4f54661aeb..c698fc4776 100644
--- a/deps/v8/src/wasm/OWNERS
+++ b/deps/v8/src/wasm/OWNERS
@@ -1,8 +1,10 @@
set noparent
ahaas@chromium.org
+bbudge@chromium.org
bradnelson@chromium.org
clemensh@chromium.org
+gdeepti@chromium.org
mtrofin@chromium.org
rossberg@chromium.org
titzer@chromium.org
diff --git a/deps/v8/src/wasm/decoder.h b/deps/v8/src/wasm/decoder.h
index d9d25175ef..5f242ac1aa 100644
--- a/deps/v8/src/wasm/decoder.h
+++ b/deps/v8/src/wasm/decoder.h
@@ -37,14 +37,15 @@ namespace wasm {
// a buffer of bytes.
class Decoder {
public:
- Decoder(const byte* start, const byte* end)
- : start_(start), pc_(start), end_(end), error_pc_(nullptr) {}
- Decoder(const byte* start, const byte* pc, const byte* end)
- : start_(start), pc_(pc), end_(end), error_pc_(nullptr) {}
+ Decoder(const byte* start, const byte* end, uint32_t buffer_offset = 0)
+ : start_(start), pc_(start), end_(end), buffer_offset_(buffer_offset) {}
+ Decoder(const byte* start, const byte* pc, const byte* end,
+ uint32_t buffer_offset = 0)
+ : start_(start), pc_(pc), end_(end), buffer_offset_(buffer_offset) {}
virtual ~Decoder() {}
- inline bool check(const byte* pc, unsigned length, const char* msg) {
+ inline bool check(const byte* pc, uint32_t length, const char* msg) {
DCHECK_LE(start_, pc);
if (V8_UNLIKELY(pc + length > end_)) {
error(pc, msg);
@@ -82,28 +83,28 @@ class Decoder {
// Reads a variable-length unsigned integer (little endian).
template <bool checked>
- uint32_t read_u32v(const byte* pc, unsigned* length,
+ uint32_t read_u32v(const byte* pc, uint32_t* length,
const char* name = "LEB32") {
return read_leb<uint32_t, checked, false, false>(pc, length, name);
}
// Reads a variable-length signed integer (little endian).
template <bool checked>
- int32_t read_i32v(const byte* pc, unsigned* length,
+ int32_t read_i32v(const byte* pc, uint32_t* length,
const char* name = "signed LEB32") {
return read_leb<int32_t, checked, false, false>(pc, length, name);
}
// Reads a variable-length unsigned integer (little endian).
template <bool checked>
- uint64_t read_u64v(const byte* pc, unsigned* length,
+ uint64_t read_u64v(const byte* pc, uint32_t* length,
const char* name = "LEB64") {
return read_leb<uint64_t, checked, false, false>(pc, length, name);
}
// Reads a variable-length signed integer (little endian).
template <bool checked>
- int64_t read_i64v(const byte* pc, unsigned* length,
+ int64_t read_i64v(const byte* pc, uint32_t* length,
const char* name = "signed LEB64") {
return read_leb<int64_t, checked, false, false>(pc, length, name);
}
@@ -125,13 +126,13 @@ class Decoder {
// Reads a LEB128 variable-length unsigned 32-bit integer and advances {pc_}.
uint32_t consume_u32v(const char* name = nullptr) {
- unsigned length = 0;
+ uint32_t length = 0;
return read_leb<uint32_t, true, true, true>(pc_, &length, name);
}
// Reads a LEB128 variable-length signed 32-bit integer and advances {pc_}.
int32_t consume_i32v(const char* name = nullptr) {
- unsigned length = 0;
+ uint32_t length = 0;
return read_leb<int32_t, true, true, true>(pc_, &length, name);
}
@@ -182,7 +183,8 @@ class Decoder {
CHECK_LT(0, len);
va_end(arguments);
error_msg_.assign(buffer.start(), len);
- error_pc_ = pc;
+ DCHECK_GE(pc, start_);
+ error_offset_ = static_cast<uint32_t>(pc - start_) + buffer_offset_;
onFirstError();
}
@@ -206,40 +208,51 @@ class Decoder {
Result<U> toResult(T&& val) {
Result<U> result(std::forward<T>(val));
if (failed()) {
- // The error message must not be empty, otherwise Result::failed() will be
- // false.
- DCHECK(!error_msg_.empty());
TRACE("Result error: %s\n", error_msg_.c_str());
- DCHECK_GE(error_pc_, start_);
- result.error_offset = static_cast<uint32_t>(error_pc_ - start_);
- result.error_msg = std::move(error_msg_);
+ result.error(error_offset_, std::move(error_msg_));
}
return result;
}
// Resets the boundaries of this decoder.
- void Reset(const byte* start, const byte* end) {
+ void Reset(const byte* start, const byte* end, uint32_t buffer_offset = 0) {
start_ = start;
pc_ = start;
end_ = end;
- error_pc_ = nullptr;
+ buffer_offset_ = buffer_offset;
+ error_offset_ = 0;
error_msg_.clear();
}
+ void Reset(Vector<const uint8_t> bytes, uint32_t buffer_offset = 0) {
+ Reset(bytes.begin(), bytes.end(), buffer_offset);
+ }
+
bool ok() const { return error_msg_.empty(); }
bool failed() const { return !ok(); }
bool more() const { return pc_ < end_; }
const byte* start() const { return start_; }
const byte* pc() const { return pc_; }
- uint32_t pc_offset() const { return static_cast<uint32_t>(pc_ - start_); }
+ uint32_t pc_offset() const {
+ return static_cast<uint32_t>(pc_ - start_) + buffer_offset_;
+ }
+ uint32_t buffer_offset() const { return buffer_offset_; }
+ // Takes an offset relative to the module start and returns an offset relative
+ // to the current buffer of the decoder.
+ uint32_t GetBufferRelativeOffset(uint32_t offset) const {
+ DCHECK_LE(buffer_offset_, offset);
+ return offset - buffer_offset_;
+ }
const byte* end() const { return end_; }
protected:
const byte* start_;
const byte* pc_;
const byte* end_;
- const byte* error_pc_;
+ // The offset of the current buffer in the module. Needed for streaming.
+ uint32_t buffer_offset_;
+ uint32_t error_offset_ = 0;
std::string error_msg_;
private:
@@ -269,37 +282,47 @@ class Decoder {
}
template <typename IntType, bool checked, bool advance_pc, bool trace>
- inline IntType read_leb(const byte* pc, unsigned* length,
+ inline IntType read_leb(const byte* pc, uint32_t* length,
const char* name = "varint") {
DCHECK_IMPLIES(advance_pc, pc == pc_);
- constexpr bool is_signed = std::is_signed<IntType>::value;
TRACE_IF(trace, " +%d %-20s: ", static_cast<int>(pc - start_), name);
+ return read_leb_tail<IntType, checked, advance_pc, trace, 0>(pc, length,
+ name, 0);
+ }
+
+ template <typename IntType, bool checked, bool advance_pc, bool trace,
+ int byte_index>
+ IntType read_leb_tail(const byte* pc, uint32_t* length, const char* name,
+ IntType result) {
+ constexpr bool is_signed = std::is_signed<IntType>::value;
constexpr int kMaxLength = (sizeof(IntType) * 8 + 6) / 7;
- const byte* ptr = pc;
- const byte* end = Min(end_, ptr + kMaxLength);
- // The end variable is only used if checked == true. MSVC recognizes this.
- USE(end);
- int shift = 0;
+ static_assert(byte_index < kMaxLength, "invalid template instantiation");
+ constexpr int shift = byte_index * 7;
+ constexpr bool is_last_byte = byte_index == kMaxLength - 1;
+ const bool at_end = checked && pc >= end_;
byte b = 0;
- IntType result = 0;
- do {
- if (checked && V8_UNLIKELY(ptr >= end)) {
- TRACE_IF(trace,
- ptr == pc + kMaxLength ? "<length overflow> " : "<end> ");
- errorf(ptr, "expected %s", name);
- result = 0;
- break;
- }
- DCHECK_GT(end, ptr);
- b = *ptr++;
+ if (!at_end) {
+ DCHECK_LT(pc_, end_);
+ b = *pc;
TRACE_IF(trace, "%02x ", b);
- result = result | ((static_cast<IntType>(b) & 0x7F) << shift);
- shift += 7;
- } while (b & 0x80);
- DCHECK_LE(ptr - pc, kMaxLength);
- *length = static_cast<unsigned>(ptr - pc);
- if (advance_pc) pc_ = ptr;
- if (*length == kMaxLength) {
+ result = result | ((static_cast<IntType>(b) & 0x7f) << shift);
+ }
+ if (!is_last_byte && (b & 0x80)) {
+ // Make sure that we only instantiate the template for valid byte indexes.
+ // Compilers are not smart enough to figure out statically that the
+ // following call is unreachable if is_last_byte is true.
+ constexpr int next_byte_index = byte_index + (is_last_byte ? 0 : 1);
+ return read_leb_tail<IntType, checked, advance_pc, trace,
+ next_byte_index>(pc + 1, length, name, result);
+ }
+ if (advance_pc) pc_ = pc + (at_end ? 0 : 1);
+ *length = byte_index + (at_end ? 0 : 1);
+ if (checked && (at_end || (b & 0x80))) {
+ TRACE_IF(trace, at_end ? "<end> " : "<length overflow> ");
+ errorf(pc, "expected %s", name);
+ result = 0;
+ }
+ if (is_last_byte) {
// A signed-LEB128 must sign-extend the final byte, excluding its
// most-significant bit; e.g. for a 32-bit LEB128:
// kExtraBits = 4 (== 32 - (5-1) * 7)
@@ -316,15 +339,14 @@ class Decoder {
if (!checked) {
DCHECK(valid_extra_bits);
} else if (!valid_extra_bits) {
- error(ptr, "extra bits in varint");
+ error(pc, "extra bits in varint");
result = 0;
}
}
- if (is_signed && *length < kMaxLength) {
- int sign_ext_shift = 8 * sizeof(IntType) - shift;
- // Perform sign extension.
- result = (result << sign_ext_shift) >> sign_ext_shift;
- }
+ constexpr int sign_ext_shift =
+ is_signed && !is_last_byte ? 8 * sizeof(IntType) - shift - 7 : 0;
+ // Perform sign extension.
+ result = (result << sign_ext_shift) >> sign_ext_shift;
if (trace && is_signed) {
TRACE("= %" PRIi64 "\n", static_cast<int64_t>(result));
} else if (trace) {
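The unrolled read_leb_tail above replaces the old loop; the underlying
algorithm is easier to see in loop form. A standalone sketch of unchecked
signed-LEB128 decoding with the same final sign-extension trick (no bounds or
length-overflow checks, unlike the checked variant above):

    #include <cstdint>

    int32_t read_i32_leb(const uint8_t* p, uint32_t* length) {
      int32_t result = 0;
      int shift = 0;
      uint8_t b;
      do {
        b = *p++;
        result |= static_cast<int32_t>(b & 0x7f) << shift;
        shift += 7;
      } while (b & 0x80);
      *length = static_cast<uint32_t>(shift / 7);
      if (shift < 32) {
        // Sign-extend from the last payload bit; mirrors the shift trick in
        // the patch and relies on arithmetic right shift.
        int sign_ext_shift = 32 - shift;
        result = (result << sign_ext_shift) >> sign_ext_shift;
      }
      return result;
    }

For example, the single byte 0x7f decodes to -1, while the two bytes
0xff 0x00 decode to +127.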
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 27e95b2138..0df04e7ee0 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -322,6 +322,20 @@ struct SimdShiftOperand {
}
};
+// Operand for SIMD shuffle operations.
+template <bool checked>
+struct SimdShuffleOperand {
+ uint8_t shuffle[16];
+ unsigned lanes;
+
+ inline SimdShuffleOperand(Decoder* decoder, const byte* pc, unsigned lanes_) {
+ lanes = lanes_;
+ for (unsigned i = 0; i < lanes; i++) {
+ shuffle[i] = decoder->read_u8<checked>(pc + 2 + i, "shuffle");
+ }
+ }
+};
+
#undef CHECKED_COND
} // namespace wasm
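Each shuffle mask byte selects one lane out of the two input vectors, which
is why later validation bounds lane indices by 2 * lanes. For s32x4 (4 lanes
per operand, 8 selectable lanes total) an illustrative mask, not from this
patch, would be:

    uint8_t shuffle[4] = {0, 4, 1, 5};  // interleave the low halves of
                                        // operand a (0-3) and operand b (4-7)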
diff --git a/deps/v8/src/wasm/function-body-decoder.cc b/deps/v8/src/wasm/function-body-decoder.cc
index cae2fcca78..df74485a33 100644
--- a/deps/v8/src/wasm/function-body-decoder.cc
+++ b/deps/v8/src/wasm/function-body-decoder.cc
@@ -146,6 +146,22 @@ struct Control {
}
};
+namespace {
+inline unsigned GetShuffleMaskSize(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprS32x4Shuffle:
+ return 4;
+ case kExprS16x8Shuffle:
+ return 8;
+ case kExprS8x16Shuffle:
+ return 16;
+ default:
+ UNREACHABLE();
+ return 0;
+ }
+}
+} // namespace
+
// Macros that build nodes only if there is a graph and the current SSA
// environment is reachable from start. This avoids problems with malformed
// TF graphs when decoding inputs that have unreachable code.
@@ -175,6 +191,7 @@ class WasmDecoder : public Decoder {
static bool DecodeLocals(Decoder* decoder, const FunctionSig* sig,
ZoneVector<ValueType>* type_list) {
DCHECK_NOT_NULL(type_list);
+ DCHECK_EQ(0, type_list->size());
// Initialize from signature.
if (sig != nullptr) {
type_list->assign(sig->parameters().begin(), sig->parameters().end());
@@ -345,8 +362,12 @@ class WasmDecoder : public Decoder {
bool Validate(const byte* pc, BranchTableOperand<true>& operand,
size_t block_depth) {
- // TODO(titzer): add extra redundant validation for br_table here?
- return true;
+ if (operand.table_count >= kV8MaxWasmFunctionSize) {
+ errorf(pc + 1, "invalid table count (> max function size): %u",
+ operand.table_count);
+ return false;
+ }
+ return checkAvailable(operand.table_count);
}
inline bool Validate(const byte* pc, WasmOpcode opcode,
@@ -410,8 +431,23 @@ class WasmDecoder : public Decoder {
}
}
+ inline bool Validate(const byte* pc, WasmOpcode opcode,
+ SimdShuffleOperand<true>& operand) {
+ unsigned lanes = GetShuffleMaskSize(opcode);
+ uint8_t max_lane = 0;
+ for (unsigned i = 0; i < lanes; i++)
+ max_lane = std::max(max_lane, operand.shuffle[i]);
+ if (operand.lanes != lanes || max_lane > 2 * lanes) {
+ error(pc_ + 2, "invalid shuffle mask");
+ return false;
+ } else {
+ return true;
+ }
+ }
+
static unsigned OpcodeLength(Decoder* decoder, const byte* pc) {
- switch (static_cast<byte>(*pc)) {
+ WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
+ switch (opcode) {
#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
@@ -494,6 +530,11 @@ class WasmDecoder : public Decoder {
{
return 3;
}
+ // Shuffle opcodes carry a byte array that determines the shuffle mask.
+ case kExprS32x4Shuffle:
+ case kExprS16x8Shuffle:
+ case kExprS8x16Shuffle:
+ return 2 + GetShuffleMaskSize(opcode);
default:
decoder->error(pc, "invalid SIMD opcode");
return 2;
@@ -503,6 +544,68 @@ class WasmDecoder : public Decoder {
return 1;
}
}
+
+ std::pair<uint32_t, uint32_t> StackEffect(const byte* pc) {
+ WasmOpcode opcode = static_cast<WasmOpcode>(*pc);
+ // Handle "simple" opcodes with a fixed signature first.
+ FunctionSig* sig = WasmOpcodes::Signature(opcode);
+ if (!sig) sig = WasmOpcodes::AsmjsSignature(opcode);
+ if (sig) return {sig->parameter_count(), sig->return_count()};
+
+#define DECLARE_OPCODE_CASE(name, opcode, sig) case kExpr##name:
+ // clang-format off
+ switch (opcode) {
+ case kExprSelect:
+ return {3, 1};
+ FOREACH_STORE_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ return {2, 0};
+ FOREACH_LOAD_MEM_OPCODE(DECLARE_OPCODE_CASE)
+ case kExprTeeLocal:
+ case kExprGrowMemory:
+ return {1, 1};
+ case kExprSetLocal:
+ case kExprSetGlobal:
+ case kExprDrop:
+ case kExprBrIf:
+ case kExprBrTable:
+ case kExprIf:
+ return {1, 0};
+ case kExprGetLocal:
+ case kExprGetGlobal:
+ case kExprI32Const:
+ case kExprI64Const:
+ case kExprF32Const:
+ case kExprF64Const:
+ case kExprMemorySize:
+ return {0, 1};
+ case kExprCallFunction: {
+ CallFunctionOperand<true> operand(this, pc);
+ CHECK(Complete(pc, operand));
+ return {operand.sig->parameter_count(), operand.sig->return_count()};
+ }
+ case kExprCallIndirect: {
+ CallIndirectOperand<true> operand(this, pc);
+ CHECK(Complete(pc, operand));
+ // Indirect calls pop an additional argument for the table index.
+ return {operand.sig->parameter_count() + 1,
+ operand.sig->return_count()};
+ }
+ case kExprBr:
+ case kExprBlock:
+ case kExprLoop:
+ case kExprEnd:
+ case kExprElse:
+ case kExprNop:
+ case kExprReturn:
+ case kExprUnreachable:
+ return {0, 0};
+ default:
+ V8_Fatal(__FILE__, __LINE__, "unimplemented opcode: %x", opcode);
+ return {0, 0};
+ }
+#undef DECLARE_OPCODE_CASE
+ // clang-format on
+ }
};
static const int32_t kNullCatch = -1;
@@ -571,8 +674,8 @@ class WasmFullDecoder : public WasmDecoder {
}
bool TraceFailed() {
- TRACE("wasm-error module+%-6d func+%d: %s\n\n", baserel(error_pc_),
- startrel(error_pc_), error_msg_.c_str());
+ TRACE("wasm-error module+%-6d func+%d: %s\n\n",
+ baserel(start_ + error_offset_), error_offset_, error_msg_.c_str());
return false;
}
@@ -1474,6 +1577,19 @@ class WasmFullDecoder : public WasmDecoder {
return operand.length;
}
+ unsigned SimdShuffleOp(WasmOpcode opcode) {
+ SimdShuffleOperand<true> operand(this, pc_, GetShuffleMaskSize(opcode));
+ if (Validate(pc_, opcode, operand)) {
+ compiler::NodeVector inputs(2, zone_);
+ inputs[1] = Pop(1, ValueType::kSimd128).node;
+ inputs[0] = Pop(0, ValueType::kSimd128).node;
+ TFNode* node =
+ BUILD(SimdShuffleOp, operand.shuffle, operand.lanes, inputs);
+ Push(ValueType::kSimd128, node);
+ }
+ return operand.lanes;
+ }
+
unsigned DecodeSimdOpcode(WasmOpcode opcode) {
unsigned len = 0;
switch (opcode) {
@@ -1509,6 +1625,12 @@ class WasmFullDecoder : public WasmDecoder {
len = SimdShiftOp(opcode);
break;
}
+ case kExprS32x4Shuffle:
+ case kExprS16x8Shuffle:
+ case kExprS8x16Shuffle: {
+ len = SimdShuffleOp(opcode);
+ break;
+ }
default: {
FunctionSig* sig = WasmOpcodes::Signature(opcode);
if (sig != nullptr) {
@@ -1721,7 +1843,7 @@ class WasmFullDecoder : public WasmDecoder {
PrintF(", control = ");
compiler::WasmGraphBuilder::PrintDebugName(env->control);
}
- PrintF("}");
+ PrintF("}\n");
}
#endif
ssa_env_ = env;
@@ -2026,6 +2148,13 @@ unsigned OpcodeLength(const byte* pc, const byte* end) {
return WasmDecoder::OpcodeLength(&decoder, pc);
}
+std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
+ FunctionSig* sig, const byte* pc,
+ const byte* end) {
+ WasmDecoder decoder(module, sig, pc, end);
+ return decoder.StackEffect(pc);
+}
+
void PrintRawWasmCode(const byte* start, const byte* end) {
AccountingAllocator allocator;
PrintRawWasmCode(&allocator, FunctionBodyForTesting(start, end), nullptr);
diff --git a/deps/v8/src/wasm/function-body-decoder.h b/deps/v8/src/wasm/function-body-decoder.h
index 336b78afd9..ef3998f0e1 100644
--- a/deps/v8/src/wasm/function-body-decoder.h
+++ b/deps/v8/src/wasm/function-body-decoder.h
@@ -81,12 +81,12 @@ struct BodyLocalDecls {
ZoneVector<ValueType> type_list;
- // Constructor initializes the vector.
explicit BodyLocalDecls(Zone* zone) : encoded_size(0), type_list(zone) {}
};
V8_EXPORT_PRIVATE bool DecodeLocalDecls(BodyLocalDecls* decls,
const byte* start, const byte* end);
+
V8_EXPORT_PRIVATE BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone,
size_t num_locals,
const byte* start,
@@ -95,6 +95,15 @@ V8_EXPORT_PRIVATE BitVector* AnalyzeLoopAssignmentForTesting(Zone* zone,
// Computes the length of the opcode at the given address.
V8_EXPORT_PRIVATE unsigned OpcodeLength(const byte* pc, const byte* end);
+// Computes the stack effect of the opcode at the given address.
+// Returns <pop count, push count>.
+// Be cautious with control opcodes: This function only covers their immediate,
+// local stack effect (e.g. BrIf pops 1, Br pops 0). Those opcodes can have
+// non-local stack effects, though, which are not covered here.
+std::pair<uint32_t, uint32_t> StackEffect(const WasmModule* module,
+ FunctionSig* sig, const byte* pc,
+ const byte* end);
+
// A simple forward iterator for bytecodes.
class V8_EXPORT_PRIVATE BytecodeIterator : public NON_EXPORTED_BASE(Decoder) {
// Base class for both iterators defined below.
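A sketch of how StackEffect might be consumed to track stack height across
straight-line code (illustrative only; module, sig, start, and end are
assumed to be in scope, and per the comment above, control opcodes need
separate handling):

    int height = 0;
    for (BytecodeIterator it(start, end); it.has_next(); it.next()) {
      std::pair<uint32_t, uint32_t> effect =
          StackEffect(module, sig, it.pc(), end);
      height -= static_cast<int>(effect.first);   // pops
      height += static_cast<int>(effect.second);  // pushes
    }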
diff --git a/deps/v8/src/wasm/leb-helper.h b/deps/v8/src/wasm/leb-helper.h
index 0e4ba3418c..b598ee8578 100644
--- a/deps/v8/src/wasm/leb-helper.h
+++ b/deps/v8/src/wasm/leb-helper.h
@@ -5,12 +5,16 @@
#ifndef V8_WASM_LEB_HELPER_H_
#define V8_WASM_LEB_HELPER_H_
+#include <cstddef>
+#include <cstdint>
+
namespace v8 {
namespace internal {
namespace wasm {
-static const size_t kPaddedVarInt32Size = 5;
-static const size_t kMaxVarInt32Size = 5;
+constexpr size_t kPaddedVarInt32Size = 5;
+constexpr size_t kMaxVarInt32Size = 5;
+constexpr size_t kMaxVarInt64Size = 10;
class LEBHelper {
public:
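The constants above follow from the LEB128 encoding: each byte carries 7
payload bits, so an N-bit integer needs at most ceil(N / 7) bytes. A sketch
of the same arithmetic:

    #include <cstddef>

    constexpr size_t MaxLebSize(size_t bits) { return (bits + 6) / 7; }
    static_assert(MaxLebSize(32) == 5, "matches kMaxVarInt32Size");
    static_assert(MaxLebSize(64) == 10, "matches kMaxVarInt64Size");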
diff --git a/deps/v8/src/wasm/local-decl-encoder.cc b/deps/v8/src/wasm/local-decl-encoder.cc
new file mode 100644
index 0000000000..cf50cd0df4
--- /dev/null
+++ b/deps/v8/src/wasm/local-decl-encoder.cc
@@ -0,0 +1,51 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/local-decl-encoder.h"
+
+#include "src/wasm/leb-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+
+void LocalDeclEncoder::Prepend(Zone* zone, const byte** start,
+ const byte** end) const {
+ size_t size = (*end - *start);
+ byte* buffer = reinterpret_cast<byte*>(zone->New(Size() + size));
+ size_t pos = Emit(buffer);
+ memcpy(buffer + pos, *start, size);
+ pos += size;
+ *start = buffer;
+ *end = buffer + pos;
+}
+
+size_t LocalDeclEncoder::Emit(byte* buffer) const {
+ byte* pos = buffer;
+ LEBHelper::write_u32v(&pos, static_cast<uint32_t>(local_decls.size()));
+ for (auto& local_decl : local_decls) {
+ LEBHelper::write_u32v(&pos, local_decl.first);
+ *pos = WasmOpcodes::ValueTypeCodeFor(local_decl.second);
+ ++pos;
+ }
+ DCHECK_EQ(Size(), pos - buffer);
+ return static_cast<size_t>(pos - buffer);
+}
+
+uint32_t LocalDeclEncoder::AddLocals(uint32_t count, ValueType type) {
+ uint32_t result =
+ static_cast<uint32_t>(total + (sig ? sig->parameter_count() : 0));
+ total += count;
+ if (local_decls.size() > 0 && local_decls.back().second == type) {
+ count += local_decls.back().first;
+ local_decls.pop_back();
+ }
+ local_decls.push_back(std::pair<uint32_t, ValueType>(count, type));
+ return result;
+}
+
+size_t LocalDeclEncoder::Size() const {
+ size_t size = LEBHelper::sizeof_u32v(local_decls.size());
+ for (auto p : local_decls) size += 1 + LEBHelper::sizeof_u32v(p.first);
+ return size;
+}
diff --git a/deps/v8/src/wasm/local-decl-encoder.h b/deps/v8/src/wasm/local-decl-encoder.h
new file mode 100644
index 0000000000..e0725efe9b
--- /dev/null
+++ b/deps/v8/src/wasm/local-decl-encoder.h
@@ -0,0 +1,50 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_LOCAL_DECL_ENCODER_H_
+#define V8_WASM_LOCAL_DECL_ENCODER_H_
+
+#include "src/globals.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "src/zone/zone-containers.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// A helper for encoding local declarations prepended to the body of a
+// function.
+class V8_EXPORT_PRIVATE LocalDeclEncoder {
+ public:
+ explicit LocalDeclEncoder(Zone* zone, FunctionSig* s = nullptr)
+ : sig(s), local_decls(zone), total(0) {}
+
+ // Prepend local declarations by creating a new buffer and copying data
+ // over. The new buffer is allocated in the given zone, so the caller must
+ // not delete[] it; it is freed when the zone is deallocated.
+ void Prepend(Zone* zone, const byte** start, const byte** end) const;
+
+ size_t Emit(byte* buffer) const;
+
+ // Add local declarations to this helper. Returns the index of the first
+ // newly added local; if a signature is set, the index is offset by its
+ // parameter count.
+ uint32_t AddLocals(uint32_t count, ValueType type);
+
+ size_t Size() const;
+
+ bool has_sig() const { return sig != nullptr; }
+ FunctionSig* get_sig() const { return sig; }
+ void set_sig(FunctionSig* s) { sig = s; }
+
+ private:
+ FunctionSig* sig;
+ ZoneVector<std::pair<uint32_t, ValueType>> local_decls;
+ size_t total;
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_LOCAL_DECL_ENCODER_H_
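A usage sketch for the encoder above (illustrative; assumes wasm's kWasmI32
and kWasmF64 value types and a Zone named zone are in scope):

    LocalDeclEncoder locals(zone);
    uint32_t idx_i32 = locals.AddLocals(2, kWasmI32);  // first i32 local: 0
    uint32_t idx_f64 = locals.AddLocals(1, kWasmF64);  // f64 local: 2
    std::vector<byte> prelude(locals.Size());
    locals.Emit(prelude.data());

Consecutive AddLocals calls with the same type are merged into a single
(count, type) entry, matching the wasm local-declaration format; when a
FunctionSig is set, returned indices are offset by its parameter count.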
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index b05840975b..83cafbd0d8 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -120,9 +120,7 @@ class WasmSectionIterator {
next();
}
- inline bool more() const {
- return section_code_ != kUnknownSectionCode && decoder_.more();
- }
+ inline bool more() const { return decoder_.ok() && decoder_.more(); }
inline SectionCode section_code() const { return section_code_; }
@@ -132,6 +130,10 @@ class WasmSectionIterator {
return static_cast<uint32_t>(section_end_ - section_start_);
}
+ inline Vector<const uint8_t> payload() const {
+ return {payload_start_, payload_length()};
+ }
+
inline const byte* payload_start() const { return payload_start_; }
inline uint32_t payload_length() const {
@@ -142,7 +144,11 @@ class WasmSectionIterator {
// Advances to the next section, checking that decoding the current section
// stopped at {section_end_}.
- void advance() {
+ void advance(bool move_to_section_end = false) {
+ if (move_to_section_end && decoder_.pc() < section_end_) {
+ decoder_.consume_bytes(
+ static_cast<uint32_t>(section_end_ - decoder_.pc()));
+ }
if (decoder_.pc() != section_end_) {
const char* msg = decoder_.pc() < section_end_ ? "shorter" : "longer";
decoder_.errorf(decoder_.pc(),
@@ -164,65 +170,59 @@ class WasmSectionIterator {
// Reads the section code/name at the current position and sets up
// the embedder fields.
void next() {
- while (true) {
- if (!decoder_.more()) {
+ if (!decoder_.more()) {
+ section_code_ = kUnknownSectionCode;
+ return;
+ }
+ section_start_ = decoder_.pc();
+ uint8_t section_code = decoder_.consume_u8("section code");
+ // Read and check the section size.
+ uint32_t section_length = decoder_.consume_u32v("section length");
+
+ payload_start_ = decoder_.pc();
+ if (decoder_.checkAvailable(section_length)) {
+ // Get the limit of the section within the module.
+ section_end_ = payload_start_ + section_length;
+ } else {
+ // The section would extend beyond the end of the module.
+ section_end_ = payload_start_;
+ }
+
+ if (section_code == kUnknownSectionCode) {
+ // Check for the known "name" section.
+ uint32_t string_length;
+ uint32_t string_offset =
+ wasm::consume_string(decoder_, &string_length, true, "section name");
+ if (decoder_.failed() || decoder_.pc() > section_end_) {
section_code_ = kUnknownSectionCode;
return;
}
- uint8_t section_code = decoder_.consume_u8("section code");
- // Read and check the section size.
- uint32_t section_length = decoder_.consume_u32v("section length");
- section_start_ = decoder_.pc();
- payload_start_ = section_start_;
- if (decoder_.checkAvailable(section_length)) {
- // Get the limit of the section within the module.
- section_end_ = section_start_ + section_length;
- } else {
- // The section would extend beyond the end of the module.
- section_end_ = section_start_;
- }
-
- if (section_code == kUnknownSectionCode) {
- // Check for the known "name" section.
- uint32_t string_length;
- uint32_t string_offset = wasm::consume_string(decoder_, &string_length,
- true, "section name");
- if (decoder_.failed() || decoder_.pc() > section_end_) {
- section_code_ = kUnknownSectionCode;
- return;
- }
- const byte* section_name_start = decoder_.start() + string_offset;
- payload_start_ = decoder_.pc();
-
- TRACE(" +%d section name : \"%.*s\"\n",
- static_cast<int>(section_name_start - decoder_.start()),
- string_length < 20 ? string_length : 20, section_name_start);
-
- if (string_length == kNameStringLength &&
- strncmp(reinterpret_cast<const char*>(section_name_start),
- kNameString, kNameStringLength) == 0) {
- section_code = kNameSectionCode;
- }
- } else if (!IsValidSectionCode(section_code)) {
- decoder_.errorf(decoder_.pc(), "unknown section code #0x%02x",
- section_code);
- section_code = kUnknownSectionCode;
- }
- section_code_ = decoder_.failed()
- ? kUnknownSectionCode
- : static_cast<SectionCode>(section_code);
-
- TRACE("Section: %s\n", SectionName(section_code_));
- if (section_code_ == kUnknownSectionCode &&
- section_end_ >= decoder_.pc()) {
- // skip to the end of the unknown section.
- uint32_t remaining =
- static_cast<uint32_t>(section_end_ - decoder_.pc());
- decoder_.consume_bytes(remaining, "section payload");
- // fall through and continue to the next section.
- } else {
- return;
+ const byte* section_name_start =
+ decoder_.start() + decoder_.GetBufferRelativeOffset(string_offset);
+ payload_start_ = decoder_.pc();
+
+ TRACE(" +%d section name : \"%.*s\"\n",
+ static_cast<int>(section_name_start - decoder_.start()),
+ string_length < 20 ? string_length : 20, section_name_start);
+
+ if (string_length == kNameStringLength &&
+ strncmp(reinterpret_cast<const char*>(section_name_start),
+ kNameString, kNameStringLength) == 0) {
+ section_code = kNameSectionCode;
}
+ } else if (!IsValidSectionCode(section_code)) {
+ decoder_.errorf(decoder_.pc(), "unknown section code #0x%02x",
+ section_code);
+ section_code = kUnknownSectionCode;
+ }
+ section_code_ = decoder_.failed() ? kUnknownSectionCode
+ : static_cast<SectionCode>(section_code);
+
+ TRACE("Section: %s\n", SectionName(section_code_));
+ if (section_code_ == kUnknownSectionCode && section_end_ > decoder_.pc()) {
+ // skip to the end of the unknown section.
+ uint32_t remaining = static_cast<uint32_t>(section_end_ - decoder_.pc());
+ decoder_.consume_bytes(remaining, "section payload");
}
}
};
@@ -230,10 +230,9 @@ class WasmSectionIterator {
// The main logic for decoding the bytes of a module.
class ModuleDecoder : public Decoder {
public:
- ModuleDecoder(Zone* zone, const byte* module_start, const byte* module_end,
+ ModuleDecoder(const byte* module_start, const byte* module_end,
ModuleOrigin origin)
: Decoder(module_start, module_end),
- module_zone_(zone),
origin_(FLAG_assume_asmjs_origin ? kAsmJsOrigin : origin) {
if (end_ < start_) {
error(start_, "end is less than start");
@@ -264,19 +263,27 @@ class ModuleDecoder : public Decoder {
result.ok() ? "ok" : "failed");
std::string name(buf);
if (FILE* wasm_file = base::OS::FOpen((path + name).c_str(), "wb")) {
- fwrite(start_, end_ - start_, 1, wasm_file);
+ if (fwrite(start_, end_ - start_, 1, wasm_file) != 1) {
+ OFStream os(stderr);
+ os << "Error while dumping wasm file" << std::endl;
+ }
fclose(wasm_file);
}
}
- // Decodes an entire module.
- ModuleResult DecodeModule(bool verify_functions = true) {
- pc_ = start_;
- WasmModule* module = new WasmModule(module_zone_);
- module->min_mem_pages = 0;
- module->max_mem_pages = 0;
- module->mem_export = false;
- module->set_origin(origin_);
+ void StartDecoding(Isolate* isolate) {
+ CHECK_NULL(module_);
+ module_.reset(new WasmModule(
+ std::unique_ptr<Zone>(new Zone(isolate->allocator(), "signatures"))));
+ module_->min_mem_pages = 0;
+ module_->max_mem_pages = 0;
+ module_->mem_export = false;
+ module_->set_origin(origin_);
+ }
+
+ void DecodeModuleHeader(Vector<const uint8_t> bytes, uint8_t offset) {
+ if (failed()) return;
+ Reset(bytes, offset);
const byte* pos = pc_;
uint32_t magic_word = consume_u32("wasm magic");
@@ -298,426 +305,465 @@ class ModuleDecoder : public Decoder {
BYTES(kWasmVersion), BYTES(magic_version));
}
}
+ }
- WasmSectionIterator section_iter(*this);
-
- // ===== Type section ====================================================
- if (section_iter.section_code() == kTypeSectionCode) {
- uint32_t signatures_count = consume_count("types count", kV8MaxWasmTypes);
- module->signatures.reserve(signatures_count);
- for (uint32_t i = 0; ok() && i < signatures_count; ++i) {
- TRACE("DecodeSignature[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
- FunctionSig* s = consume_sig();
- module->signatures.push_back(s);
- }
- section_iter.advance();
+ void DecodeSection(SectionCode section_code, Vector<const uint8_t> bytes,
+ uint32_t offset, bool verify_functions = true) {
+ if (failed()) return;
+ Reset(bytes, offset);
+
+ // Check if the section is out-of-order.
+ if (section_code < next_section_) {
+ errorf(pc(), "unexpected section: %s", SectionName(section_code));
+ return;
+ }
+ if (section_code != kUnknownSectionCode) {
+ next_section_ = section_code;
+ ++next_section_;
}
- // ===== Import section ==================================================
- if (section_iter.section_code() == kImportSectionCode) {
- uint32_t import_table_count =
- consume_count("imports count", kV8MaxWasmImports);
- module->import_table.reserve(import_table_count);
- for (uint32_t i = 0; ok() && i < import_table_count; ++i) {
- TRACE("DecodeImportTable[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
-
- module->import_table.push_back({
- 0, // module_name_length
- 0, // module_name_offset
- 0, // field_name_offset
- 0, // field_name_length
- kExternalFunction, // kind
- 0 // index
- });
- WasmImport* import = &module->import_table.back();
- const byte* pos = pc_;
- import->module_name_offset =
- consume_string(&import->module_name_length, true, "module name");
- import->field_name_offset =
- consume_string(&import->field_name_length, true, "field name");
-
- import->kind = static_cast<WasmExternalKind>(consume_u8("import kind"));
- switch (import->kind) {
- case kExternalFunction: {
- // ===== Imported function =======================================
- import->index = static_cast<uint32_t>(module->functions.size());
- module->num_imported_functions++;
- module->functions.push_back({nullptr, // sig
- import->index, // func_index
- 0, // sig_index
- 0, // name_offset
- 0, // name_length
- 0, // code_start_offset
- 0, // code_end_offset
- true, // imported
- false}); // exported
- WasmFunction* function = &module->functions.back();
- function->sig_index = consume_sig_index(module, &function->sig);
- break;
- }
- case kExternalTable: {
- // ===== Imported table ==========================================
- if (!AddTable(module)) break;
- import->index =
- static_cast<uint32_t>(module->function_tables.size());
- module->function_tables.push_back({0, 0, false,
- std::vector<int32_t>(), true,
- false, SignatureMap()});
- expect_u8("element type", kWasmAnyFunctionTypeForm);
- WasmIndirectFunctionTable* table = &module->function_tables.back();
- consume_resizable_limits("element count", "elements",
- FLAG_wasm_max_table_size, &table->min_size,
- &table->has_max, FLAG_wasm_max_table_size,
- &table->max_size);
- break;
- }
- case kExternalMemory: {
- // ===== Imported memory =========================================
- if (!AddMemory(module)) break;
- consume_resizable_limits(
- "memory", "pages", FLAG_wasm_max_mem_pages,
- &module->min_mem_pages, &module->has_max_mem,
- kSpecMaxWasmMemoryPages, &module->max_mem_pages);
- break;
- }
- case kExternalGlobal: {
- // ===== Imported global =========================================
- import->index = static_cast<uint32_t>(module->globals.size());
- module->globals.push_back(
- {kWasmStmt, false, WasmInitExpr(), 0, true, false});
- WasmGlobal* global = &module->globals.back();
- global->type = consume_value_type();
- global->mutability = consume_mutability();
- if (global->mutability) {
- error("mutable globals cannot be imported");
- }
- break;
+ switch (section_code) {
+ case kUnknownSectionCode:
+ break;
+ case kTypeSectionCode:
+ DecodeTypeSection();
+ break;
+ case kImportSectionCode:
+ DecodeImportSection();
+ break;
+ case kFunctionSectionCode:
+ DecodeFunctionSection();
+ break;
+ case kTableSectionCode:
+ DecodeTableSection();
+ break;
+ case kMemorySectionCode:
+ DecodeMemorySection();
+ break;
+ case kGlobalSectionCode:
+ DecodeGlobalSection();
+ break;
+ case kExportSectionCode:
+ DecodeExportSection();
+ break;
+ case kStartSectionCode:
+ DecodeStartSection();
+ break;
+ case kCodeSectionCode:
+ DecodeCodeSection(verify_functions);
+ break;
+ case kElementSectionCode:
+ DecodeElementSection();
+ break;
+ case kDataSectionCode:
+ DecodeDataSection();
+ break;
+ case kNameSectionCode:
+ DecodeNameSection();
+ break;
+ default:
+ errorf(pc(), "unexpected section: %s", SectionName(section_code));
+ return;
+ }
+
+ if (pc() != bytes.end()) {
+ const char* msg = pc() < bytes.end() ? "shorter" : "longer";
+ errorf(pc(),
+ "section was %s than expected size "
+ "(%zu bytes expected, %zu decoded)",
+ msg, bytes.size(), static_cast<size_t>(pc() - bytes.begin()));
+ }
+ }
+
+ void DecodeTypeSection() {
+ uint32_t signatures_count = consume_count("types count", kV8MaxWasmTypes);
+ module_->signatures.reserve(signatures_count);
+ for (uint32_t i = 0; ok() && i < signatures_count; ++i) {
+ TRACE("DecodeSignature[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ FunctionSig* s = consume_sig(module_->signature_zone.get());
+ module_->signatures.push_back(s);
+ }
+ }
+
+ void DecodeImportSection() {
+ uint32_t import_table_count =
+ consume_count("imports count", kV8MaxWasmImports);
+ module_->import_table.reserve(import_table_count);
+ for (uint32_t i = 0; ok() && i < import_table_count; ++i) {
+ TRACE("DecodeImportTable[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+
+ module_->import_table.push_back({
+ 0, // module_name_length
+ 0, // module_name_offset
+ 0, // field_name_offset
+ 0, // field_name_length
+ kExternalFunction, // kind
+ 0 // index
+ });
+ WasmImport* import = &module_->import_table.back();
+ const byte* pos = pc_;
+ import->module_name_offset =
+ consume_string(&import->module_name_length, true, "module name");
+ import->field_name_offset =
+ consume_string(&import->field_name_length, true, "field name");
+ import->kind = static_cast<WasmExternalKind>(consume_u8("import kind"));
+ switch (import->kind) {
+ case kExternalFunction: {
+ // ===== Imported function =======================================
+ import->index = static_cast<uint32_t>(module_->functions.size());
+ module_->num_imported_functions++;
+ module_->functions.push_back({nullptr, // sig
+ import->index, // func_index
+ 0, // sig_index
+ 0, // name_offset
+ 0, // name_length
+ 0, // code_start_offset
+ 0, // code_end_offset
+ true, // imported
+ false}); // exported
+ WasmFunction* function = &module_->functions.back();
+ function->sig_index =
+ consume_sig_index(module_.get(), &function->sig);
+ break;
+ }
+ case kExternalTable: {
+ // ===== Imported table ==========================================
+ if (!AddTable(module_.get())) break;
+ import->index =
+ static_cast<uint32_t>(module_->function_tables.size());
+ module_->function_tables.push_back({0, 0, false,
+ std::vector<int32_t>(), true,
+ false, SignatureMap()});
+ expect_u8("element type", kWasmAnyFunctionTypeForm);
+ WasmIndirectFunctionTable* table = &module_->function_tables.back();
+ consume_resizable_limits("element count", "elements",
+ FLAG_wasm_max_table_size, &table->min_size,
+ &table->has_max, FLAG_wasm_max_table_size,
+ &table->max_size);
+ break;
+ }
+ case kExternalMemory: {
+ // ===== Imported memory =========================================
+ if (!AddMemory(module_.get())) break;
+ consume_resizable_limits(
+ "memory", "pages", FLAG_wasm_max_mem_pages,
+ &module_->min_mem_pages, &module_->has_max_mem,
+ kSpecMaxWasmMemoryPages, &module_->max_mem_pages);
+ break;
+ }
+ case kExternalGlobal: {
+ // ===== Imported global =========================================
+ import->index = static_cast<uint32_t>(module_->globals.size());
+ module_->globals.push_back(
+ {kWasmStmt, false, WasmInitExpr(), 0, true, false});
+ WasmGlobal* global = &module_->globals.back();
+ global->type = consume_value_type();
+ global->mutability = consume_mutability();
+ if (global->mutability) {
+ error("mutable globals cannot be imported");
}
- default:
- errorf(pos, "unknown import kind 0x%02x", import->kind);
- break;
+ break;
}
+ default:
+ errorf(pos, "unknown import kind 0x%02x", import->kind);
+ break;
}
- section_iter.advance();
}
+ }
- // ===== Function section ================================================
- if (section_iter.section_code() == kFunctionSectionCode) {
- uint32_t functions_count =
- consume_count("functions count", kV8MaxWasmFunctions);
- module->functions.reserve(functions_count);
- module->num_declared_functions = functions_count;
- for (uint32_t i = 0; ok() && i < functions_count; ++i) {
- uint32_t func_index = static_cast<uint32_t>(module->functions.size());
- module->functions.push_back({nullptr, // sig
- func_index, // func_index
- 0, // sig_index
- 0, // name_offset
- 0, // name_length
- 0, // code_start_offset
- 0, // code_end_offset
- false, // imported
- false}); // exported
- WasmFunction* function = &module->functions.back();
- function->sig_index = consume_sig_index(module, &function->sig);
- }
- section_iter.advance();
+ void DecodeFunctionSection() {
+ uint32_t functions_count =
+ consume_count("functions count", kV8MaxWasmFunctions);
+ module_->functions.reserve(functions_count);
+ module_->num_declared_functions = functions_count;
+ for (uint32_t i = 0; ok() && i < functions_count; ++i) {
+ uint32_t func_index = static_cast<uint32_t>(module_->functions.size());
+ module_->functions.push_back({nullptr, // sig
+ func_index, // func_index
+ 0, // sig_index
+ 0, // name_offset
+ 0, // name_length
+ 0, // code_start_offset
+ 0, // code_end_offset
+ false, // imported
+ false}); // exported
+ WasmFunction* function = &module_->functions.back();
+ function->sig_index = consume_sig_index(module_.get(), &function->sig);
}
+ }
- // ===== Table section ===================================================
- if (section_iter.section_code() == kTableSectionCode) {
- uint32_t table_count = consume_count("table count", kV8MaxWasmTables);
-
- for (uint32_t i = 0; ok() && i < table_count; i++) {
- if (!AddTable(module)) break;
- module->function_tables.push_back({0, 0, false, std::vector<int32_t>(),
- false, false, SignatureMap()});
- WasmIndirectFunctionTable* table = &module->function_tables.back();
- expect_u8("table type", kWasmAnyFunctionTypeForm);
- consume_resizable_limits("table elements", "elements",
- FLAG_wasm_max_table_size, &table->min_size,
- &table->has_max, FLAG_wasm_max_table_size,
- &table->max_size);
- }
- section_iter.advance();
+ void DecodeTableSection() {
+ uint32_t table_count = consume_count("table count", kV8MaxWasmTables);
+
+ for (uint32_t i = 0; ok() && i < table_count; i++) {
+ if (!AddTable(module_.get())) break;
+ module_->function_tables.push_back(
+ {0, 0, false, std::vector<int32_t>(), false, false, SignatureMap()});
+ WasmIndirectFunctionTable* table = &module_->function_tables.back();
+ expect_u8("table type", kWasmAnyFunctionTypeForm);
+ consume_resizable_limits("table elements", "elements",
+ FLAG_wasm_max_table_size, &table->min_size,
+ &table->has_max, FLAG_wasm_max_table_size,
+ &table->max_size);
}
+ }
- // ===== Memory section ==================================================
- if (section_iter.section_code() == kMemorySectionCode) {
- uint32_t memory_count = consume_count("memory count", kV8MaxWasmMemories);
+ void DecodeMemorySection() {
+ uint32_t memory_count = consume_count("memory count", kV8MaxWasmMemories);
- for (uint32_t i = 0; ok() && i < memory_count; i++) {
- if (!AddMemory(module)) break;
- consume_resizable_limits("memory", "pages", FLAG_wasm_max_mem_pages,
- &module->min_mem_pages, &module->has_max_mem,
- kSpecMaxWasmMemoryPages,
- &module->max_mem_pages);
- }
- section_iter.advance();
+ for (uint32_t i = 0; ok() && i < memory_count; i++) {
+ if (!AddMemory(module_.get())) break;
+ consume_resizable_limits("memory", "pages", FLAG_wasm_max_mem_pages,
+ &module_->min_mem_pages, &module_->has_max_mem,
+ kSpecMaxWasmMemoryPages,
+ &module_->max_mem_pages);
}
+ }
- // ===== Global section ==================================================
- if (section_iter.section_code() == kGlobalSectionCode) {
- uint32_t globals_count =
- consume_count("globals count", kV8MaxWasmGlobals);
- uint32_t imported_globals = static_cast<uint32_t>(module->globals.size());
- module->globals.reserve(imported_globals + globals_count);
- for (uint32_t i = 0; ok() && i < globals_count; ++i) {
- TRACE("DecodeGlobal[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
- // Add an uninitialized global and pass a pointer to it.
- module->globals.push_back(
- {kWasmStmt, false, WasmInitExpr(), 0, false, false});
- WasmGlobal* global = &module->globals.back();
- DecodeGlobalInModule(module, i + imported_globals, global);
- }
- section_iter.advance();
+ void DecodeGlobalSection() {
+ uint32_t globals_count = consume_count("globals count", kV8MaxWasmGlobals);
+ uint32_t imported_globals = static_cast<uint32_t>(module_->globals.size());
+ module_->globals.reserve(imported_globals + globals_count);
+ for (uint32_t i = 0; ok() && i < globals_count; ++i) {
+ TRACE("DecodeGlobal[%d] module+%d\n", i, static_cast<int>(pc_ - start_));
+ // Add an uninitialized global and pass a pointer to it.
+ module_->globals.push_back(
+ {kWasmStmt, false, WasmInitExpr(), 0, false, false});
+ WasmGlobal* global = &module_->globals.back();
+ DecodeGlobalInModule(module_.get(), i + imported_globals, global);
}
+ }
- // ===== Export section ==================================================
- if (section_iter.section_code() == kExportSectionCode) {
- uint32_t export_table_count =
- consume_count("exports count", kV8MaxWasmImports);
- module->export_table.reserve(export_table_count);
- for (uint32_t i = 0; ok() && i < export_table_count; ++i) {
- TRACE("DecodeExportTable[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
-
- module->export_table.push_back({
- 0, // name_length
- 0, // name_offset
- kExternalFunction, // kind
- 0 // index
- });
- WasmExport* exp = &module->export_table.back();
-
- exp->name_offset =
- consume_string(&exp->name_length, true, "field name");
- const byte* pos = pc();
- exp->kind = static_cast<WasmExternalKind>(consume_u8("export kind"));
- switch (exp->kind) {
- case kExternalFunction: {
- WasmFunction* func = nullptr;
- exp->index = consume_func_index(module, &func);
- module->num_exported_functions++;
- if (func) func->exported = true;
- break;
- }
- case kExternalTable: {
- WasmIndirectFunctionTable* table = nullptr;
- exp->index = consume_table_index(module, &table);
- if (table) table->exported = true;
- break;
- }
- case kExternalMemory: {
- uint32_t index = consume_u32v("memory index");
- // TODO(titzer): This should become more regular
- // once we support multiple memories.
- if (!module->has_memory || index != 0) {
- error("invalid memory index != 0");
- }
- module->mem_export = true;
- break;
- }
- case kExternalGlobal: {
- WasmGlobal* global = nullptr;
- exp->index = consume_global_index(module, &global);
- if (global) {
- if (global->mutability) {
- error("mutable globals cannot be exported");
- }
- global->exported = true;
- }
- break;
- }
- default:
- errorf(pos, "invalid export kind 0x%02x", exp->kind);
- break;
+ void DecodeExportSection() {
+ uint32_t export_table_count =
+ consume_count("exports count", kV8MaxWasmImports);
+ module_->export_table.reserve(export_table_count);
+ for (uint32_t i = 0; ok() && i < export_table_count; ++i) {
+ TRACE("DecodeExportTable[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+
+ module_->export_table.push_back({
+ 0, // name_length
+ 0, // name_offset
+ kExternalFunction, // kind
+ 0 // index
+ });
+ WasmExport* exp = &module_->export_table.back();
+
+ exp->name_offset = consume_string(&exp->name_length, true, "field name");
+
+ const byte* pos = pc();
+ exp->kind = static_cast<WasmExternalKind>(consume_u8("export kind"));
+ switch (exp->kind) {
+ case kExternalFunction: {
+ WasmFunction* func = nullptr;
+ exp->index = consume_func_index(module_.get(), &func);
+ module_->num_exported_functions++;
+ if (func) func->exported = true;
+ break;
}
- }
- // Check for duplicate exports (except for asm.js).
- if (ok() && origin_ != kAsmJsOrigin && module->export_table.size() > 1) {
- std::vector<WasmExport> sorted_exports(module->export_table);
- const byte* base = start_;
- auto cmp_less = [base](const WasmExport& a, const WasmExport& b) {
- // Return true if a < b.
- if (a.name_length != b.name_length) {
- return a.name_length < b.name_length;
+ case kExternalTable: {
+ WasmIndirectFunctionTable* table = nullptr;
+ exp->index = consume_table_index(module_.get(), &table);
+ if (table) table->exported = true;
+ break;
+ }
+ case kExternalMemory: {
+ uint32_t index = consume_u32v("memory index");
+ // TODO(titzer): This should become more regular
+ // once we support multiple memories.
+ if (!module_->has_memory || index != 0) {
+ error("invalid memory index != 0");
}
- return memcmp(base + a.name_offset, base + b.name_offset,
- a.name_length) < 0;
- };
- std::stable_sort(sorted_exports.begin(), sorted_exports.end(),
- cmp_less);
- auto it = sorted_exports.begin();
- WasmExport* last = &*it++;
- for (auto end = sorted_exports.end(); it != end; last = &*it++) {
- DCHECK(!cmp_less(*it, *last)); // Vector must be sorted.
- if (!cmp_less(*last, *it)) {
- const byte* pc = start_ + it->name_offset;
- errorf(pc, "Duplicate export name '%.*s' for functions %d and %d",
- it->name_length, pc, last->index, it->index);
- break;
+ module_->mem_export = true;
+ break;
+ }
+ case kExternalGlobal: {
+ WasmGlobal* global = nullptr;
+ exp->index = consume_global_index(module_.get(), &global);
+ if (global) {
+ if (global->mutability) {
+ error("mutable globals cannot be exported");
+ }
+ global->exported = true;
}
+ break;
}
+ default:
+ errorf(pos, "invalid export kind 0x%02x", exp->kind);
+ break;
}
- section_iter.advance();
- }
-
- // ===== Start section ===================================================
- if (section_iter.section_code() == kStartSectionCode) {
- WasmFunction* func;
- const byte* pos = pc_;
- module->start_function_index = consume_func_index(module, &func);
- if (func &&
- (func->sig->parameter_count() > 0 || func->sig->return_count() > 0)) {
- error(pos,
- "invalid start function: non-zero parameter or return count");
- }
- section_iter.advance();
}
-
- // ===== Elements section ================================================
- if (section_iter.section_code() == kElementSectionCode) {
- uint32_t element_count =
- consume_count("element count", FLAG_wasm_max_table_size);
- for (uint32_t i = 0; ok() && i < element_count; ++i) {
- const byte* pos = pc();
- uint32_t table_index = consume_u32v("table index");
- if (table_index != 0) {
- errorf(pos, "illegal table index %u != 0", table_index);
+ // Check for duplicate exports (except for asm.js).
+ if (ok() && origin_ != kAsmJsOrigin && module_->export_table.size() > 1) {
+ std::vector<WasmExport> sorted_exports(module_->export_table);
+
+ auto cmp_less = [this](const WasmExport& a, const WasmExport& b) {
+ // Return true if a < b.
+ if (a.name_length != b.name_length) {
+ return a.name_length < b.name_length;
}
- WasmIndirectFunctionTable* table = nullptr;
- if (table_index >= module->function_tables.size()) {
- errorf(pos, "out of bounds table index %u", table_index);
+ const byte* left = start() + GetBufferRelativeOffset(a.name_offset);
+ const byte* right = start() + GetBufferRelativeOffset(b.name_offset);
+ return memcmp(left, right, a.name_length) < 0;
+ };
+ std::stable_sort(sorted_exports.begin(), sorted_exports.end(), cmp_less);
+
+ auto it = sorted_exports.begin();
+ WasmExport* last = &*it++;
+ for (auto end = sorted_exports.end(); it != end; last = &*it++) {
+ DCHECK(!cmp_less(*it, *last)); // Vector must be sorted.
+ if (!cmp_less(*last, *it)) {
+ const byte* pc = start() + GetBufferRelativeOffset(it->name_offset);
+ errorf(pc, "Duplicate export name '%.*s' for functions %d and %d",
+ it->name_length, pc, last->index, it->index);
break;
}
- table = &module->function_tables[table_index];
- WasmInitExpr offset = consume_init_expr(module, kWasmI32);
- uint32_t num_elem =
- consume_count("number of elements", kV8MaxWasmTableEntries);
- std::vector<uint32_t> vector;
- module->table_inits.push_back({table_index, offset, vector});
- WasmTableInit* init = &module->table_inits.back();
- for (uint32_t j = 0; ok() && j < num_elem; j++) {
- WasmFunction* func = nullptr;
- uint32_t index = consume_func_index(module, &func);
- DCHECK_EQ(func != nullptr, ok());
- if (!func) break;
- DCHECK_EQ(index, func->func_index);
- init->entries.push_back(index);
- // Canonicalize signature indices during decoding.
- table->map.FindOrInsert(func->sig);
- }
}
+ }
+ }
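The duplicate-export check above avoids quadratic name comparisons: it sorts a copy of the export table by (length, bytes) and then compares only neighbours, so any duplicate must be adjacent. The same pattern in isolation, over plain strings (an illustrative sketch, not code from this patch):

    #include <algorithm>
    #include <string>
    #include <vector>

    // True iff some name occurs twice: after a stable sort duplicates are
    // adjacent, so one linear scan over neighbouring pairs suffices.
    static bool HasDuplicateName(std::vector<std::string> names) {
      std::stable_sort(names.begin(), names.end());
      for (size_t i = 1; i < names.size(); ++i) {
        if (names[i - 1] == names[i]) return true;
      }
      return false;
    }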
- section_iter.advance();
+ void DecodeStartSection() {
+ WasmFunction* func;
+ const byte* pos = pc_;
+ module_->start_function_index = consume_func_index(module_.get(), &func);
+ if (func &&
+ (func->sig->parameter_count() > 0 || func->sig->return_count() > 0)) {
+ error(pos, "invalid start function: non-zero parameter or return count");
}
+ }
- // ===== Code section ====================================================
- if (section_iter.section_code() == kCodeSectionCode) {
- const byte* pos = pc_;
- uint32_t functions_count = consume_u32v("functions count");
- if (functions_count != module->num_declared_functions) {
- errorf(pos, "function body count %u mismatch (%u expected)",
- functions_count, module->num_declared_functions);
+ void DecodeElementSection() {
+ uint32_t element_count =
+ consume_count("element count", FLAG_wasm_max_table_size);
+ for (uint32_t i = 0; ok() && i < element_count; ++i) {
+ const byte* pos = pc();
+ uint32_t table_index = consume_u32v("table index");
+ if (table_index != 0) {
+ errorf(pos, "illegal table index %u != 0", table_index);
}
- for (uint32_t i = 0; ok() && i < functions_count; ++i) {
- WasmFunction* function =
- &module->functions[i + module->num_imported_functions];
- uint32_t size = consume_u32v("body size");
- function->code_start_offset = pc_offset();
- function->code_end_offset = pc_offset() + size;
- if (verify_functions) {
- ModuleBytesEnv module_env(module, nullptr,
- ModuleWireBytes(start_, end_));
- VerifyFunctionBody(i + module->num_imported_functions, &module_env,
- function);
- }
- consume_bytes(size, "function body");
+ WasmIndirectFunctionTable* table = nullptr;
+ if (table_index >= module_->function_tables.size()) {
+ errorf(pos, "out of bounds table index %u", table_index);
+ break;
+ }
+ table = &module_->function_tables[table_index];
+ WasmInitExpr offset = consume_init_expr(module_.get(), kWasmI32);
+ uint32_t num_elem =
+ consume_count("number of elements", kV8MaxWasmTableEntries);
+ std::vector<uint32_t> vector;
+ module_->table_inits.push_back({table_index, offset, vector});
+ WasmTableInit* init = &module_->table_inits.back();
+ for (uint32_t j = 0; j < num_elem; j++) {
+ WasmFunction* func = nullptr;
+ uint32_t index = consume_func_index(module_.get(), &func);
+ DCHECK_IMPLIES(ok(), func != nullptr);
+ if (!ok()) break;
+ DCHECK_EQ(index, func->func_index);
+ init->entries.push_back(index);
+ // Canonicalize signature indices during decoding.
+ table->map.FindOrInsert(func->sig);
}
- section_iter.advance();
}
+ }
- // ===== Data section ====================================================
- if (section_iter.section_code() == kDataSectionCode) {
- uint32_t data_segments_count =
- consume_count("data segments count", kV8MaxWasmDataSegments);
- module->data_segments.reserve(data_segments_count);
- for (uint32_t i = 0; ok() && i < data_segments_count; ++i) {
- if (!module->has_memory) {
- error("cannot load data without memory");
- break;
- }
- TRACE("DecodeDataSegment[%d] module+%d\n", i,
- static_cast<int>(pc_ - start_));
- module->data_segments.push_back({
- WasmInitExpr(), // dest_addr
- 0, // source_offset
- 0 // source_size
- });
- WasmDataSegment* segment = &module->data_segments.back();
- DecodeDataSegmentInModule(module, segment);
+ void DecodeCodeSection(bool verify_functions) {
+ const byte* pos = pc_;
+ uint32_t functions_count = consume_u32v("functions count");
+ if (functions_count != module_->num_declared_functions) {
+ errorf(pos, "function body count %u mismatch (%u expected)",
+ functions_count, module_->num_declared_functions);
+ }
+ for (uint32_t i = 0; ok() && i < functions_count; ++i) {
+ WasmFunction* function =
+ &module_->functions[i + module_->num_imported_functions];
+ uint32_t size = consume_u32v("body size");
+ uint32_t offset = pc_offset();
+ consume_bytes(size, "function body");
+ if (failed()) break;
+ function->code_start_offset = offset;
+ function->code_end_offset = offset + size;
+ if (verify_functions) {
+ ModuleBytesEnv module_env(module_.get(), nullptr,
+ ModuleWireBytes(start_, end_));
+ VerifyFunctionBody(module_->signature_zone->allocator(),
+ i + module_->num_imported_functions, &module_env,
+ function);
}
- section_iter.advance();
}
+ }
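Note the reordering here: the body bytes are consumed before the offsets are recorded, and the offsets now come from pc_offset() (buffer-relative position plus buffer_offset_), so they remain meaningful when the code section arrives in its own streaming buffer. As a sketch, the payload this function walks has the shape:

    // Code section payload:
    //   functions_count : varuint32  -- must equal num_declared_functions
    //   repeated functions_count times:
    //     body_size : varuint32
    //     body      : body_size bytes (local declarations + opcodes),
    //                 checked separately by VerifyFunctionBody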
- // ===== Name section ====================================================
- if (section_iter.section_code() == kNameSectionCode) {
- // TODO(titzer): find a way to report name errors as warnings.
- // Use an inner decoder so that errors don't fail the outer decoder.
- Decoder inner(start_, pc_, end_);
- // Decode all name subsections.
- // Be lenient with their order.
- while (inner.ok() && inner.more()) {
- uint8_t name_type = inner.consume_u8("name type");
- if (name_type & 0x80) inner.error("name type if not varuint7");
-
- uint32_t name_payload_len = inner.consume_u32v("name payload length");
- if (!inner.checkAvailable(name_payload_len)) break;
-
- // Decode function names, ignore the rest.
- // Local names will be decoded when needed.
- if (name_type == NameSectionType::kFunction) {
- uint32_t functions_count = inner.consume_u32v("functions count");
-
- for (; inner.ok() && functions_count > 0; --functions_count) {
- uint32_t function_index = inner.consume_u32v("function index");
- uint32_t name_length = 0;
- uint32_t name_offset = wasm::consume_string(inner, &name_length,
- false, "function name");
- // Be lenient with errors in the name section: Ignore illegal
- // or out-of-order indexes and non-UTF8 names. You can even assign
- // to the same function multiple times (last valid one wins).
- if (inner.ok() && function_index < module->functions.size() &&
- unibrow::Utf8::Validate(inner.start() + name_offset,
- name_length)) {
- module->functions[function_index].name_offset = name_offset;
- module->functions[function_index].name_length = name_length;
- }
- }
- } else {
- inner.consume_bytes(name_payload_len, "name subsection payload");
- }
+ void DecodeDataSection() {
+ uint32_t data_segments_count =
+ consume_count("data segments count", kV8MaxWasmDataSegments);
+ module_->data_segments.reserve(data_segments_count);
+ for (uint32_t i = 0; ok() && i < data_segments_count; ++i) {
+ if (!module_->has_memory) {
+ error("cannot load data without memory");
+ break;
}
- // Skip the whole names section in the outer decoder.
- consume_bytes(section_iter.payload_length(), nullptr);
- section_iter.advance();
+ TRACE("DecodeDataSegment[%d] module+%d\n", i,
+ static_cast<int>(pc_ - start_));
+ module_->data_segments.push_back({
+ WasmInitExpr(), // dest_addr
+ 0, // source_offset
+ 0 // source_size
+ });
+ WasmDataSegment* segment = &module_->data_segments.back();
+ DecodeDataSegmentInModule(module_.get(), segment);
}
+ }
- // ===== Remaining sections ==============================================
- if (section_iter.more() && ok()) {
- errorf(pc(), "unexpected section: %s",
- SectionName(section_iter.section_code()));
+ void DecodeNameSection() {
+ // TODO(titzer): find a way to report name errors as warnings.
+ // Use an inner decoder so that errors don't fail the outer decoder.
+ Decoder inner(start_, pc_, end_, buffer_offset_);
+ // Decode all name subsections.
+ // Be lenient with their order.
+ while (inner.ok() && inner.more()) {
+ uint8_t name_type = inner.consume_u8("name type");
+ if (name_type & 0x80) inner.error("name type is not varuint7");
+
+ uint32_t name_payload_len = inner.consume_u32v("name payload length");
+ if (!inner.checkAvailable(name_payload_len)) break;
+
+ // Decode function names, ignore the rest.
+ // Local names will be decoded when needed.
+ if (name_type == NameSectionType::kFunction) {
+ uint32_t functions_count = inner.consume_u32v("functions count");
+
+ for (; inner.ok() && functions_count > 0; --functions_count) {
+ uint32_t function_index = inner.consume_u32v("function index");
+ uint32_t name_length = 0;
+ uint32_t name_offset =
+ wasm::consume_string(inner, &name_length, false, "function name");
+
+ // Be lenient with errors in the name section: Ignore illegal
+ // or out-of-order indexes and non-UTF8 names. You can even assign
+ // to the same function multiple times (last valid one wins).
+ if (inner.ok() && function_index < module_->functions.size() &&
+ unibrow::Utf8::Validate(
+ inner.start() + inner.GetBufferRelativeOffset(name_offset),
+ name_length)) {
+ module_->functions[function_index].name_offset = name_offset;
+ module_->functions[function_index].name_length = name_length;
+ }
+ }
+ } else {
+ inner.consume_bytes(name_payload_len, "name subsection payload");
+ }
}
+ // Skip the whole names section in the outer decoder.
+ consume_bytes(static_cast<uint32_t>(end_ - start_), nullptr);
+ }
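For orientation, the name section payload walked by the inner decoder is a sequence of typed subsections; only function names (type 1) are consumed here, and local names (type 2) are skipped until needed. A sketch of the layout:

    // Name section payload:
    //   repeated subsections:
    //     name_type   : u7 (1 = function names, 2 = local names)
    //     payload_len : varuint32
    //     payload     : payload_len bytes
    //
    // Function-names subsection payload:
    //   count : varuint32
    //   repeated count times:
    //     func_index : varuint32
    //     name_len   : varuint32, followed by name_len bytes of UTF-8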
+ ModuleResult FinishDecoding(bool verify_functions = true) {
if (ok()) {
- CalculateGlobalOffsets(module);
+ CalculateGlobalOffsets(module_.get());
}
- const WasmModule* finished_module = module;
- ModuleResult result = toResult(finished_module);
+ ModuleResult result = toResult(std::move(module_));
if (verify_functions && result.ok()) {
// Copy error code and location.
result.MoveErrorFrom(intermediate_result_);
@@ -726,29 +772,62 @@ class ModuleDecoder : public Decoder {
return result;
}
+ // Decodes an entire module.
+ ModuleResult DecodeModule(Isolate* isolate, bool verify_functions = true) {
+ StartDecoding(isolate);
+ uint32_t offset = 0;
+ DecodeModuleHeader(Vector<const uint8_t>(start(), end() - start()), offset);
+ if (failed()) {
+ return FinishDecoding(verify_functions);
+ }
+ // Size of the module header.
+ offset += 8;
+ Decoder decoder(start_ + offset, end_, offset);
+
+ WasmSectionIterator section_iter(decoder);
+
+ while (ok() && section_iter.more()) {
+ // Shift the offset by the section header length.
+ offset += section_iter.payload_start() - section_iter.section_start();
+ if (section_iter.section_code() != SectionCode::kUnknownSectionCode) {
+ DecodeSection(section_iter.section_code(), section_iter.payload(),
+ offset, verify_functions);
+ }
+ // Shift the offset by the remaining section payload.
+ offset += section_iter.payload_length();
+ section_iter.advance(true);
+ }
+
+ if (decoder.failed()) {
+ return decoder.toResult<std::unique_ptr<WasmModule>>(nullptr);
+ }
+
+ return FinishDecoding(verify_functions);
+ }
+
// Decodes a single anonymous function starting at {start_}.
- FunctionResult DecodeSingleFunction(ModuleBytesEnv* module_env,
- WasmFunction* function) {
+ FunctionResult DecodeSingleFunction(Zone* zone, ModuleBytesEnv* module_env,
+ std::unique_ptr<WasmFunction> function) {
pc_ = start_;
- function->sig = consume_sig(); // read signature
- function->name_offset = 0; // ---- name
- function->name_length = 0; // ---- name length
- function->code_start_offset = off(pc_); // ---- code start
- function->code_end_offset = off(end_); // ---- code end
+ function->sig = consume_sig(zone); // read signature
+ function->name_offset = 0; // ---- name
+ function->name_length = 0; // ---- name length
+ function->code_start_offset = off(pc_); // ---- code start
+ function->code_end_offset = off(end_); // ---- code end
- if (ok()) VerifyFunctionBody(0, module_env, function);
+ if (ok())
+ VerifyFunctionBody(zone->allocator(), 0, module_env, function.get());
- FunctionResult result;
+ FunctionResult result(std::move(function));
// Copy error code and location.
result.MoveErrorFrom(intermediate_result_);
- result.val = function;
return result;
}
// Decodes a single function signature at {start}.
- FunctionSig* DecodeFunctionSignature(const byte* start) {
+ FunctionSig* DecodeFunctionSignature(Zone* zone, const byte* start) {
pc_ = start;
- FunctionSig* result = consume_sig();
+ FunctionSig* result = consume_sig(zone);
return ok() ? result : nullptr;
}
@@ -758,11 +837,20 @@ class ModuleDecoder : public Decoder {
}
private:
- Zone* module_zone_;
+ std::unique_ptr<WasmModule> module_;
+ // The type section is the first section in a module.
+ uint8_t next_section_ = kFirstSectionInModule;
+ // We store next_section_ as uint8_t instead of SectionCode so that we can
+ // increment it. This static_assert should make sure that SectionCode does not
+ // get bigger than uint8_t accidentally.
+ static_assert(sizeof(ModuleDecoder::next_section_) == sizeof(SectionCode),
+ "type mismatch");
Result<bool> intermediate_result_;
ModuleOrigin origin_;
- uint32_t off(const byte* ptr) { return static_cast<uint32_t>(ptr - start_); }
+ uint32_t off(const byte* ptr) {
+ return static_cast<uint32_t>(ptr - start_) + buffer_offset_;
+ }
bool AddTable(WasmModule* module) {
if (module->function_tables.size() > 0) {
@@ -829,13 +917,13 @@ class ModuleDecoder : public Decoder {
expect_u8("linear memory index", 0);
segment->dest_addr = consume_init_expr(module, kWasmI32);
segment->source_size = consume_u32v("source size");
- segment->source_offset = static_cast<uint32_t>(pc_ - start_);
+ segment->source_offset = pc_offset();
- // Validate the data is in the module.
- uint32_t module_limit = static_cast<uint32_t>(end_ - start_);
- if (!IsWithinLimit(module_limit, segment->source_offset,
+ // Validate the data is in the decoder buffer.
+ uint32_t limit = static_cast<uint32_t>(end_ - start_);
+ if (!IsWithinLimit(limit, GetBufferRelativeOffset(segment->source_offset),
segment->source_size)) {
- error(start, "segment out of bounds of module");
+ error(start, "segment out of bounds of the section");
}
consume_bytes(segment->source_size, "segment data");
@@ -859,24 +947,24 @@ class ModuleDecoder : public Decoder {
}
// Verifies the body (code) of a given function.
- void VerifyFunctionBody(uint32_t func_num, ModuleBytesEnv* menv,
- WasmFunction* function) {
+ void VerifyFunctionBody(AccountingAllocator* allocator, uint32_t func_num,
+ ModuleBytesEnv* menv, WasmFunction* function) {
WasmFunctionName func_name(function,
menv->wire_bytes.GetNameOrNull(function));
if (FLAG_trace_wasm_decoder || FLAG_trace_wasm_decode_time) {
OFStream os(stdout);
os << "Verifying WASM function " << func_name << std::endl;
}
- FunctionBody body = {function->sig, start_,
- start_ + function->code_start_offset,
- start_ + function->code_end_offset};
+ FunctionBody body = {
+ function->sig, start_,
+ start_ + GetBufferRelativeOffset(function->code_start_offset),
+ start_ + GetBufferRelativeOffset(function->code_end_offset)};
DecodeResult result = VerifyWasmCode(
- module_zone_->allocator(),
- menv == nullptr ? nullptr : menv->module_env.module, body);
+ allocator, menv == nullptr ? nullptr : menv->module_env.module, body);
if (result.failed()) {
// Wrap the error message from the function decoder.
std::ostringstream str;
- str << "in function " << func_name << ": " << result.error_msg;
+ str << "in function " << func_name << ": " << result.error_msg();
// Set error code and location, if this is the first error.
if (intermediate_result_.ok()) {
@@ -1098,7 +1186,7 @@ class ModuleDecoder : public Decoder {
}
// Parses a type entry, which is currently limited to functions only.
- FunctionSig* consume_sig() {
+ FunctionSig* consume_sig(Zone* zone) {
if (!expect_u8("type form", kWasmFunctionTypeForm)) return nullptr;
// parse parameter types
uint32_t param_count =
@@ -1125,80 +1213,129 @@ class ModuleDecoder : public Decoder {
if (failed()) return nullptr;
// FunctionSig stores the return types first.
- ValueType* buffer =
- module_zone_->NewArray<ValueType>(param_count + return_count);
+ ValueType* buffer = zone->NewArray<ValueType>(param_count + return_count);
uint32_t b = 0;
for (uint32_t i = 0; i < return_count; ++i) buffer[b++] = returns[i];
for (uint32_t i = 0; i < param_count; ++i) buffer[b++] = params[i];
- return new (module_zone_) FunctionSig(return_count, param_count, buffer);
+ return new (zone) FunctionSig(return_count, param_count, buffer);
}
};
-} // namespace
-
-ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
- const byte* module_end, bool verify_functions,
- ModuleOrigin origin) {
- HistogramTimerScope wasm_decode_module_time_scope(
- IsWasm(origin) ? isolate->counters()->wasm_decode_wasm_module_time()
- : isolate->counters()->wasm_decode_asm_module_time());
+ModuleResult DecodeWasmModuleInternal(Isolate* isolate,
+ const byte* module_start,
+ const byte* module_end,
+ bool verify_functions,
+ ModuleOrigin origin, bool is_sync) {
size_t size = module_end - module_start;
if (module_start > module_end) return ModuleResult::Error("start > end");
if (size >= kV8MaxWasmModuleSize)
return ModuleResult::Error("size > maximum module size: %zu", size);
// TODO(bradnelson): Improve histogram handling of size_t.
- (IsWasm(origin) ? isolate->counters()->wasm_wasm_module_size_bytes()
- : isolate->counters()->wasm_asm_module_size_bytes())
- ->AddSample(static_cast<int>(size));
+ if (is_sync) {
+ // TODO(karlschimpf): Make this work when asynchronous.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=6361
+ (IsWasm(origin) ? isolate->counters()->wasm_wasm_module_size_bytes()
+ : isolate->counters()->wasm_asm_module_size_bytes())
+ ->AddSample(static_cast<int>(size));
+ }
// Signatures are stored in zone memory, which have the same lifetime
// as the {module}.
- Zone* zone = new Zone(isolate->allocator(), ZONE_NAME);
- ModuleDecoder decoder(zone, module_start, module_end, origin);
- ModuleResult result = decoder.DecodeModule(verify_functions);
+ ModuleDecoder decoder(module_start, module_end, origin);
+ ModuleResult result = decoder.DecodeModule(isolate, verify_functions);
// TODO(bradnelson): Improve histogram handling of size_t.
// TODO(titzer): this isn't accurate, since it doesn't count the data
// allocated on the C++ heap.
// https://bugs.chromium.org/p/chromium/issues/detail?id=657320
- (IsWasm(origin)
- ? isolate->counters()->wasm_decode_wasm_module_peak_memory_bytes()
- : isolate->counters()->wasm_decode_asm_module_peak_memory_bytes())
- ->AddSample(static_cast<int>(zone->allocation_size()));
+ if (is_sync && result.ok()) {
+ // TODO(karlschimpf): Make this work when asynchronous.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=6361
+ (IsWasm(origin)
+ ? isolate->counters()->wasm_decode_wasm_module_peak_memory_bytes()
+ : isolate->counters()->wasm_decode_asm_module_peak_memory_bytes())
+ ->AddSample(
+ static_cast<int>(result.val->signature_zone->allocation_size()));
+ }
return result;
}
+} // namespace
+
+ModuleResult DecodeWasmModule(Isolate* isolate, const byte* module_start,
+ const byte* module_end, bool verify_functions,
+ ModuleOrigin origin, bool is_sync) {
+ if (is_sync) {
+ // TODO(karlschimpf): Make this work when asynchronous.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=6361
+ HistogramTimerScope wasm_decode_module_time_scope(
+ IsWasm(origin) ? isolate->counters()->wasm_decode_wasm_module_time()
+ : isolate->counters()->wasm_decode_asm_module_time());
+ return DecodeWasmModuleInternal(isolate, module_start, module_end,
+ verify_functions, origin, true);
+ }
+ return DecodeWasmModuleInternal(isolate, module_start, module_end,
+ verify_functions, origin, false);
+}
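A hypothetical call site for the updated entry point (wire_bytes and wire_size are assumed names, not from this patch); is_sync defaults to true, and passing false only suppresses the histogram sampling until the counters work off the main thread, per the TODOs above:

    // Decode a complete module synchronously and take ownership of the result.
    ModuleResult result =
        DecodeWasmModule(isolate, wire_bytes, wire_bytes + wire_size,
                         /*verify_functions=*/true, kWasmOrigin);
    if (result.ok()) {
      std::unique_ptr<WasmModule> module = std::move(result.val);
      // ... compile or instantiate ...
    }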
+
FunctionSig* DecodeWasmSignatureForTesting(Zone* zone, const byte* start,
const byte* end) {
- ModuleDecoder decoder(zone, start, end, kWasmOrigin);
- return decoder.DecodeFunctionSignature(start);
+ ModuleDecoder decoder(start, end, kWasmOrigin);
+ return decoder.DecodeFunctionSignature(zone, start);
}
WasmInitExpr DecodeWasmInitExprForTesting(const byte* start, const byte* end) {
AccountingAllocator allocator;
- Zone zone(&allocator, ZONE_NAME);
- ModuleDecoder decoder(&zone, start, end, kWasmOrigin);
+ ModuleDecoder decoder(start, end, kWasmOrigin);
return decoder.DecodeInitExpr(start);
}
-FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
- ModuleBytesEnv* module_env,
- const byte* function_start,
- const byte* function_end) {
- bool is_wasm = module_env->module_env.is_wasm();
- HistogramTimerScope wasm_decode_function_time_scope(
- is_wasm ? isolate->counters()->wasm_decode_wasm_function_time()
- : isolate->counters()->wasm_decode_asm_function_time());
+namespace {
+
+FunctionResult DecodeWasmFunctionInternal(Isolate* isolate, Zone* zone,
+ ModuleBytesEnv* module_env,
+ const byte* function_start,
+ const byte* function_end,
+ bool is_sync) {
size_t size = function_end - function_start;
if (function_start > function_end)
return FunctionResult::Error("start > end");
if (size > kV8MaxWasmFunctionSize)
return FunctionResult::Error("size > maximum function size: %zu", size);
- (is_wasm ? isolate->counters()->wasm_wasm_function_size_bytes()
- : isolate->counters()->wasm_asm_function_size_bytes())
- ->AddSample(static_cast<int>(size));
- WasmFunction* function = new WasmFunction();
- ModuleDecoder decoder(zone, function_start, function_end, kWasmOrigin);
- return decoder.DecodeSingleFunction(module_env, function);
+ if (is_sync) {
+ // TODO(karlschimpf): Make this work when asynchronous.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=6361
+ bool is_wasm = module_env->module_env.is_wasm();
+ (is_wasm ? isolate->counters()->wasm_wasm_function_size_bytes()
+ : isolate->counters()->wasm_asm_function_size_bytes())
+ ->AddSample(static_cast<int>(size));
+ }
+ ModuleDecoder decoder(function_start, function_end, kWasmOrigin);
+ return decoder.DecodeSingleFunction(
+ zone, module_env, std::unique_ptr<WasmFunction>(new WasmFunction()));
+}
+
+} // namespace
+
+FunctionResult DecodeWasmFunction(Isolate* isolate, Zone* zone,
+ ModuleBytesEnv* module_env,
+ const byte* function_start,
+ const byte* function_end, bool is_sync) {
+ if (is_sync) {
+ // TODO(karlschimpf): Make this work when asynchronous.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=6361
+ size_t size = function_end - function_start;
+ bool is_wasm = module_env->module_env.is_wasm();
+ (is_wasm ? isolate->counters()->wasm_wasm_function_size_bytes()
+ : isolate->counters()->wasm_asm_function_size_bytes())
+ ->AddSample(static_cast<int>(size));
+ HistogramTimerScope wasm_decode_function_time_scope(
+ is_wasm ? isolate->counters()->wasm_decode_wasm_function_time()
+ : isolate->counters()->wasm_decode_asm_function_time());
+ return DecodeWasmFunctionInternal(isolate, zone, module_env, function_start,
+ function_end, true);
+ }
+ return DecodeWasmFunctionInternal(isolate, zone, module_env, function_start,
+ function_end, false);
}
AsmJsOffsetsResult DecodeAsmJsOffsets(const byte* tables_start,
diff --git a/deps/v8/src/wasm/module-decoder.h b/deps/v8/src/wasm/module-decoder.h
index b29dfb196b..91169d8eca 100644
--- a/deps/v8/src/wasm/module-decoder.h
+++ b/deps/v8/src/wasm/module-decoder.h
@@ -20,7 +20,7 @@ const uint8_t kWasmFunctionTypeForm = 0x60;
const uint8_t kWasmAnyFunctionTypeForm = 0x70;
const uint8_t kResizableMaximumFlag = 1;
-enum SectionCode {
+enum SectionCode : int8_t {
kUnknownSectionCode = 0, // code for unknown sections
kTypeSectionCode = 1, // Function signature declarations
kImportSectionCode = 2, // Import declarations
@@ -34,6 +34,9 @@ enum SectionCode {
kCodeSectionCode = 10, // Function code
kDataSectionCode = 11, // Data segments
kNameSectionCode = 12, // Name section (encoded as a string)
+
+ // Helper values
+ kFirstSectionInModule = kTypeSectionCode,
};
enum NameSectionType : uint8_t { kFunction = 1, kLocal = 2 };
@@ -44,8 +47,8 @@ inline bool IsValidSectionCode(uint8_t byte) {
const char* SectionName(SectionCode code);
-typedef Result<const WasmModule*> ModuleResult;
-typedef Result<WasmFunction*> FunctionResult;
+typedef Result<std::unique_ptr<WasmModule>> ModuleResult;
+typedef Result<std::unique_ptr<WasmFunction>> FunctionResult;
typedef std::vector<std::pair<int, int>> FunctionOffsets;
typedef Result<FunctionOffsets> FunctionOffsetsResult;
struct AsmJsOffsetEntry {
@@ -57,11 +60,9 @@ typedef std::vector<std::vector<AsmJsOffsetEntry>> AsmJsOffsets;
typedef Result<AsmJsOffsets> AsmJsOffsetsResult;
// Decodes the bytes of a WASM module between {module_start} and {module_end}.
-V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(Isolate* isolate,
- const byte* module_start,
- const byte* module_end,
- bool verify_functions,
- ModuleOrigin origin);
+V8_EXPORT_PRIVATE ModuleResult DecodeWasmModule(
+ Isolate* isolate, const byte* module_start, const byte* module_end,
+ bool verify_functions, ModuleOrigin origin, bool is_sync = true);
// Exposed for testing. Decodes a single function signature, allocating it
// in the given zone. Returns {nullptr} upon failure.
@@ -71,11 +72,9 @@ V8_EXPORT_PRIVATE FunctionSig* DecodeWasmSignatureForTesting(Zone* zone,
// Decodes the bytes of a WASM function between
// {function_start} and {function_end}.
-V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunction(Isolate* isolate,
- Zone* zone,
- ModuleBytesEnv* env,
- const byte* function_start,
- const byte* function_end);
+V8_EXPORT_PRIVATE FunctionResult DecodeWasmFunction(
+ Isolate* isolate, Zone* zone, ModuleBytesEnv* env,
+ const byte* function_start, const byte* function_end, bool is_sync = true);
V8_EXPORT_PRIVATE WasmInitExpr DecodeWasmInitExprForTesting(const byte* start,
const byte* end);
diff --git a/deps/v8/src/wasm/streaming-decoder.cc b/deps/v8/src/wasm/streaming-decoder.cc
new file mode 100644
index 0000000000..2772cd5945
--- /dev/null
+++ b/deps/v8/src/wasm/streaming-decoder.cc
@@ -0,0 +1,379 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/streaming-decoder.h"
+
+#include "src/objects-inl.h"
+
+#include "src/handles.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/leb-helper.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-objects.h"
+#include "src/wasm/wasm-result.h"
+
+#include "src/objects/descriptor-array.h"
+#include "src/objects/dictionary.h"
+
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+
+void StreamingDecoder::OnBytesReceived(Vector<const uint8_t> bytes) {
+ size_t current = 0;
+ while (decoder()->ok() && current < bytes.size()) {
+ size_t num_bytes =
+ state_->ReadBytes(this, bytes.SubVector(current, bytes.size()));
+ current += num_bytes;
+ if (state_->is_finished()) {
+ state_ = state_->Next(this);
+ }
+ }
+ total_size_ += bytes.size();
+}
+
+size_t StreamingDecoder::DecodingState::ReadBytes(StreamingDecoder* streaming,
+ Vector<const uint8_t> bytes) {
+ size_t num_bytes = std::min(bytes.size(), remaining());
+ memcpy(buffer() + offset(), &bytes.first(), num_bytes);
+ set_offset(offset() + num_bytes);
+ return num_bytes;
+}
+
+MaybeHandle<WasmModuleObject> StreamingDecoder::Finish() {
+ UNIMPLEMENTED();
+ return Handle<WasmModuleObject>::null();
+}
+
+bool StreamingDecoder::FinishForTesting() {
+ return decoder_.ok() && state_->is_finishing_allowed();
+}
+
+// An abstract class to share code among the states that decode VarInts. This
+// class takes over the decoding of the VarInt and then calls the actual
+// decoding code with the decoded value.
+class StreamingDecoder::DecodeVarInt32 : public DecodingState {
+ public:
+ explicit DecodeVarInt32(size_t max_value) : max_value_(max_value) {}
+ uint8_t* buffer() override { return byte_buffer_; }
+ size_t size() const override { return kMaxVarInt32Size; }
+
+ size_t ReadBytes(StreamingDecoder* streaming,
+ Vector<const uint8_t> bytes) override;
+
+ std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+
+ virtual std::unique_ptr<DecodingState> NextWithValue(
+ StreamingDecoder* streaming) = 0;
+
+ size_t value() const { return value_; }
+ size_t bytes_needed() const { return bytes_needed_; }
+
+ private:
+ uint8_t byte_buffer_[kMaxVarInt32Size];
+ // The maximum valid value decoded in this state. {Next} returns an error if
+ // this value is exceeded.
+ size_t max_value_;
+ size_t value_ = 0;
+ size_t bytes_needed_ = 0;
+};
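DecodeVarInt32 buffers up to kMaxVarInt32Size bytes because a LEB128 value may straddle chunk boundaries. For reference, a self-contained sketch of the unsigned LEB128 scheme behind the decoder's varint readers (omitting the bounds and length checks the real decoder performs):

    #include <cstdint>

    // Unsigned LEB128: 7 payload bits per byte, MSB set on every byte except
    // the last. A uint32_t therefore needs at most 5 bytes (kMaxVarInt32Size).
    static uint32_t ReadVarUint32(const uint8_t* p, const uint8_t** end) {
      uint32_t result = 0;
      int shift = 0;
      uint8_t b;
      do {
        b = *p++;
        result |= static_cast<uint32_t>(b & 0x7f) << shift;
        shift += 7;
      } while (b & 0x80);
      *end = p;  // first byte after the encoded value
      return result;
    }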
+
+class StreamingDecoder::DecodeModuleHeader : public DecodingState {
+ public:
+ size_t size() const override { return kModuleHeaderSize; }
+ uint8_t* buffer() override { return byte_buffer_; }
+
+ std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+
+ private:
+ // Checks if the magic bytes of the module header are correct.
+ void CheckHeader(Decoder* decoder);
+
+ // The size of the module header.
+ static constexpr size_t kModuleHeaderSize = 8;
+ uint8_t byte_buffer_[kModuleHeaderSize];
+};
+
+class StreamingDecoder::DecodeSectionID : public DecodingState {
+ public:
+ size_t size() const override { return 1; }
+ uint8_t* buffer() override { return &id_; }
+ bool is_finishing_allowed() const override { return true; }
+
+ uint8_t id() const { return id_; }
+
+ std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+
+ private:
+ uint8_t id_ = 0;
+};
+
+class StreamingDecoder::DecodeSectionLength : public DecodeVarInt32 {
+ public:
+ explicit DecodeSectionLength(uint8_t id)
+ : DecodeVarInt32(kV8MaxWasmModuleSize), section_id_(id) {}
+
+ uint8_t section_id() const { return section_id_; }
+
+ std::unique_ptr<DecodingState> NextWithValue(
+ StreamingDecoder* streaming) override;
+
+ private:
+ uint8_t section_id_;
+};
+
+class StreamingDecoder::DecodeSectionPayload : public DecodingState {
+ public:
+ explicit DecodeSectionPayload(SectionBuffer* section_buffer)
+ : section_buffer_(section_buffer) {}
+
+ size_t size() const override { return section_buffer_->payload_length(); }
+ uint8_t* buffer() override {
+ return section_buffer_->bytes() + section_buffer_->payload_offset();
+ }
+
+ std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+
+ private:
+ SectionBuffer* section_buffer_;
+};
+
+class StreamingDecoder::DecodeNumberOfFunctions : public DecodeVarInt32 {
+ public:
+ explicit DecodeNumberOfFunctions(SectionBuffer* section_buffer)
+ : DecodeVarInt32(kV8MaxWasmFunctions), section_buffer_(section_buffer) {}
+
+ SectionBuffer* section_buffer() const { return section_buffer_; }
+
+ std::unique_ptr<DecodingState> NextWithValue(
+ StreamingDecoder* streaming) override;
+
+ private:
+ SectionBuffer* section_buffer_;
+};
+
+class StreamingDecoder::DecodeFunctionLength : public DecodeVarInt32 {
+ public:
+ explicit DecodeFunctionLength(SectionBuffer* section_buffer,
+ size_t buffer_offset,
+ size_t num_remaining_functions)
+ : DecodeVarInt32(kV8MaxWasmFunctionSize),
+ section_buffer_(section_buffer),
+ buffer_offset_(buffer_offset),
+ // We are reading a new function, so one fewer function remains.
+ num_remaining_functions_(num_remaining_functions - 1) {
+ DCHECK_GT(num_remaining_functions, 0);
+ }
+
+ size_t num_remaining_functions() const { return num_remaining_functions_; }
+ size_t buffer_offset() const { return buffer_offset_; }
+ SectionBuffer* section_buffer() const { return section_buffer_; }
+
+ std::unique_ptr<DecodingState> NextWithValue(
+ StreamingDecoder* streaming) override;
+
+ private:
+ SectionBuffer* section_buffer_;
+ size_t buffer_offset_;
+ size_t num_remaining_functions_;
+};
+
+class StreamingDecoder::DecodeFunctionBody : public DecodingState {
+ public:
+ explicit DecodeFunctionBody(SectionBuffer* section_buffer,
+ size_t buffer_offset, size_t function_length,
+ size_t num_remaining_functions)
+ : section_buffer_(section_buffer),
+ buffer_offset_(buffer_offset),
+ size_(function_length),
+ num_remaining_functions_(num_remaining_functions) {}
+
+ size_t size() const override { return size_; }
+ uint8_t* buffer() override {
+ return section_buffer_->bytes() + buffer_offset_;
+ }
+ size_t num_remaining_functions() const { return num_remaining_functions_; }
+ size_t buffer_offset() const { return buffer_offset_; }
+ SectionBuffer* section_buffer() const { return section_buffer_; }
+
+ std::unique_ptr<DecodingState> Next(StreamingDecoder* streaming) override;
+
+ private:
+ SectionBuffer* section_buffer_;
+ size_t buffer_offset_;
+ size_t size_;
+ size_t num_remaining_functions_;
+};
+
+size_t StreamingDecoder::DecodeVarInt32::ReadBytes(
+ StreamingDecoder* streaming, Vector<const uint8_t> bytes) {
+ size_t bytes_read = std::min(bytes.size(), remaining());
+ memcpy(buffer() + offset(), &bytes.first(), bytes_read);
+ streaming->decoder()->Reset(buffer(), buffer() + offset() + bytes_read);
+ value_ = streaming->decoder()->consume_i32v();
+ // The number of bytes we actually needed to read.
+ DCHECK_GT(streaming->decoder()->pc(), buffer());
+ bytes_needed_ = static_cast<size_t>(streaming->decoder()->pc() - buffer());
+
+ if (streaming->decoder()->failed()) {
+ if (offset() + bytes_read < size()) {
+ // We did not decode a full buffer, so we ignore errors. Maybe the
+ // decoding will succeed when we have more bytes.
+ streaming->decoder()->Reset(nullptr, nullptr);
+ }
+ set_offset(offset() + bytes_read);
+ return bytes_read;
+ } else {
+ DCHECK_GT(bytes_needed_, offset());
+ size_t result = bytes_needed_ - offset();
+ // We read all the bytes we needed.
+ set_offset(size());
+ return result;
+ }
+}
+
+std::unique_ptr<StreamingDecoder::DecodingState>
+StreamingDecoder::DecodeVarInt32::Next(StreamingDecoder* streaming) {
+ if (streaming->decoder()->failed()) {
+ return std::unique_ptr<DecodingState>(nullptr);
+ }
+ if (value() > max_value_) {
+ streaming->decoder()->errorf(buffer(), "size > maximum function size: %zu",
+ value());
+ return std::unique_ptr<DecodingState>(nullptr);
+ }
+
+ return NextWithValue(streaming);
+}
+
+#define BYTES(x) (x & 0xff), (x >> 8) & 0xff, (x >> 16) & 0xff, (x >> 24) & 0xff
+// Decode the module header. The error state of the decoder stores the result.
+void StreamingDecoder::DecodeModuleHeader::CheckHeader(Decoder* decoder) {
+ // TODO(ahaas): Share code with the module-decoder.
+ decoder->Reset(buffer(), buffer() + size());
+ uint32_t magic_word = decoder->consume_u32("wasm magic");
+ if (magic_word != kWasmMagic) {
+ decoder->errorf(buffer(),
+ "expected magic word %02x %02x %02x %02x, "
+ "found %02x %02x %02x %02x",
+ BYTES(kWasmMagic), BYTES(magic_word));
+ }
+ uint32_t magic_version = decoder->consume_u32("wasm version");
+ if (magic_version != kWasmVersion) {
+ decoder->errorf(buffer(),
+ "expected version %02x %02x %02x %02x, "
+ "found %02x %02x %02x %02x",
+ BYTES(kWasmVersion), BYTES(magic_version));
+ }
+}
+#undef BYTES
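Concretely, the eight bytes this state validates are the magic word "\0asm" followed by the little-endian version word; assuming the MVP constants kWasmMagic = 0x6d736100 and kWasmVersion = 0x1, a well-formed header looks like:

    // A valid module header, byte for byte (two little-endian u32 words):
    static const uint8_t kValidHeader[] = {
        0x00, 0x61, 0x73, 0x6d,  // magic: '\0' 'a' 's' 'm'
        0x01, 0x00, 0x00, 0x00,  // version: 1
    };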
+
+std::unique_ptr<StreamingDecoder::DecodingState>
+StreamingDecoder::DecodeModuleHeader::Next(StreamingDecoder* streaming) {
+ CheckHeader(streaming->decoder());
+ return std::unique_ptr<DecodingState>(new DecodeSectionID());
+}
+
+std::unique_ptr<StreamingDecoder::DecodingState>
+StreamingDecoder::DecodeSectionID::Next(StreamingDecoder* streaming) {
+ return std::unique_ptr<DecodingState>(new DecodeSectionLength(id()));
+}
+
+std::unique_ptr<StreamingDecoder::DecodingState>
+StreamingDecoder::DecodeSectionLength::NextWithValue(
+ StreamingDecoder* streaming) {
+ SectionBuffer* buf = streaming->CreateNewBuffer(
+ section_id(), value(),
+ Vector<const uint8_t>(buffer(), static_cast<int>(bytes_needed())));
+ if (value() == 0) {
+ // There is no payload, so we go to the next section immediately.
+ return std::unique_ptr<DecodingState>(new DecodeSectionID());
+ } else if (section_id() == SectionCode::kCodeSectionCode) {
+ // We reached the code section. All functions of the code section are put
+ // into the same SectionBuffer.
+ return std::unique_ptr<DecodingState>(new DecodeNumberOfFunctions(buf));
+ } else {
+ return std::unique_ptr<DecodingState>(new DecodeSectionPayload(buf));
+ }
+}
+
+std::unique_ptr<StreamingDecoder::DecodingState>
+StreamingDecoder::DecodeSectionPayload::Next(StreamingDecoder* streaming) {
+ return std::unique_ptr<DecodingState>(new DecodeSectionID());
+}
+
+std::unique_ptr<StreamingDecoder::DecodingState>
+StreamingDecoder::DecodeNumberOfFunctions::NextWithValue(
+ StreamingDecoder* streaming) {
+ // Copy the bytes we read into the section buffer.
+ if (section_buffer_->payload_length() >= bytes_needed()) {
+ memcpy(section_buffer_->bytes() + section_buffer_->payload_offset(),
+ buffer(), bytes_needed());
+ } else {
+ streaming->decoder()->error("Invalid code section length");
+ return std::unique_ptr<DecodingState>(new DecodeSectionID());
+ }
+
+ // {value} is the number of functions.
+ if (value() > 0) {
+ return std::unique_ptr<DecodingState>(new DecodeFunctionLength(
+ section_buffer(), section_buffer()->payload_offset() + bytes_needed(),
+ value()));
+ } else {
+ return std::unique_ptr<DecodingState>(new DecodeSectionID());
+ }
+}
+
+std::unique_ptr<StreamingDecoder::DecodingState>
+StreamingDecoder::DecodeFunctionLength::NextWithValue(
+ StreamingDecoder* streaming) {
+ // Copy the bytes we read into the section buffer.
+ if (section_buffer_->length() >= buffer_offset_ + bytes_needed()) {
+ memcpy(section_buffer_->bytes() + buffer_offset_, buffer(), bytes_needed());
+ } else {
+ streaming->decoder()->error("Invalid code section length");
+ return std::unique_ptr<DecodingState>(new DecodeSectionID());
+ }
+
+ // {value} is the length of the function.
+ if (value() == 0) {
+ streaming->decoder()->errorf(buffer(), "Invalid function length (0)");
+ return std::unique_ptr<DecodingState>(nullptr);
+ } else if (buffer_offset() + bytes_needed() + value() >
+ section_buffer()->length()) {
+ streaming->decoder()->errorf(buffer(), "not enough code section bytes");
+ return std::unique_ptr<DecodingState>(nullptr);
+ }
+
+ return std::unique_ptr<DecodingState>(
+ new DecodeFunctionBody(section_buffer(), buffer_offset() + bytes_needed(),
+ value(), num_remaining_functions()));
+}
+
+std::unique_ptr<StreamingDecoder::DecodingState>
+StreamingDecoder::DecodeFunctionBody::Next(StreamingDecoder* streaming) {
+ // TODO(ahaas): Start compilation of the function here.
+ if (num_remaining_functions() != 0) {
+ return std::unique_ptr<DecodingState>(new DecodeFunctionLength(
+ section_buffer(), buffer_offset() + size(), num_remaining_functions()));
+ } else {
+ if (buffer_offset() + size() != section_buffer()->length()) {
+ streaming->decoder()->Reset(
+ section_buffer()->bytes(),
+ section_buffer()->bytes() + section_buffer()->length());
+ streaming->decoder()->errorf(
+ section_buffer()->bytes() + buffer_offset() + size(),
+ "not all code section bytes were used");
+ return std::unique_ptr<DecodingState>(nullptr);
+ }
+ return std::unique_ptr<DecodingState>(new DecodeSectionID());
+ }
+}
+
+StreamingDecoder::StreamingDecoder(Isolate* isolate)
+ : isolate_(isolate),
+ // A module always starts with a module header.
+ state_(new DecodeModuleHeader()),
+ decoder_(nullptr, nullptr) {
+ USE(isolate_);
+}
diff --git a/deps/v8/src/wasm/streaming-decoder.h b/deps/v8/src/wasm/streaming-decoder.h
new file mode 100644
index 0000000000..349e013e6c
--- /dev/null
+++ b/deps/v8/src/wasm/streaming-decoder.h
@@ -0,0 +1,152 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_STREAMING_DECODER_H_
+#define V8_WASM_STREAMING_DECODER_H_
+
+#include <vector>
+#include "src/isolate.h"
+#include "src/wasm/decoder.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+// The StreamingDecoder takes a sequence of byte arrays, each received by a
+// call to {OnBytesReceived}, and extracts the bytes that belong to section
+// payloads and function bodies.
+class V8_EXPORT_PRIVATE StreamingDecoder {
+ public:
+ explicit StreamingDecoder(Isolate* isolate);
+
+ // The buffer passed into OnBytesReceived is owned by the caller.
+ void OnBytesReceived(Vector<const uint8_t> bytes);
+
+ // Finishes the stream and returns the compiled WasmModuleObject.
+ MaybeHandle<WasmModuleObject> Finish();
+
+ // Finishes the streaming and returns true if no error was detected.
+ bool FinishForTesting();
+
+ private:
+ // The SectionBuffer is the data object for the content of a single section.
+ // It stores all bytes of the section (including section id and section
+ // length), and the offset where the actual payload starts.
+ class SectionBuffer {
+ public:
+ // id: The section id.
+ // payload_length: The length of the payload.
+ // length_bytes: The section length, as it is encoded in the module bytes.
+ SectionBuffer(uint8_t id, size_t payload_length,
+ Vector<const uint8_t> length_bytes)
+ : // ID + length + payload
+ length_(1 + length_bytes.length() + payload_length),
+ bytes_(new uint8_t[length_]),
+ payload_offset_(1 + length_bytes.length()) {
+ bytes_[0] = id;
+ memcpy(bytes_.get() + 1, &length_bytes.first(), length_bytes.length());
+ }
+ uint8_t* bytes() const { return bytes_.get(); }
+ size_t length() const { return length_; }
+ size_t payload_offset() const { return payload_offset_; }
+ size_t payload_length() const { return length_ - payload_offset_; }
+
+ private:
+ size_t length_;
+ std::unique_ptr<uint8_t[]> bytes_;
+ size_t payload_offset_;
+ };
+
+ // The decoding of a stream of wasm module bytes is organized in states. Each
+ // state provides a buffer to store the bytes required for the current state,
+ // information on how many bytes have already been received, how many bytes
+ // are needed, and a {Next} function which starts the next state once all
+ // bytes of the current state were received.
+ //
+ // The states change according to the following state diagram:
+ //
+  //           Start
+  //             |
+  //             |
+  //             v
+  //      DecodeModuleHeader
+  //             |   _________________________________________
+  //             |   |                                        |
+  //             v   v                                        |
+  //  DecodeSectionID --> DecodeSectionLength --> DecodeSectionPayload
+  //         A                  |
+  //         |                  | (if the section id == code)
+  //         |                  v
+  //         |      DecodeNumberOfFunctions --> DecodeFunctionLength
+  //         |                                     A         |
+  //         |                                     |         |
+  //         | (after all functions were read)    |         v
+  //         ------------------------------------ DecodeFunctionBody
+ //
+ class DecodingState {
+ public:
+ virtual ~DecodingState() = default;
+
+ // Reads the bytes for the current state and returns the number of read
+ // bytes.
+ virtual size_t ReadBytes(StreamingDecoder* streaming,
+ Vector<const uint8_t> bytes);
+
+ // Returns the next state of the streaming decoding.
+ virtual std::unique_ptr<DecodingState> Next(
+ StreamingDecoder* streaming) = 0;
+ // The number of bytes to be received.
+ virtual size_t size() const = 0;
+ // The buffer to store the received bytes.
+ virtual uint8_t* buffer() = 0;
+ // The number of bytes which were already received.
+ size_t offset() const { return offset_; }
+ void set_offset(size_t value) { offset_ = value; }
+ // The number of bytes which are still needed.
+ size_t remaining() const { return size() - offset(); }
+ bool is_finished() const { return offset() == size(); }
+ // A flag to indicate if finishing the streaming decoder is allowed without
+ // error.
+ virtual bool is_finishing_allowed() const { return false; }
+
+ private:
+ size_t offset_ = 0;
+ };
+
+ // Forward declarations of the concrete states. This is needed so that they
+ // can access private members of the StreamingDecoder.
+ class DecodeVarInt32;
+ class DecodeModuleHeader;
+ class DecodeSectionID;
+ class DecodeSectionLength;
+ class DecodeSectionPayload;
+ class DecodeNumberOfFunctions;
+ class DecodeFunctionLength;
+ class DecodeFunctionBody;
+
+ // Creates a buffer for the next section of the module.
+ SectionBuffer* CreateNewBuffer(uint8_t id, size_t length,
+ Vector<const uint8_t> length_bytes) {
+ section_buffers_.emplace_back(new SectionBuffer(id, length, length_bytes));
+ return section_buffers_.back().get();
+ }
+
+ Decoder* decoder() { return &decoder_; }
+
+ Isolate* isolate_;
+ std::unique_ptr<DecodingState> state_;
+ // The decoder is an instance variable because we use it for error handling.
+ Decoder decoder_;
+ std::vector<std::unique_ptr<SectionBuffer>> section_buffers_;
+ size_t total_size_ = 0;
+
+ DISALLOW_COPY_AND_ASSIGN(StreamingDecoder);
+};
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_STREAMING_DECODER_H_
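A usage sketch for the new interface (bytes, size, and split are hypothetical; the chunking is arbitrary). Finish() is still unimplemented in this patch, so the testing helper is the only way to query the outcome; it returns true iff no error occurred and the stream stopped at a point where finishing is allowed:

    // Feed a wire image to the streaming decoder in two chunks.
    StreamingDecoder streaming(isolate);
    streaming.OnBytesReceived(Vector<const uint8_t>(bytes, split));
    streaming.OnBytesReceived(Vector<const uint8_t>(bytes + split, size - split));
    bool no_error = streaming.FinishForTesting();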
diff --git a/deps/v8/src/wasm/wasm-code-specialization.cc b/deps/v8/src/wasm/wasm-code-specialization.cc
index 1b6a81900b..53e3fe699c 100644
--- a/deps/v8/src/wasm/wasm-code-specialization.cc
+++ b/deps/v8/src/wasm/wasm-code-specialization.cc
@@ -41,8 +41,7 @@ int AdvanceSourcePositionTableIterator(SourcePositionTableIterator& iterator,
class PatchDirectCallsHelper {
public:
PatchDirectCallsHelper(WasmInstanceObject* instance, Code* code)
- : source_pos_it(code->source_position_table()),
- decoder(nullptr, nullptr) {
+ : source_pos_it(code->SourcePositionTable()), decoder(nullptr, nullptr) {
FixedArray* deopt_data = code->deoptimization_data();
DCHECK_EQ(2, deopt_data->length());
WasmCompiledModule* comp_mod = instance->compiled_module();
@@ -133,7 +132,7 @@ bool CodeSpecialization::ApplyToWholeInstance(
for (int num_wasm_functions = static_cast<int>(wasm_functions->size());
func_index < num_wasm_functions; ++func_index) {
Code* wasm_function = Code::cast(code_table->get(func_index));
- if (wasm_function->builtin_index() == Builtins::kWasmCompileLazy) continue;
+ if (wasm_function->kind() != Code::WASM_FUNCTION) continue;
changed |= ApplyToWasmCode(wasm_function, icache_flush_mode);
}
diff --git a/deps/v8/src/wasm/wasm-debug.cc b/deps/v8/src/wasm/wasm-debug.cc
index adfbd0c168..f942c92127 100644
--- a/deps/v8/src/wasm/wasm-debug.cc
+++ b/deps/v8/src/wasm/wasm-debug.cc
@@ -7,6 +7,7 @@
#include "src/assembler-inl.h"
#include "src/assert-scope.h"
#include "src/compiler/wasm-compiler.h"
+#include "src/debug/debug-scopes.h"
#include "src/debug/debug.h"
#include "src/factory.h"
#include "src/frames-inl.h"
@@ -24,6 +25,42 @@ using namespace v8::internal::wasm;
namespace {
+template <bool internal, typename... Args>
+Handle<String> PrintFToOneByteString(Isolate* isolate, const char* format,
+ Args... args) {
+ // Maximum length of a formatted value name ("param#%d", "local#%d",
+ // "global#%d").
+ constexpr int kMaxStrLen = 18;
+ EmbeddedVector<char, kMaxStrLen> value;
+ int len = SNPrintF(value, format, args...);
+ CHECK(len > 0 && len < value.length());
+ Vector<uint8_t> name = Vector<uint8_t>::cast(value.SubVector(0, len));
+ return internal
+ ? isolate->factory()->InternalizeOneByteString(name)
+ : isolate->factory()->NewStringFromOneByte(name).ToHandleChecked();
+}
+
+Handle<Object> WasmValToValueObject(Isolate* isolate, WasmVal value) {
+ switch (value.type) {
+ case kWasmI32:
+ if (Smi::IsValid(value.to<int32_t>()))
+ return handle(Smi::FromInt(value.to<int32_t>()), isolate);
+ return PrintFToOneByteString<false>(isolate, "%d", value.to<int32_t>());
+ case kWasmI64:
+ if (Smi::IsValid(value.to<int64_t>()))
+ return handle(Smi::FromIntptr(value.to<int64_t>()), isolate);
+ return PrintFToOneByteString<false>(isolate, "%" PRId64,
+ value.to<int64_t>());
+ case kWasmF32:
+ return isolate->factory()->NewNumber(value.to<float>());
+ case kWasmF64:
+ return isolate->factory()->NewNumber(value.to<double>());
+ default:
+ UNIMPLEMENTED();
+ return isolate->factory()->undefined_value();
+ }
+}
+
// Forward declaration.
class InterpreterHandle;
InterpreterHandle* GetInterpreterHandle(WasmDebugInfo* debug_info);
@@ -68,9 +105,12 @@ class InterpreterHandle {
public:
// Initialize in the right order, using helper methods to make this possible.
// WasmInterpreter has to be allocated in place, since it is not movable.
- InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info)
+ InterpreterHandle(Isolate* isolate, WasmDebugInfo* debug_info,
+ WasmInstance* external_instance = nullptr)
: instance_(debug_info->wasm_instance()->compiled_module()->module()),
- interpreter_(isolate, GetBytesEnv(&instance_, debug_info)),
+ interpreter_(isolate, GetBytesEnv(external_instance ? external_instance
+ : &instance_,
+ debug_info)),
isolate_(isolate) {
DisallowHeapAllocation no_gc;
@@ -303,10 +343,9 @@ class InterpreterHandle {
WasmInterpreter::Thread* thread = interpreter()->GetThread(0);
DCHECK_LT(0, thread->GetFrameCount());
- wasm::InterpretedFrame frame =
- thread->GetFrame(thread->GetFrameCount() - 1);
- return compiled_module->GetFunctionOffset(frame.function()->func_index) +
- frame.pc();
+ auto frame = thread->GetFrame(thread->GetFrameCount() - 1);
+ return compiled_module->GetFunctionOffset(frame->function()->func_index) +
+ frame->pc();
}
std::vector<std::pair<uint32_t, int>> GetInterpretedStack(
@@ -320,8 +359,8 @@ class InterpreterHandle {
std::vector<std::pair<uint32_t, int>> stack;
stack.reserve(frame_range.second - frame_range.first);
for (uint32_t fp = frame_range.first; fp < frame_range.second; ++fp) {
- wasm::InterpretedFrame frame = thread->GetFrame(fp);
- stack.emplace_back(frame.function()->func_index, frame.pc());
+ auto frame = thread->GetFrame(fp);
+ stack.emplace_back(frame->function()->func_index, frame->pc());
}
return stack;
}
@@ -336,8 +375,7 @@ class InterpreterHandle {
DCHECK_LE(0, idx);
DCHECK_GT(frame_range.second - frame_range.first, idx);
- return std::unique_ptr<wasm::InterpretedFrame>(new wasm::InterpretedFrame(
- thread->GetMutableFrame(frame_range.first + idx)));
+ return thread->GetFrame(frame_range.first + idx);
}
void Unwind(Address frame_pointer) {
@@ -366,8 +404,92 @@ class InterpreterHandle {
}
void UpdateMemory(JSArrayBuffer* new_memory) {
- instance_.mem_start = reinterpret_cast<byte*>(new_memory->backing_store());
- CHECK(new_memory->byte_length()->ToUint32(&instance_.mem_size));
+ byte* mem_start = reinterpret_cast<byte*>(new_memory->backing_store());
+ uint32_t mem_size;
+ CHECK(new_memory->byte_length()->ToUint32(&mem_size));
+ interpreter()->UpdateMemory(mem_start, mem_size);
+ }
+
+ Handle<JSArray> GetScopeDetails(Address frame_pointer, int frame_index,
+ Handle<WasmInstanceObject> instance) {
+ auto frame = GetInterpretedFrame(frame_pointer, frame_index);
+
+ Handle<FixedArray> global_scope =
+ isolate_->factory()->NewFixedArray(ScopeIterator::kScopeDetailsSize);
+ global_scope->set(ScopeIterator::kScopeDetailsTypeIndex,
+ Smi::FromInt(ScopeIterator::ScopeTypeGlobal));
+ Handle<JSObject> global_scope_object =
+ isolate_->factory()->NewJSObjectWithNullProto();
+ global_scope->set(ScopeIterator::kScopeDetailsObjectIndex,
+ *global_scope_object);
+
+ // TODO(clemensh): Add globals to the global scope.
+
+ if (instance->has_memory_buffer()) {
+ Handle<String> name = isolate_->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("memory"));
+ Handle<JSArrayBuffer> memory_buffer(instance->memory_buffer(), isolate_);
+ uint32_t byte_length;
+ CHECK(memory_buffer->byte_length()->ToUint32(&byte_length));
+ Handle<JSTypedArray> uint8_array = isolate_->factory()->NewJSTypedArray(
+ kExternalUint8Array, memory_buffer, 0, byte_length);
+ JSObject::SetOwnPropertyIgnoreAttributes(global_scope_object, name,
+ uint8_array, NONE)
+ .Check();
+ }
+
+ Handle<FixedArray> local_scope =
+ isolate_->factory()->NewFixedArray(ScopeIterator::kScopeDetailsSize);
+ local_scope->set(ScopeIterator::kScopeDetailsTypeIndex,
+ Smi::FromInt(ScopeIterator::ScopeTypeLocal));
+ Handle<JSObject> local_scope_object =
+ isolate_->factory()->NewJSObjectWithNullProto();
+ local_scope->set(ScopeIterator::kScopeDetailsObjectIndex,
+ *local_scope_object);
+
+ // Fill parameters and locals.
+ int num_params = frame->GetParameterCount();
+ int num_locals = frame->GetLocalCount();
+ DCHECK_LE(num_params, num_locals);
+ for (int i = 0; i < num_locals; ++i) {
+ // TODO(clemensh): Use names from name section if present.
+ const char* label = i < num_params ? "param#%d" : "local#%d";
+ Handle<String> name = PrintFToOneByteString<true>(isolate_, label, i);
+ WasmVal value = frame->GetLocalValue(i);
+ Handle<Object> value_obj = WasmValToValueObject(isolate_, value);
+ JSObject::SetOwnPropertyIgnoreAttributes(local_scope_object, name,
+ value_obj, NONE)
+ .Check();
+ }
+
+ // Fill stack values.
+ int stack_count = frame->GetStackHeight();
+ // Use an object without prototype instead of an Array, for nicer display
+ // in DevTools. For Arrays, the length field and the prototype are shown,
+ // which is not meaningful here.
+ Handle<JSObject> stack_obj =
+ isolate_->factory()->NewJSObjectWithNullProto();
+ for (int i = 0; i < stack_count; ++i) {
+ WasmVal value = frame->GetStackValue(i);
+ Handle<Object> value_obj = WasmValToValueObject(isolate_, value);
+ JSObject::SetOwnElementIgnoreAttributes(
+ stack_obj, static_cast<uint32_t>(i), value_obj, NONE)
+ .Check();
+ }
+ Handle<String> stack_name = isolate_->factory()->InternalizeOneByteString(
+ STATIC_CHAR_VECTOR("stack"));
+ JSObject::SetOwnPropertyIgnoreAttributes(local_scope_object, stack_name,
+ stack_obj, NONE)
+ .Check();
+
+ Handle<JSArray> global_jsarr =
+ isolate_->factory()->NewJSArrayWithElements(global_scope);
+ Handle<JSArray> local_jsarr =
+ isolate_->factory()->NewJSArrayWithElements(local_scope);
+ Handle<FixedArray> all_scopes = isolate_->factory()->NewFixedArray(2);
+ all_scopes->set(0, *global_jsarr);
+ all_scopes->set(1, *local_jsarr);
+ return isolate_->factory()->NewJSArrayWithElements(all_scopes);
}
};
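Editor's note: the array returned by GetScopeDetails holds two [scope_type, scope_object] pairs, global first, matching the ScopeIterator layout the debugger frontend already consumes. A rough model of the resulting shape in plain C++ (keys and sample values are invented for illustration):

#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

int main() {
  using ScopeObject = std::map<std::string, std::string>;
  // Pair layout mirrors [kScopeDetailsTypeIndex, kScopeDetailsObjectIndex].
  std::vector<std::pair<const char*, ScopeObject>> scopes = {
      {"global", {{"memory", "<Uint8Array over the instance memory>"}}},
      {"local",
       {{"param#0", "i32:1"}, {"local#1", "i64:7"}, {"stack", "{0: f64:0.5}"}}},
  };
  for (const auto& [type, obj] : scopes) {
    std::printf("%s scope:\n", type);
    for (const auto& [key, val] : obj)
      std::printf("  %s = %s\n", key.c_str(), val.c_str());
  }
}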
@@ -453,12 +575,25 @@ void RedirectCallsitesInInstance(Isolate* isolate, WasmInstanceObject* instance,
} // namespace
Handle<WasmDebugInfo> WasmDebugInfo::New(Handle<WasmInstanceObject> instance) {
- Isolate* isolate = instance->GetIsolate();
- Factory* factory = isolate->factory();
+ DCHECK(!instance->has_debug_info());
+ Factory* factory = instance->GetIsolate()->factory();
Handle<FixedArray> arr = factory->NewFixedArray(kFieldCount, TENURED);
arr->set(kWrapperTracerHeader, Smi::kZero);
arr->set(kInstance, *instance);
- return Handle<WasmDebugInfo>::cast(arr);
+ Handle<WasmDebugInfo> debug_info = Handle<WasmDebugInfo>::cast(arr);
+ instance->set_debug_info(*debug_info);
+ return debug_info;
+}
+
+WasmInterpreter* WasmDebugInfo::SetupForTesting(
+ Handle<WasmInstanceObject> instance_obj, WasmInstance* instance) {
+ Handle<WasmDebugInfo> debug_info = WasmDebugInfo::New(instance_obj);
+ Isolate* isolate = instance_obj->GetIsolate();
+ InterpreterHandle* cpp_handle =
+ new InterpreterHandle(isolate, *debug_info, instance);
+ Handle<Object> handle = Managed<InterpreterHandle>::New(isolate, cpp_handle);
+ debug_info->set(kInterpreterHandle, *handle);
+ return cpp_handle->interpreter();
}
bool WasmDebugInfo::IsDebugInfo(Object* object) {
@@ -555,3 +690,12 @@ void WasmDebugInfo::UpdateMemory(JSArrayBuffer* new_memory) {
if (!interp_handle) return;
interp_handle->UpdateMemory(new_memory);
}
+
+// static
+Handle<JSArray> WasmDebugInfo::GetScopeDetails(Handle<WasmDebugInfo> debug_info,
+ Address frame_pointer,
+ int frame_index) {
+ InterpreterHandle* interp_handle = GetInterpreterHandle(*debug_info);
+ Handle<WasmInstanceObject> instance(debug_info->wasm_instance());
+ return interp_handle->GetScopeDetails(frame_pointer, frame_index, instance);
+}
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index e982cc7f99..8c2547bf3e 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -42,7 +42,7 @@ void f64_nearest_int_wrapper(double* param) {
}
void int64_to_float32_wrapper(int64_t* input, float* output) {
- *output = static_cast<float>(*input);
+ *output = static_cast<float>(ReadUnalignedValue<int64_t>(input));
}
void uint64_to_float32_wrapper(uint64_t* input, float* output) {
@@ -75,12 +75,13 @@ void uint64_to_float32_wrapper(uint64_t* input, float* output) {
*output = result;
#else
- *output = static_cast<float>(*input);
+ *output = static_cast<float>(ReadUnalignedValue<uint64_t>(input));
#endif
}
void int64_to_float64_wrapper(int64_t* input, double* output) {
- *output = static_cast<double>(*input);
+ WriteDoubleValue(output,
+ static_cast<double>(ReadUnalignedValue<int64_t>(input)));
}
void uint64_to_float64_wrapper(uint64_t* input, double* output) {
@@ -100,7 +101,8 @@ void uint64_to_float64_wrapper(uint64_t* input, double* output) {
*output = result;
#else
- *output = static_cast<double>(*input);
+ WriteDoubleValue(output,
+ static_cast<double>(ReadUnalignedValue<uint64_t>(input)));
#endif
}
@@ -110,7 +112,7 @@ int32_t float32_to_int64_wrapper(float* input, int64_t* output) {
// not within int64 range.
if (*input >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
*input < static_cast<float>(std::numeric_limits<int64_t>::max())) {
- *output = static_cast<int64_t>(*input);
+ WriteUnalignedValue<int64_t>(output, static_cast<int64_t>(*input));
return 1;
}
return 0;
@@ -122,7 +124,7 @@ int32_t float32_to_uint64_wrapper(float* input, uint64_t* output) {
// not within uint64 range.
if (*input > -1.0 &&
*input < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
- *output = static_cast<uint64_t>(*input);
+ WriteUnalignedValue<uint64_t>(output, static_cast<uint64_t>(*input));
return 1;
}
return 0;
@@ -132,9 +134,10 @@ int32_t float64_to_int64_wrapper(double* input, int64_t* output) {
// We use "<" here to check the upper bound because of rounding problems: With
// "<=" some inputs would be considered within int64 range which are actually
// not within int64 range.
- if (*input >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
- *input < static_cast<double>(std::numeric_limits<int64_t>::max())) {
- *output = static_cast<int64_t>(*input);
+ double input_val = ReadDoubleValue(input);
+ if (input_val >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
+ input_val < static_cast<double>(std::numeric_limits<int64_t>::max())) {
+ WriteUnalignedValue<int64_t>(output, static_cast<int64_t>(input_val));
return 1;
}
return 0;
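Editor's note: the asymmetry between the two bounds is worth spelling out. The double closest to INT64_MIN is exactly -2^63, so ">=" is safe on the lower bound, while INT64_MAX (2^63 - 1) is not representable as a double and the cast rounds up to 2^63, which would overflow; hence "<" on the upper bound. A two-line check:

#include <cstdint>
#include <cstdio>
#include <limits>

int main() {
  double upper = static_cast<double>(std::numeric_limits<int64_t>::max());
  // 2^63 - 1 rounds up to exactly 2^63, already outside int64 range: prints 1.
  std::printf("%d\n", upper == 9223372036854775808.0);
}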
@@ -144,46 +147,55 @@ int32_t float64_to_uint64_wrapper(double* input, uint64_t* output) {
// We use "<" here to check the upper bound because of rounding problems: With
// "<=" some inputs would be considered within uint64 range which are actually
// not within uint64 range.
- if (*input > -1.0 &&
- *input < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
- *output = static_cast<uint64_t>(*input);
+ double input_val = ReadDoubleValue(input);
+ if (input_val > -1.0 &&
+ input_val < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
+ WriteUnalignedValue<uint64_t>(output, static_cast<uint64_t>(input_val));
return 1;
}
return 0;
}
int32_t int64_div_wrapper(int64_t* dst, int64_t* src) {
- if (*src == 0) {
+ int64_t src_val = ReadUnalignedValue<int64_t>(src);
+ int64_t dst_val = ReadUnalignedValue<int64_t>(dst);
+ if (src_val == 0) {
return 0;
}
- if (*src == -1 && *dst == std::numeric_limits<int64_t>::min()) {
+ if (src_val == -1 && dst_val == std::numeric_limits<int64_t>::min()) {
return -1;
}
- *dst /= *src;
+ WriteUnalignedValue<int64_t>(dst, dst_val / src_val);
return 1;
}
int32_t int64_mod_wrapper(int64_t* dst, int64_t* src) {
- if (*src == 0) {
+ int64_t src_val = ReadUnalignedValue<int64_t>(src);
+ int64_t dst_val = ReadUnalignedValue<int64_t>(dst);
+ if (src_val == 0) {
return 0;
}
- *dst %= *src;
+ WriteUnalignedValue<int64_t>(dst, dst_val % src_val);
return 1;
}
int32_t uint64_div_wrapper(uint64_t* dst, uint64_t* src) {
- if (*src == 0) {
+ uint64_t src_val = ReadUnalignedValue<uint64_t>(src);
+ uint64_t dst_val = ReadUnalignedValue<uint64_t>(dst);
+ if (src_val == 0) {
return 0;
}
- *dst /= *src;
+ WriteUnalignedValue<uint64_t>(dst, dst_val / src_val);
return 1;
}
int32_t uint64_mod_wrapper(uint64_t* dst, uint64_t* src) {
- if (*src == 0) {
+ uint64_t src_val = ReadUnalignedValue<uint64_t>(src);
+ uint64_t dst_val = ReadUnalignedValue<uint64_t>(dst);
+ if (src_val == 0) {
return 0;
}
- *dst %= *src;
+ WriteUnalignedValue<uint64_t>(dst, dst_val % src_val);
return 1;
}
@@ -192,7 +204,8 @@ uint32_t word32_ctz_wrapper(uint32_t* input) {
}
uint32_t word64_ctz_wrapper(uint64_t* input) {
- return static_cast<uint32_t>(base::bits::CountTrailingZeros64(*input));
+ return static_cast<uint32_t>(
+ base::bits::CountTrailingZeros64(ReadUnalignedValue<uint64_t>(input)));
}
uint32_t word32_popcnt_wrapper(uint32_t* input) {
@@ -200,7 +213,8 @@ uint32_t word32_popcnt_wrapper(uint32_t* input) {
}
uint32_t word64_popcnt_wrapper(uint64_t* input) {
- return static_cast<uint32_t>(base::bits::CountPopulation(*input));
+ return static_cast<uint32_t>(
+ base::bits::CountPopulation(ReadUnalignedValue<uint64_t>(input)));
}
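Editor's note: all of these wrappers now funnel pointer accesses through Read/WriteUnalignedValue because the slot they receive comes from the wasm value stack and need not be naturally aligned; a direct *input dereference is undefined behavior on targets that trap on misaligned loads. The usual portable implementation of such helpers (a sketch, not V8's actual definition) is a memcpy that the compiler folds into a single load or store where the target allows it:

#include <cstdint>
#include <cstring>

template <typename T>
T ReadUnaligned(const void* p) {
  T value;
  std::memcpy(&value, p, sizeof(T));  // well-defined for any alignment
  return value;
}

template <typename T>
void WriteUnaligned(void* p, T value) {
  std::memcpy(p, &value, sizeof(T));
}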
void float64_pow_wrapper(double* param0, double* param1) {
diff --git a/deps/v8/src/wasm/wasm-interpreter.cc b/deps/v8/src/wasm/wasm-interpreter.cc
index 66e4172850..d344d1fae4 100644
--- a/deps/v8/src/wasm/wasm-interpreter.cc
+++ b/deps/v8/src/wasm/wasm-interpreter.cc
@@ -601,56 +601,15 @@ inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
DCHECK_EQ(0, instance->mem_size % WasmModule::kPageSize);
uint32_t old_pages = instance->mem_size / WasmModule::kPageSize;
- // If an instance is set, execute GrowMemory on the instance. This will also
- // update the WasmInstance struct used here.
- if (!instance_obj.is_null()) {
- Isolate* isolate = instance_obj.ToHandleChecked()->GetIsolate();
- int32_t ret = WasmInstanceObject::GrowMemory(
- isolate, instance_obj.ToHandleChecked(), delta_pages);
- // Some sanity checks.
- DCHECK_EQ(ret == -1 ? old_pages : old_pages + delta_pages,
- instance->mem_size / WasmModule::kPageSize);
- DCHECK(ret == -1 || static_cast<uint32_t>(ret) == old_pages);
- return ret;
- }
-
- // TODO(ahaas): Move memory allocation to wasm-module.cc for better
- // encapsulation.
- if (delta_pages > FLAG_wasm_max_mem_pages ||
- delta_pages > instance->module->max_mem_pages) {
- return -1;
- }
-
- uint32_t new_pages = old_pages + delta_pages;
- if (new_pages > FLAG_wasm_max_mem_pages ||
- new_pages > instance->module->max_mem_pages) {
- return -1;
- }
-
- byte* new_mem_start;
- if (instance->mem_size == 0) {
- // TODO(gdeepti): Fix bounds check to take into account size of memtype.
- new_mem_start = static_cast<byte*>(
- calloc(new_pages * WasmModule::kPageSize, sizeof(byte)));
- if (!new_mem_start) return -1;
- } else {
- DCHECK_NOT_NULL(instance->mem_start);
- if (EnableGuardRegions()) {
- v8::base::OS::Unprotect(instance->mem_start,
- new_pages * WasmModule::kPageSize);
- new_mem_start = instance->mem_start;
- } else {
- new_mem_start = static_cast<byte*>(
- realloc(instance->mem_start, new_pages * WasmModule::kPageSize));
- if (!new_mem_start) return -1;
- }
- // Zero initializing uninitialized memory from realloc
- memset(new_mem_start + old_pages * WasmModule::kPageSize, 0,
- delta_pages * WasmModule::kPageSize);
- }
- instance->mem_start = new_mem_start;
- instance->mem_size = new_pages * WasmModule::kPageSize;
- return static_cast<int32_t>(old_pages);
+ Isolate* isolate = instance_obj.ToHandleChecked()->GetIsolate();
+ int32_t ret = WasmInstanceObject::GrowMemory(
+ isolate, instance_obj.ToHandleChecked(), delta_pages);
+ // Some sanity checks.
+ DCHECK_EQ(ret == -1 ? old_pages : old_pages + delta_pages,
+ instance->mem_size / WasmModule::kPageSize);
+ DCHECK(ret == -1 || static_cast<uint32_t>(ret) == old_pages);
+ USE(old_pages);
+ return ret;
}
enum InternalOpcode {
@@ -694,45 +653,84 @@ Handle<HeapObject> UnwrapWasmToJSWrapper(Isolate* isolate,
return Handle<HeapObject>::null();
}
+class SideTable;
+
+// Code and metadata needed to execute a function.
+struct InterpreterCode {
+ const WasmFunction* function; // wasm function
+ BodyLocalDecls locals; // local declarations
+ const byte* orig_start; // start of original code
+ const byte* orig_end; // end of original code
+ byte* start; // start of (maybe altered) code
+ byte* end; // end of (maybe altered) code
+ SideTable* side_table; // precomputed side table for control flow.
+
+ const byte* at(pc_t pc) { return start + pc; }
+};
+
// A helper class to compute the control transfers for each bytecode offset.
// Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
// be directly executed without the need to dynamically track blocks.
-class ControlTransfers : public ZoneObject {
+class SideTable : public ZoneObject {
public:
ControlTransferMap map_;
+ uint32_t max_stack_height_;
+
+ SideTable(Zone* zone, const WasmModule* module, InterpreterCode* code)
+ : map_(zone), max_stack_height_(0) {
+ // Create a zone for all temporary objects.
+ Zone control_transfer_zone(zone->allocator(), ZONE_NAME);
- ControlTransfers(Zone* zone, BodyLocalDecls* locals, const byte* start,
- const byte* end)
- : map_(zone) {
// Represents a control flow label.
- struct CLabel : public ZoneObject {
+ class CLabel : public ZoneObject {
+ explicit CLabel(Zone* zone, uint32_t target_stack_height, uint32_t arity)
+ : target(nullptr),
+ target_stack_height(target_stack_height),
+ arity(arity),
+ refs(zone) {}
+
+ public:
+ struct Ref {
+ const byte* from_pc;
+ const uint32_t stack_height;
+ };
const byte* target;
- ZoneVector<const byte*> refs;
+ uint32_t target_stack_height;
+ // Arity when branching to this label.
+ const uint32_t arity;
+ ZoneVector<Ref> refs;
- explicit CLabel(Zone* zone) : target(nullptr), refs(zone) {}
+ static CLabel* New(Zone* zone, uint32_t stack_height, uint32_t arity) {
+ return new (zone) CLabel(zone, stack_height, arity);
+ }
// Bind this label to the given PC.
- void Bind(ControlTransferMap* map, const byte* start, const byte* pc) {
+ void Bind(const byte* pc) {
DCHECK_NULL(target);
target = pc;
- for (auto from_pc : refs) {
- auto pcdiff = static_cast<pcdiff_t>(target - from_pc);
- size_t offset = static_cast<size_t>(from_pc - start);
- (*map)[offset] = pcdiff;
- }
}
// Reference this label from the given location.
- void Ref(ControlTransferMap* map, const byte* start,
- const byte* from_pc) {
- if (target) {
- // Target being bound before a reference means this is a loop.
- DCHECK_EQ(kExprLoop, *target);
- auto pcdiff = static_cast<pcdiff_t>(target - from_pc);
- size_t offset = static_cast<size_t>(from_pc - start);
- (*map)[offset] = pcdiff;
- } else {
- refs.push_back(from_pc);
+ void Ref(const byte* from_pc, uint32_t stack_height) {
+ // Target being bound before a reference means this is a loop.
+ DCHECK_IMPLIES(target, *target == kExprLoop);
+ refs.push_back({from_pc, stack_height});
+ }
+
+ void Finish(ControlTransferMap* map, const byte* start) {
+ DCHECK_NOT_NULL(target);
+ for (auto ref : refs) {
+ size_t offset = static_cast<size_t>(ref.from_pc - start);
+ auto pcdiff = static_cast<pcdiff_t>(target - ref.from_pc);
+ DCHECK_GE(ref.stack_height, target_stack_height);
+ spdiff_t spdiff =
+ static_cast<spdiff_t>(ref.stack_height - target_stack_height);
+ TRACE("control transfer @%zu: Δpc %d, stack %u->%u = -%u\n", offset,
+ pcdiff, ref.stack_height, target_stack_height, spdiff);
+ ControlTransferEntry& entry = (*map)[offset];
+ entry.pc_diff = pcdiff;
+ entry.sp_diff = spdiff;
+ entry.target_arity = arity;
}
}
};
@@ -742,10 +740,25 @@ class ControlTransfers : public ZoneObject {
const byte* pc;
CLabel* end_label;
CLabel* else_label;
-
- void Ref(ControlTransferMap* map, const byte* start,
- const byte* from_pc) {
- end_label->Ref(map, start, from_pc);
+ // Arity (number of values on the stack) when exiting this control
+ // structure via |end|.
+ uint32_t exit_arity;
+ // Track whether this block was already left, i.e. all further
+ // instructions are unreachable.
+ bool unreachable = false;
+
+ Control(const byte* pc, CLabel* end_label, CLabel* else_label,
+ uint32_t exit_arity)
+ : pc(pc),
+ end_label(end_label),
+ else_label(else_label),
+ exit_arity(exit_arity) {}
+ Control(const byte* pc, CLabel* end_label, uint32_t exit_arity)
+ : Control(pc, end_label, nullptr, exit_arity) {}
+
+ void Finish(ControlTransferMap* map, const byte* start) {
+ end_label->Finish(map, start);
+ if (else_label) else_label->Finish(map, start);
}
};
@@ -754,54 +767,93 @@ class ControlTransfers : public ZoneObject {
// AST decoder. The {control_stack} allows matching {br,br_if,br_table}
// bytecodes with their target, as well as determining whether the current
// bytecodes are within the true or false block of an else.
- std::vector<Control> control_stack;
- CLabel* func_label = new (zone) CLabel(zone);
- control_stack.push_back({start, func_label, nullptr});
- for (BytecodeIterator i(start, end, locals); i.has_next(); i.next()) {
+ ZoneVector<Control> control_stack(&control_transfer_zone);
+ uint32_t stack_height = 0;
+ uint32_t func_arity =
+ static_cast<uint32_t>(code->function->sig->return_count());
+ CLabel* func_label =
+ CLabel::New(&control_transfer_zone, stack_height, func_arity);
+ control_stack.emplace_back(code->orig_start, func_label, func_arity);
+ auto control_parent = [&]() -> Control& {
+ DCHECK_LE(2, control_stack.size());
+ return control_stack[control_stack.size() - 2];
+ };
+ auto copy_unreachable = [&] {
+ control_stack.back().unreachable = control_parent().unreachable;
+ };
+ for (BytecodeIterator i(code->orig_start, code->orig_end, &code->locals);
+ i.has_next(); i.next()) {
WasmOpcode opcode = i.current();
- TRACE("@%u: control %s\n", i.pc_offset(),
- WasmOpcodes::OpcodeName(opcode));
+ bool unreachable = control_stack.back().unreachable;
+ if (unreachable) {
+ TRACE("@%u: %s (is unreachable)\n", i.pc_offset(),
+ WasmOpcodes::OpcodeName(opcode));
+ } else {
+ auto stack_effect =
+ StackEffect(module, code->function->sig, i.pc(), i.end());
+ TRACE("@%u: %s (sp %d - %d + %d)\n", i.pc_offset(),
+ WasmOpcodes::OpcodeName(opcode), stack_height, stack_effect.first,
+ stack_effect.second);
+ DCHECK_GE(stack_height, stack_effect.first);
+ DCHECK_GE(kMaxUInt32, static_cast<uint64_t>(stack_height) -
+ stack_effect.first + stack_effect.second);
+ stack_height = stack_height - stack_effect.first + stack_effect.second;
+ if (stack_height > max_stack_height_) max_stack_height_ = stack_height;
+ }
switch (opcode) {
- case kExprBlock: {
- TRACE("control @%u: Block\n", i.pc_offset());
- CLabel* label = new (zone) CLabel(zone);
- control_stack.push_back({i.pc(), label, nullptr});
- break;
- }
+ case kExprBlock:
case kExprLoop: {
- TRACE("control @%u: Loop\n", i.pc_offset());
- CLabel* label = new (zone) CLabel(zone);
- control_stack.push_back({i.pc(), label, nullptr});
- label->Bind(&map_, start, i.pc());
+ bool is_loop = opcode == kExprLoop;
+ BlockTypeOperand<false> operand(&i, i.pc());
+ TRACE("control @%u: %s, arity %d\n", i.pc_offset(),
+ is_loop ? "Loop" : "Block", operand.arity);
+ CLabel* label = CLabel::New(&control_transfer_zone, stack_height,
+ is_loop ? 0 : operand.arity);
+ control_stack.emplace_back(i.pc(), label, operand.arity);
+ copy_unreachable();
+ if (is_loop) label->Bind(i.pc());
break;
}
case kExprIf: {
TRACE("control @%u: If\n", i.pc_offset());
- CLabel* end_label = new (zone) CLabel(zone);
- CLabel* else_label = new (zone) CLabel(zone);
- control_stack.push_back({i.pc(), end_label, else_label});
- else_label->Ref(&map_, start, i.pc());
+ BlockTypeOperand<false> operand(&i, i.pc());
+ CLabel* end_label =
+ CLabel::New(&control_transfer_zone, stack_height, operand.arity);
+ CLabel* else_label =
+ CLabel::New(&control_transfer_zone, stack_height, 0);
+ control_stack.emplace_back(i.pc(), end_label, else_label,
+ operand.arity);
+ copy_unreachable();
+ if (!unreachable) else_label->Ref(i.pc(), stack_height);
break;
}
case kExprElse: {
Control* c = &control_stack.back();
+ copy_unreachable();
TRACE("control @%u: Else\n", i.pc_offset());
- c->end_label->Ref(&map_, start, i.pc());
+ if (!control_parent().unreachable) {
+ c->end_label->Ref(i.pc(), stack_height);
+ }
DCHECK_NOT_NULL(c->else_label);
- c->else_label->Bind(&map_, start, i.pc() + 1);
+ c->else_label->Bind(i.pc() + 1);
+ c->else_label->Finish(&map_, code->orig_start);
c->else_label = nullptr;
+ DCHECK_GE(stack_height, c->end_label->target_stack_height);
+ stack_height = c->end_label->target_stack_height;
break;
}
case kExprEnd: {
Control* c = &control_stack.back();
TRACE("control @%u: End\n", i.pc_offset());
- if (c->end_label->target) {
- // only loops have bound labels.
- DCHECK_EQ(kExprLoop, *c->pc);
- } else {
- if (c->else_label) c->else_label->Bind(&map_, start, i.pc());
- c->end_label->Bind(&map_, start, i.pc() + 1);
+ // Only loops have bound labels.
+ DCHECK_IMPLIES(c->end_label->target, *c->pc == kExprLoop);
+ if (!c->end_label->target) {
+ if (c->else_label) c->else_label->Bind(i.pc());
+ c->end_label->Bind(i.pc() + 1);
}
+ c->Finish(&map_, code->orig_start);
+ DCHECK_GE(stack_height, c->end_label->target_stack_height);
+ stack_height = c->end_label->target_stack_height + c->exit_arity;
control_stack.pop_back();
break;
}
@@ -809,14 +861,14 @@ class ControlTransfers : public ZoneObject {
BreakDepthOperand<false> operand(&i, i.pc());
TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), operand.depth);
Control* c = &control_stack[control_stack.size() - operand.depth - 1];
- c->Ref(&map_, start, i.pc());
+ if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
break;
}
case kExprBrIf: {
BreakDepthOperand<false> operand(&i, i.pc());
TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), operand.depth);
Control* c = &control_stack[control_stack.size() - operand.depth - 1];
- c->Ref(&map_, start, i.pc());
+ if (!unreachable) c->end_label->Ref(i.pc(), stack_height);
break;
}
case kExprBrTable: {
@@ -824,44 +876,34 @@ class ControlTransfers : public ZoneObject {
BranchTableIterator<false> iterator(&i, operand);
TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
operand.table_count);
- while (iterator.has_next()) {
- uint32_t j = iterator.cur_index();
- uint32_t target = iterator.next();
- Control* c = &control_stack[control_stack.size() - target - 1];
- c->Ref(&map_, start, i.pc() + j);
+ if (!unreachable) {
+ while (iterator.has_next()) {
+ uint32_t j = iterator.cur_index();
+ uint32_t target = iterator.next();
+ Control* c = &control_stack[control_stack.size() - target - 1];
+ c->end_label->Ref(i.pc() + j, stack_height);
+ }
}
break;
}
- default: {
+ default:
break;
- }
+ }
+ if (WasmOpcodes::IsUnconditionalJump(opcode)) {
+ control_stack.back().unreachable = true;
}
}
- if (!func_label->target) func_label->Bind(&map_, start, end);
+ DCHECK_EQ(0, control_stack.size());
+ DCHECK_EQ(func_arity, stack_height);
}
- pcdiff_t Lookup(pc_t from) {
+ ControlTransferEntry& Lookup(pc_t from) {
auto result = map_.find(from);
- if (result == map_.end()) {
- V8_Fatal(__FILE__, __LINE__, "no control target for pc %zu", from);
- }
+ DCHECK(result != map_.end());
return result->second;
}
};
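Editor's note: to make the renamed SideTable concrete, it turns block-structured branches into flat (pc_diff, sp_diff, target_arity) records in a single forward pass, so the interpreter never re-discovers block boundaries at run time. A toy version of the ref/bind/finish bookkeeping over an invented four-opcode bytecode (everything here is illustrative; the real pass additionally tracks unreachability and branch arity):

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

enum Op { BLOCK, PUSH, BR, END };

int main() {
  //                      pc:  0     1     2   3    4
  std::vector<Op> code = {BLOCK, PUSH, BR, END, END};
  struct Label {
    int entry_height;
    std::vector<std::pair<size_t, int>> refs;  // (from_pc, stack_height)
  };
  std::map<size_t, std::pair<int, int>> table;  // pc -> (pc_diff, sp_diff)
  std::vector<Label> labels = {{0, {}}};        // implicit function label
  int height = 0;
  for (size_t pc = 0; pc < code.size(); ++pc) {
    switch (code[pc]) {
      case BLOCK: labels.push_back({height, {}}); break;
      case PUSH:  ++height; break;
      case BR:    labels.back().refs.push_back({pc, height}); break;
      case END: {
        // Bind the label just past END, then resolve all pending refs.
        for (auto [from, h] : labels.back().refs)
          table[from] = {int(pc + 1 - from), h - labels.back().entry_height};
        height = labels.back().entry_height;
        labels.pop_back();
        break;
      }
    }
  }
  for (auto& [pc, e] : table)
    std::printf("br @%zu: pc_diff %d, sp_diff %d\n", pc, e.first, e.second);
}

Running this prints "br @2: pc_diff 2, sp_diff 1": the branch at pc 2 jumps past the inner END and drops the one value pushed inside the block.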
-// Code and metadata needed to execute a function.
-struct InterpreterCode {
- const WasmFunction* function; // wasm function
- BodyLocalDecls locals; // local declarations
- const byte* orig_start; // start of original code
- const byte* orig_end; // end of original code
- byte* start; // start of (maybe altered) code
- byte* end; // end of (maybe altered) code
- ControlTransfers* targets; // helper for control flow.
-
- const byte* at(pc_t pc) { return start + pc; }
-};
-
struct ExternalCallResult {
enum Type {
// The function should be executed inside this interpreter.
@@ -980,11 +1022,9 @@ class CodeMap {
InterpreterCode* Preprocess(InterpreterCode* code) {
DCHECK_EQ(code->function->imported, code->start == nullptr);
- if (code->targets == nullptr && code->start != nullptr) {
+ if (!code->side_table && code->start) {
// Compute the control targets map and the local declarations.
- CHECK(DecodeLocalDecls(&code->locals, code->start, code->end));
- code->targets = new (zone_) ControlTransfers(
- zone_, &code->locals, code->orig_start, code->orig_end);
+ code->side_table = new (zone_) SideTable(zone_, module_, code);
}
return code;
}
@@ -1005,11 +1045,11 @@ class CodeMap {
DCHECK_LT(function->func_index, interpreter_code_.size());
InterpreterCode* code = &interpreter_code_[function->func_index];
DCHECK_EQ(function, code->function);
- code->targets = nullptr;
code->orig_start = start;
code->orig_end = end;
code->start = const_cast<byte*>(start);
code->end = const_cast<byte*>(end);
+ code->side_table = nullptr;
Preprocess(code);
}
@@ -1113,17 +1153,16 @@ WasmVal ToWebAssemblyValue(Isolate* isolate, Handle<Object> value,
class ThreadImpl {
struct Activation {
uint32_t fp;
- uint32_t sp;
- Activation(uint32_t fp, uint32_t sp) : fp(fp), sp(sp) {}
+ sp_t sp;
+ Activation(uint32_t fp, sp_t sp) : fp(fp), sp(sp) {}
};
public:
ThreadImpl(Zone* zone, CodeMap* codemap, WasmInstance* instance)
: codemap_(codemap),
instance_(instance),
- stack_(zone),
+ zone_(zone),
frames_(zone),
- blocks_(zone),
activations_(zone) {}
//==========================================================================
@@ -1135,9 +1174,9 @@ class ThreadImpl {
void InitFrame(const WasmFunction* function, WasmVal* args) {
DCHECK_EQ(current_activation().fp, frames_.size());
InterpreterCode* code = codemap()->GetCode(function);
- for (size_t i = 0; i < function->sig->parameter_count(); ++i) {
- stack_.push_back(args[i]);
- }
+ size_t num_params = function->sig->parameter_count();
+ EnsureStackSpace(num_params);
+ Push(args, num_params);
PushFrame(code);
}
@@ -1164,7 +1203,7 @@ class ThreadImpl {
void Reset() {
TRACE("----- RESET -----\n");
- stack_.clear();
+ sp_ = stack_start_;
frames_.clear();
state_ = WasmInterpreter::STOPPED;
trap_reason_ = kTrapCount;
@@ -1176,27 +1215,23 @@ class ThreadImpl {
return static_cast<int>(frames_.size());
}
- template <typename FrameCons>
- InterpretedFrame GetMutableFrame(int index, FrameCons frame_cons) {
- DCHECK_LE(0, index);
- DCHECK_GT(frames_.size(), index);
- Frame* frame = &frames_[index];
- DCHECK_GE(kMaxInt, frame->pc);
- DCHECK_GE(kMaxInt, frame->sp);
- DCHECK_GE(kMaxInt, frame->llimit());
- return frame_cons(frame->code->function, static_cast<int>(frame->pc),
- static_cast<int>(frame->sp),
- static_cast<int>(frame->llimit()));
- }
-
WasmVal GetReturnValue(uint32_t index) {
if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef);
DCHECK_EQ(WasmInterpreter::FINISHED, state_);
Activation act = current_activation();
// Current activation must be finished.
DCHECK_EQ(act.fp, frames_.size());
- DCHECK_GT(stack_.size(), act.sp + index);
- return stack_[act.sp + index];
+ return GetStackValue(act.sp + index);
+ }
+
+ WasmVal GetStackValue(sp_t index) {
+ DCHECK_GT(StackHeight(), index);
+ return stack_start_[index];
+ }
+
+ void SetStackValue(sp_t index, WasmVal value) {
+ DCHECK_GT(StackHeight(), index);
+ stack_start_[index] = value;
}
TrapReason GetTrapReason() { return trap_reason_; }
@@ -1219,10 +1254,10 @@ class ThreadImpl {
TRACE("----- START ACTIVATION %zu -----\n", activations_.size());
// If you use activations, use them consistently:
DCHECK_IMPLIES(activations_.empty(), frames_.empty());
- DCHECK_IMPLIES(activations_.empty(), stack_.empty());
+ DCHECK_IMPLIES(activations_.empty(), StackHeight() == 0);
uint32_t activation_id = static_cast<uint32_t>(activations_.size());
activations_.emplace_back(static_cast<uint32_t>(frames_.size()),
- static_cast<uint32_t>(stack_.size()));
+ StackHeight());
state_ = WasmInterpreter::STOPPED;
return activation_id;
}
@@ -1234,8 +1269,8 @@ class ThreadImpl {
// Stack height must match the start of this activation (otherwise unwind
// first).
DCHECK_EQ(activations_.back().fp, frames_.size());
- DCHECK_LE(activations_.back().sp, stack_.size());
- stack_.resize(activations_.back().sp);
+ DCHECK_LE(activations_.back().sp, StackHeight());
+ sp_ = stack_start_ + activations_.back().sp;
activations_.pop_back();
}
@@ -1256,8 +1291,8 @@ class ThreadImpl {
Activation& act = activations_.back();
DCHECK_LE(act.fp, frames_.size());
frames_.resize(act.fp);
- DCHECK_LE(act.sp, stack_.size());
- stack_.resize(act.sp);
+ DCHECK_LE(act.sp, StackHeight());
+ sp_ = stack_start_ + act.sp;
state_ = WasmInterpreter::STOPPED;
return WasmInterpreter::Thread::UNWOUND;
}
@@ -1282,11 +1317,15 @@ class ThreadImpl {
unsigned arity;
};
+ friend class InterpretedFrameImpl;
+
CodeMap* codemap_;
WasmInstance* instance_;
- ZoneVector<WasmVal> stack_;
+ Zone* zone_;
+ WasmVal* stack_start_ = nullptr; // Start of allocated stack space.
+ WasmVal* stack_limit_ = nullptr; // End of allocated stack space.
+ WasmVal* sp_ = nullptr; // Current stack pointer.
ZoneVector<Frame> frames_;
- ZoneVector<Block> blocks_;
WasmInterpreter::State state_ = WasmInterpreter::STOPPED;
pc_t break_pc_ = kInvalidPc;
TrapReason trap_reason_ = kTrapCount;
@@ -1310,14 +1349,15 @@ class ThreadImpl {
// Push a frame with arguments already on the stack.
void PushFrame(InterpreterCode* code) {
DCHECK_NOT_NULL(code);
+ DCHECK_NOT_NULL(code->side_table);
+ EnsureStackSpace(code->side_table->max_stack_height_ +
+ code->locals.type_list.size());
+
++num_interpreted_calls_;
size_t arity = code->function->sig->parameter_count();
// The parameters will overlap the arguments already on the stack.
- DCHECK_GE(stack_.size(), arity);
- frames_.push_back({code, 0, stack_.size() - arity});
- blocks_.push_back(
- {0, stack_.size(), frames_.size(),
- static_cast<uint32_t>(code->function->sig->return_count())});
+ DCHECK_GE(StackHeight(), arity);
+ frames_.push_back({code, 0, StackHeight() - arity});
frames_.back().pc = InitLocals(code);
TRACE(" => PushFrame #%zu (#%u @%zu)\n", frames_.size() - 1,
code->function->func_index, frames_.back().pc);
@@ -1337,7 +1377,7 @@ class ThreadImpl {
UNREACHABLE();
break;
}
- stack_.push_back(val);
+ Push(val);
}
return code->locals.encoded_size;
}
@@ -1356,16 +1396,15 @@ class ThreadImpl {
return false;
}
- int LookupTarget(InterpreterCode* code, pc_t pc) {
- return static_cast<int>(code->targets->Lookup(pc));
+ int LookupTargetDelta(InterpreterCode* code, pc_t pc) {
+ return static_cast<int>(code->side_table->Lookup(pc).pc_diff);
}
int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
- size_t bp = blocks_.size() - depth - 1;
- Block* target = &blocks_[bp];
- DoStackTransfer(target->sp, target->arity);
- blocks_.resize(bp);
- return LookupTarget(code, pc);
+ ControlTransferEntry& control_transfer_entry = code->side_table->Lookup(pc);
+ DoStackTransfer(sp_ - control_transfer_entry.sp_diff,
+ control_transfer_entry.target_arity);
+ return control_transfer_entry.pc_diff;
}
pc_t ReturnPc(Decoder* decoder, InterpreterCode* code, pc_t pc) {
@@ -1387,17 +1426,12 @@ class ThreadImpl {
bool DoReturn(Decoder* decoder, InterpreterCode** code, pc_t* pc, pc_t* limit,
size_t arity) {
DCHECK_GT(frames_.size(), 0);
- // Pop all blocks for this frame.
- while (!blocks_.empty() && blocks_.back().fp == frames_.size()) {
- blocks_.pop_back();
- }
-
- sp_t dest = frames_.back().sp;
+ WasmVal* sp_dest = stack_start_ + frames_.back().sp;
frames_.pop_back();
if (frames_.size() == current_activation().fp) {
// A return from the last frame terminates the execution.
state_ = WasmInterpreter::FINISHED;
- DoStackTransfer(dest, arity);
+ DoStackTransfer(sp_dest, arity);
TRACE(" => finish\n");
return false;
} else {
@@ -1409,7 +1443,7 @@ class ThreadImpl {
*limit = top->code->end - top->code->start;
TRACE(" => Return to #%zu (#%u @%zu)\n", frames_.size() - 1,
(*code)->function->func_index, *pc);
- DoStackTransfer(dest, arity);
+ DoStackTransfer(sp_dest, arity);
return true;
}
}
@@ -1429,19 +1463,16 @@ class ThreadImpl {
// Copies {arity} values on the top of the stack down the stack to {dest},
// dropping the values in-between.
- void DoStackTransfer(sp_t dest, size_t arity) {
+ void DoStackTransfer(WasmVal* dest, size_t arity) {
// before: |---------------| pop_count | arity |
- // ^ 0 ^ dest ^ stack_.size()
+ // ^ 0 ^ dest ^ sp_
//
// after: |---------------| arity |
- // ^ 0 ^ stack_.size()
- DCHECK_LE(dest, stack_.size());
- DCHECK_LE(dest + arity, stack_.size());
- size_t pop_count = stack_.size() - dest - arity;
- for (size_t i = 0; i < arity; i++) {
- stack_[dest + i] = stack_[dest + pop_count + i];
- }
- stack_.resize(stack_.size() - pop_count);
+ // ^ 0 ^ sp_
+ DCHECK_LE(dest, sp_);
+ DCHECK_LE(dest + arity, sp_);
+ if (arity) memcpy(dest, sp_ - arity, arity * sizeof(*sp_));
+ sp_ = dest + arity;
}
template <typename mtype>
@@ -1461,7 +1492,7 @@ class ThreadImpl {
byte* addr = instance()->mem_start + operand.offset + index;
WasmVal result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr)));
- Push(pc, result);
+ Push(result);
len = 1 + operand.length;
return true;
}
@@ -1514,6 +1545,15 @@ class ThreadImpl {
}
void Execute(InterpreterCode* code, pc_t pc, int max) {
+ DCHECK_NOT_NULL(code->side_table);
+ DCHECK(!frames_.empty());
+ // There must be enough space on the stack to hold the arguments, locals,
+ // and the value stack.
+ DCHECK_LE(code->function->sig->parameter_count() +
+ code->locals.type_list.size() +
+ code->side_table->max_stack_height_,
+ stack_limit_ - stack_start_ - frames_.back().sp);
+
Decoder decoder(code->start, code->end);
pc_t limit = code->end - code->start;
bool hit_break = false;
@@ -1558,18 +1598,26 @@ class ThreadImpl {
TraceValueStack();
TRACE("\n");
+#ifdef DEBUG
+ // Compute the stack effect of this opcode, and verify later that the
+ // stack was modified accordingly.
+ std::pair<uint32_t, uint32_t> stack_effect = wasm::StackEffect(
+ codemap_->module(), frames_.back().code->function->sig,
+ code->orig_start + pc, code->orig_end);
+ sp_t expected_new_stack_height =
+ StackHeight() - stack_effect.first + stack_effect.second;
+#endif
+
switch (orig) {
case kExprNop:
break;
case kExprBlock: {
BlockTypeOperand<false> operand(&decoder, code->at(pc));
- blocks_.push_back({pc, stack_.size(), frames_.size(), operand.arity});
len = 1 + operand.length;
break;
}
case kExprLoop: {
BlockTypeOperand<false> operand(&decoder, code->at(pc));
- blocks_.push_back({pc, stack_.size(), frames_.size(), 0});
len = 1 + operand.length;
break;
}
@@ -1577,20 +1625,18 @@ class ThreadImpl {
BlockTypeOperand<false> operand(&decoder, code->at(pc));
WasmVal cond = Pop();
bool is_true = cond.to<uint32_t>() != 0;
- blocks_.push_back({pc, stack_.size(), frames_.size(), operand.arity});
if (is_true) {
// fall through to the true block.
len = 1 + operand.length;
TRACE(" true => fallthrough\n");
} else {
- len = LookupTarget(code, pc);
+ len = LookupTargetDelta(code, pc);
TRACE(" false => @%zu\n", pc + len);
}
break;
}
case kExprElse: {
- blocks_.pop_back();
- len = LookupTarget(code, pc);
+ len = LookupTargetDelta(code, pc);
TRACE(" end => @%zu\n", pc + len);
break;
}
@@ -1598,7 +1644,7 @@ class ThreadImpl {
WasmVal cond = Pop();
WasmVal fval = Pop();
WasmVal tval = Pop();
- Push(pc, cond.to<int32_t>() != 0 ? tval : fval);
+ Push(cond.to<int32_t>() != 0 ? tval : fval);
break;
}
case kExprBr: {
@@ -1644,51 +1690,50 @@ class ThreadImpl {
return DoTrap(kTrapUnreachable, pc);
}
case kExprEnd: {
- blocks_.pop_back();
break;
}
case kExprI32Const: {
ImmI32Operand<false> operand(&decoder, code->at(pc));
- Push(pc, WasmVal(operand.value));
+ Push(WasmVal(operand.value));
len = 1 + operand.length;
break;
}
case kExprI64Const: {
ImmI64Operand<false> operand(&decoder, code->at(pc));
- Push(pc, WasmVal(operand.value));
+ Push(WasmVal(operand.value));
len = 1 + operand.length;
break;
}
case kExprF32Const: {
ImmF32Operand<false> operand(&decoder, code->at(pc));
- Push(pc, WasmVal(operand.value));
+ Push(WasmVal(operand.value));
len = 1 + operand.length;
break;
}
case kExprF64Const: {
ImmF64Operand<false> operand(&decoder, code->at(pc));
- Push(pc, WasmVal(operand.value));
+ Push(WasmVal(operand.value));
len = 1 + operand.length;
break;
}
case kExprGetLocal: {
LocalIndexOperand<false> operand(&decoder, code->at(pc));
- Push(pc, stack_[frames_.back().sp + operand.index]);
+ Push(GetStackValue(frames_.back().sp + operand.index));
len = 1 + operand.length;
break;
}
case kExprSetLocal: {
LocalIndexOperand<false> operand(&decoder, code->at(pc));
WasmVal val = Pop();
- stack_[frames_.back().sp + operand.index] = val;
+ SetStackValue(frames_.back().sp + operand.index, val);
len = 1 + operand.length;
break;
}
case kExprTeeLocal: {
LocalIndexOperand<false> operand(&decoder, code->at(pc));
WasmVal val = Pop();
- stack_[frames_.back().sp + operand.index] = val;
- Push(pc, val);
+ SetStackValue(frames_.back().sp + operand.index, val);
+ Push(val);
len = 1 + operand.length;
break;
}
@@ -1770,7 +1815,7 @@ class ThreadImpl {
default:
UNREACHABLE();
}
- Push(pc, val);
+ Push(val);
len = 1 + operand.length;
break;
}
@@ -1843,7 +1888,7 @@ class ThreadImpl {
/* TODO(titzer): alignment for asmjs load mem? */ \
result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
} \
- Push(pc, WasmVal(result)); \
+ Push(WasmVal(result)); \
break; \
}
ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
@@ -1866,7 +1911,7 @@ class ThreadImpl {
/* TODO(titzer): alignment for asmjs store mem? */ \
*(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
} \
- Push(pc, val); \
+ Push(val); \
break; \
}
@@ -1879,15 +1924,15 @@ class ThreadImpl {
case kExprGrowMemory: {
MemoryIndexOperand<false> operand(&decoder, code->at(pc));
uint32_t delta_pages = Pop().to<uint32_t>();
- Push(pc, WasmVal(ExecuteGrowMemory(
- delta_pages, codemap_->maybe_instance(), instance())));
+ Push(WasmVal(ExecuteGrowMemory(
+ delta_pages, codemap_->maybe_instance(), instance())));
len = 1 + operand.length;
break;
}
case kExprMemorySize: {
MemoryIndexOperand<false> operand(&decoder, code->at(pc));
- Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size /
- WasmModule::kPageSize)));
+ Push(WasmVal(static_cast<uint32_t>(instance()->mem_size /
+ WasmModule::kPageSize)));
len = 1 + operand.length;
break;
}
@@ -1896,15 +1941,13 @@ class ThreadImpl {
// ia32 by the reinterpret casts.
case kExprI32ReinterpretF32: {
WasmVal val = Pop();
- WasmVal result(ExecuteI32ReinterpretF32(val));
- Push(pc, result);
+ Push(WasmVal(ExecuteI32ReinterpretF32(val)));
possible_nondeterminism_ |= std::isnan(val.to<float>());
break;
}
case kExprI64ReinterpretF64: {
WasmVal val = Pop();
- WasmVal result(ExecuteI64ReinterpretF64(val));
- Push(pc, result);
+ Push(WasmVal(ExecuteI64ReinterpretF64(val)));
possible_nondeterminism_ |= std::isnan(val.to<double>());
break;
}
@@ -1913,7 +1956,7 @@ class ThreadImpl {
WasmVal rval = Pop(); \
WasmVal lval = Pop(); \
WasmVal result(lval.to<ctype>() op rval.to<ctype>()); \
- Push(pc, result); \
+ Push(result); \
break; \
}
FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
@@ -1926,7 +1969,7 @@ class ThreadImpl {
volatile ctype lval = Pop().to<ctype>(); \
WasmVal result(Execute##name(lval, rval, &trap)); \
if (trap != kTrapCount) return DoTrap(trap, pc); \
- Push(pc, result); \
+ Push(result); \
break; \
}
FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
@@ -1939,7 +1982,7 @@ class ThreadImpl {
volatile float rval = Pop().to<float>();
volatile float lval = Pop().to<float>();
WasmVal result(ExecuteF32CopySign(lval, rval, &trap));
- Push(pc, result);
+ Push(result);
possible_nondeterminism_ |= std::isnan(rval);
break;
}
@@ -1950,7 +1993,7 @@ class ThreadImpl {
volatile double rval = Pop().to<double>();
volatile double lval = Pop().to<double>();
WasmVal result(ExecuteF64CopySign(lval, rval, &trap));
- Push(pc, result);
+ Push(result);
possible_nondeterminism_ |= std::isnan(rval);
break;
}
@@ -1960,7 +2003,7 @@ class ThreadImpl {
volatile ctype val = Pop().to<ctype>(); \
WasmVal result(Execute##name(val, &trap)); \
if (trap != kTrapCount) return DoTrap(trap, pc); \
- Push(pc, result); \
+ Push(result); \
break; \
}
FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
@@ -1972,6 +2015,12 @@ class ThreadImpl {
UNREACHABLE();
}
+#ifdef DEBUG
+ if (!WasmOpcodes::IsControlOpcode(static_cast<WasmOpcode>(opcode))) {
+ DCHECK_EQ(expected_new_stack_height, StackHeight());
+ }
+#endif
+
pc += len;
if (pc == limit) {
// Fell off end of code; do an implicit return.
@@ -1989,20 +2038,17 @@ class ThreadImpl {
}
WasmVal Pop() {
- DCHECK_GT(stack_.size(), 0);
DCHECK_GT(frames_.size(), 0);
- DCHECK_GT(stack_.size(), frames_.back().llimit()); // can't pop into locals
- WasmVal val = stack_.back();
- stack_.pop_back();
- return val;
+ DCHECK_GT(StackHeight(), frames_.back().llimit()); // can't pop into locals
+ return *--sp_;
}
void PopN(int n) {
- DCHECK_GE(stack_.size(), n);
+ DCHECK_GE(StackHeight(), n);
DCHECK_GT(frames_.size(), 0);
- size_t nsize = stack_.size() - n;
- DCHECK_GE(nsize, frames_.back().llimit()); // can't pop into locals
- stack_.resize(nsize);
+ // Check that we don't pop into locals.
+ DCHECK_GE(StackHeight() - n, frames_.back().llimit());
+ sp_ -= n;
}
WasmVal PopArity(size_t arity) {
@@ -2011,12 +2057,36 @@ class ThreadImpl {
return Pop();
}
- void Push(pc_t pc, WasmVal val) {
- // TODO(titzer): store PC as well?
+ void Push(WasmVal val) {
DCHECK_NE(kWasmStmt, val.type);
- stack_.push_back(val);
+ DCHECK_LE(1, stack_limit_ - sp_);
+ *sp_++ = val;
}
+ void Push(WasmVal* vals, size_t arity) {
+ DCHECK_LE(arity, stack_limit_ - sp_);
+ for (WasmVal *val = vals, *end = vals + arity; val != end; ++val) {
+ DCHECK_NE(kWasmStmt, val->type);
+ }
+ memcpy(sp_, vals, arity * sizeof(*sp_));
+ sp_ += arity;
+ }
+
+ void EnsureStackSpace(size_t size) {
+ if (V8_LIKELY(static_cast<size_t>(stack_limit_ - sp_) >= size)) return;
+ size_t old_size = stack_limit_ - stack_start_;
+ size_t requested_size =
+ base::bits::RoundUpToPowerOfTwo64((sp_ - stack_start_) + size);
+ size_t new_size = Max(size_t{8}, Max(2 * old_size, requested_size));
+ WasmVal* new_stack = zone_->NewArray<WasmVal>(new_size);
+ memcpy(new_stack, stack_start_, old_size * sizeof(*sp_));
+ sp_ = new_stack + (sp_ - stack_start_);
+ stack_start_ = new_stack;
+ stack_limit_ = new_stack + new_size;
+ }
+
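Editor's note: the stack is now grown manually instead of through a ZoneVector, and the growth rule deserves a gloss: the new capacity is at least 8 slots, at least double the old capacity, and at least the next power of two covering the requested height, so repeated pushes stay amortized O(1). Extracted as a standalone function (sketch; the real code allocates from the Zone and uses base::bits::RoundUpToPowerOfTwo64):

#include <algorithm>
#include <cstddef>
#include <cstdio>

size_t RoundUpToPowerOfTwo(size_t v) {
  size_t p = 1;
  while (p < v) p <<= 1;
  return p;
}

size_t NewCapacity(size_t old_size, size_t used, size_t extra) {
  size_t requested = RoundUpToPowerOfTwo(used + extra);
  return std::max<size_t>(8, std::max(2 * old_size, requested));
}

int main() {
  std::printf("%zu %zu %zu\n",
              NewCapacity(0, 0, 3),     // 8
              NewCapacity(8, 8, 1),     // 16
              NewCapacity(16, 10, 40)); // 64
}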
+ sp_t StackHeight() { return sp_ - stack_start_; }
+
void TraceStack(const char* phase, pc_t pc) {
if (FLAG_trace_wasm_interpreter) {
PrintF("%s @%zu", phase, pc);
@@ -2027,39 +2097,38 @@ class ThreadImpl {
void TraceValueStack() {
#ifdef DEBUG
+ if (!FLAG_trace_wasm_interpreter) return;
Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
sp_t sp = top ? top->sp : 0;
sp_t plimit = top ? top->plimit() : 0;
sp_t llimit = top ? top->llimit() : 0;
- if (FLAG_trace_wasm_interpreter) {
- for (size_t i = sp; i < stack_.size(); ++i) {
- if (i < plimit)
- PrintF(" p%zu:", i);
- else if (i < llimit)
- PrintF(" l%zu:", i);
- else
- PrintF(" s%zu:", i);
- WasmVal val = stack_[i];
- switch (val.type) {
- case kWasmI32:
- PrintF("i32:%d", val.to<int32_t>());
- break;
- case kWasmI64:
- PrintF("i64:%" PRId64 "", val.to<int64_t>());
- break;
- case kWasmF32:
- PrintF("f32:%f", val.to<float>());
- break;
- case kWasmF64:
- PrintF("f64:%lf", val.to<double>());
- break;
- case kWasmStmt:
- PrintF("void");
- break;
- default:
- UNREACHABLE();
- break;
- }
+ for (size_t i = sp; i < StackHeight(); ++i) {
+ if (i < plimit)
+ PrintF(" p%zu:", i);
+ else if (i < llimit)
+ PrintF(" l%zu:", i);
+ else
+ PrintF(" s%zu:", i);
+ WasmVal val = GetStackValue(i);
+ switch (val.type) {
+ case kWasmI32:
+ PrintF("i32:%d", val.to<int32_t>());
+ break;
+ case kWasmI64:
+ PrintF("i64:%" PRId64 "", val.to<int64_t>());
+ break;
+ case kWasmF32:
+ PrintF("f32:%f", val.to<float>());
+ break;
+ case kWasmF64:
+ PrintF("f64:%lf", val.to<double>());
+ break;
+ case kWasmStmt:
+ PrintF("void");
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
}
#endif // DEBUG
@@ -2112,7 +2181,7 @@ class ThreadImpl {
// Get all arguments as JS values.
std::vector<Handle<Object>> args;
args.reserve(num_args);
- WasmVal* wasm_args = stack_.data() + (stack_.size() - num_args);
+ WasmVal* wasm_args = sp_ - num_args;
for (int i = 0; i < num_args; ++i) {
args.push_back(WasmValToNumber(isolate->factory(), wasm_args[i],
signature->GetParam(i)));
@@ -2132,12 +2201,11 @@ class ThreadImpl {
Handle<Object> retval = maybe_retval.ToHandleChecked();
// Pop arguments off the stack.
- stack_.resize(stack_.size() - num_args);
+ sp_ -= num_args;
if (signature->return_count() > 0) {
// TODO(wasm): Handle multiple returns.
DCHECK_EQ(1, signature->return_count());
- stack_.push_back(
- ToWebAssemblyValue(isolate, retval, signature->GetReturn()));
+ Push(ToWebAssemblyValue(isolate, retval, signature->GetReturn()));
}
return {ExternalCallResult::EXTERNAL_RETURNED};
}
@@ -2157,7 +2225,8 @@ class ThreadImpl {
ExternalCallResult CallIndirectFunction(uint32_t table_index,
uint32_t entry_index,
uint32_t sig_index) {
- if (!codemap()->has_instance()) {
+ if (!codemap()->has_instance() ||
+ !codemap()->instance()->compiled_module()->has_function_tables()) {
// No instance. Rely on the information stored in the WasmModule.
// TODO(wasm): This is only needed for testing. Refactor testing to use
// the same paths as production.
@@ -2234,6 +2303,68 @@ class ThreadImpl {
}
};
+class InterpretedFrameImpl {
+ public:
+ InterpretedFrameImpl(ThreadImpl* thread, int index)
+ : thread_(thread), index_(index) {
+ DCHECK_LE(0, index);
+ }
+
+ const WasmFunction* function() const { return frame()->code->function; }
+
+ int pc() const {
+ DCHECK_LE(0, frame()->pc);
+ DCHECK_GE(kMaxInt, frame()->pc);
+ return static_cast<int>(frame()->pc);
+ }
+
+ int GetParameterCount() const {
+ DCHECK_GE(kMaxInt, function()->sig->parameter_count());
+ return static_cast<int>(function()->sig->parameter_count());
+ }
+
+ int GetLocalCount() const {
+ size_t num_locals = function()->sig->parameter_count() +
+ frame()->code->locals.type_list.size();
+ DCHECK_GE(kMaxInt, num_locals);
+ return static_cast<int>(num_locals);
+ }
+
+ int GetStackHeight() const {
+ bool is_top_frame =
+ static_cast<size_t>(index_) + 1 == thread_->frames_.size();
+ size_t stack_limit =
+ is_top_frame ? thread_->StackHeight() : thread_->frames_[index_ + 1].sp;
+ DCHECK_LE(frame()->sp, stack_limit);
+ size_t frame_size = stack_limit - frame()->sp;
+ DCHECK_LE(GetLocalCount(), frame_size);
+ return static_cast<int>(frame_size) - GetLocalCount();
+ }
+
+ WasmVal GetLocalValue(int index) const {
+ DCHECK_LE(0, index);
+ DCHECK_GT(GetLocalCount(), index);
+ return thread_->GetStackValue(static_cast<int>(frame()->sp) + index);
+ }
+
+ WasmVal GetStackValue(int index) const {
+ DCHECK_LE(0, index);
+ // Index must be within the number of stack values of this frame.
+ DCHECK_GT(GetStackHeight(), index);
+ return thread_->GetStackValue(static_cast<int>(frame()->sp) +
+ GetLocalCount() + index);
+ }
+
+ private:
+ ThreadImpl* thread_;
+ int index_;
+
+ ThreadImpl::Frame* frame() const {
+ DCHECK_GT(thread_->frames_.size(), index_);
+ return &thread_->frames_[index_];
+ }
+};
+
// Converters between WasmInterpreter::Thread and WasmInterpreter::ThreadImpl.
// Thread* is the public interface, without knowledge of the object layout.
// This cast is potentially risky, but as long as we always cast it back before
@@ -2245,6 +2376,14 @@ ThreadImpl* ToImpl(WasmInterpreter::Thread* thread) {
return reinterpret_cast<ThreadImpl*>(thread);
}
+// Same conversion for InterpretedFrame and InterpretedFrameImpl.
+InterpretedFrame* ToFrame(InterpretedFrameImpl* impl) {
+ return reinterpret_cast<InterpretedFrame*>(impl);
+}
+const InterpretedFrameImpl* ToImpl(const InterpretedFrame* frame) {
+ return reinterpret_cast<const InterpretedFrameImpl*>(frame);
+}
+
} // namespace
//============================================================================
@@ -2275,16 +2414,11 @@ pc_t WasmInterpreter::Thread::GetBreakpointPc() {
int WasmInterpreter::Thread::GetFrameCount() {
return ToImpl(this)->GetFrameCount();
}
-const InterpretedFrame WasmInterpreter::Thread::GetFrame(int index) {
- return GetMutableFrame(index);
-}
-InterpretedFrame WasmInterpreter::Thread::GetMutableFrame(int index) {
- // We have access to the constructor of InterpretedFrame, but ThreadImpl has
- // not. So pass it as a lambda (should all get inlined).
- auto frame_cons = [](const WasmFunction* function, int pc, int fp, int sp) {
- return InterpretedFrame(function, pc, fp, sp);
- };
- return ToImpl(this)->GetMutableFrame(index, frame_cons);
+std::unique_ptr<InterpretedFrame> WasmInterpreter::Thread::GetFrame(int index) {
+ DCHECK_LE(0, index);
+ DCHECK_GT(GetFrameCount(), index);
+ return std::unique_ptr<InterpretedFrame>(
+ ToFrame(new InterpretedFrameImpl(ToImpl(this), index)));
}
WasmVal WasmInterpreter::Thread::GetReturnValue(int index) {
return ToImpl(this)->GetReturnValue(index);
@@ -2416,6 +2550,11 @@ void WasmInterpreter::WriteMemory(size_t offset, WasmVal val) {
UNIMPLEMENTED();
}
+void WasmInterpreter::UpdateMemory(byte* mem_start, uint32_t mem_size) {
+ internals_->instance_->mem_start = mem_start;
+ internals_->instance_->mem_size = mem_size;
+}
+
void WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
internals_->codemap_.AddFunction(function, nullptr, nullptr);
}
@@ -2427,39 +2566,41 @@ void WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
}
ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
- Zone* zone, const byte* start, const byte* end) {
- ControlTransfers targets(zone, nullptr, start, end);
- return targets.map_;
+ Zone* zone, const WasmModule* module, const byte* start, const byte* end) {
+ // Create some dummy structures to avoid special-casing the implementation
+ // just for testing.
+ FunctionSig sig(0, 0, nullptr);
+ WasmFunction function{&sig, 0, 0, 0, 0, 0, 0, false, false};
+ InterpreterCode code{
+ &function, BodyLocalDecls(zone), start, end, nullptr, nullptr, nullptr};
+
+ // Now compute and return the control transfers.
+ SideTable side_table(zone, module, &code);
+ return side_table.map_;
}
//============================================================================
// Implementation of the frame inspection interface.
//============================================================================
+const WasmFunction* InterpretedFrame::function() const {
+ return ToImpl(this)->function();
+}
+int InterpretedFrame::pc() const { return ToImpl(this)->pc(); }
int InterpretedFrame::GetParameterCount() const {
- USE(fp_);
- USE(sp_);
- // TODO(clemensh): Return the correct number of parameters.
- return 0;
+ return ToImpl(this)->GetParameterCount();
}
-
-WasmVal InterpretedFrame::GetLocalVal(int index) const {
- CHECK_GE(index, 0);
- UNIMPLEMENTED();
- WasmVal none;
- none.type = kWasmStmt;
- return none;
+int InterpretedFrame::GetLocalCount() const {
+ return ToImpl(this)->GetLocalCount();
}
-
-WasmVal InterpretedFrame::GetExprVal(int pc) const {
- UNIMPLEMENTED();
- WasmVal none;
- none.type = kWasmStmt;
- return none;
+int InterpretedFrame::GetStackHeight() const {
+ return ToImpl(this)->GetStackHeight();
+}
+WasmVal InterpretedFrame::GetLocalValue(int index) const {
+ return ToImpl(this)->GetLocalValue(index);
+}
+WasmVal InterpretedFrame::GetStackValue(int index) const {
+ return ToImpl(this)->GetStackValue(index);
}
-
-void InterpretedFrame::SetLocalVal(int index, WasmVal val) { UNIMPLEMENTED(); }
-
-void InterpretedFrame::SetExprVal(int pc, WasmVal val) { UNIMPLEMENTED(); }
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/wasm-interpreter.h b/deps/v8/src/wasm/wasm-interpreter.h
index 76845dc500..1259f09ff2 100644
--- a/deps/v8/src/wasm/wasm-interpreter.h
+++ b/deps/v8/src/wasm/wasm-interpreter.h
@@ -21,16 +21,27 @@ namespace wasm {
// forward declarations.
struct ModuleBytesEnv;
struct WasmFunction;
+struct WasmModule;
class WasmInterpreterInternals;
-typedef size_t pc_t;
-typedef size_t sp_t;
-typedef int32_t pcdiff_t;
-typedef uint32_t spdiff_t;
-
-const pc_t kInvalidPc = 0x80000000;
+using pc_t = size_t;
+using sp_t = size_t;
+using pcdiff_t = int32_t;
+using spdiff_t = uint32_t;
+
+constexpr pc_t kInvalidPc = 0x80000000;
+
+struct ControlTransferEntry {
+ // Distance from the instruction to the label to jump to (usually forward,
+ // but negative for the back-edges of loops).
+ pcdiff_t pc_diff;
+ // Delta by which to decrease the stack height.
+ spdiff_t sp_diff;
+ // Arity of the block we jump to.
+ uint32_t target_arity;
+};
-typedef ZoneMap<pc_t, pcdiff_t> ControlTransferMap;
+using ControlTransferMap = ZoneMap<pc_t, ControlTransferEntry>;
// Macro for defining union members.
#define FOREACH_UNION_MEMBER(V) \
@@ -57,55 +68,74 @@ struct WasmVal {
FOREACH_UNION_MEMBER(DECLARE_CONSTRUCTOR)
#undef DECLARE_CONSTRUCTOR
+ bool operator==(const WasmVal& other) const {
+ if (type != other.type) return false;
+#define CHECK_VAL_EQ(field, localtype, ctype) \
+ if (type == localtype) { \
+ return val.field == other.val.field; \
+ }
+ FOREACH_UNION_MEMBER(CHECK_VAL_EQ)
+#undef CHECK_VAL_EQ
+ UNREACHABLE();
+ return false;
+ }
+
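Editor's note: the new equality operator reuses the existing FOREACH_UNION_MEMBER X-macro, the same trick already used to stamp out the constructors and the to<>() casts. Reduced to its essentials (standalone sketch with an invented two-member union):

#include <cstdint>
#include <cstdio>

#define FOREACH_MEMBER(V) \
  V(i32, 0, int32_t)      \
  V(f64, 1, double)

struct Val {
  int type;
  union {
    int32_t i32;
    double f64;
  } val;
};

bool Equal(const Val& a, const Val& b) {
  if (a.type != b.type) return false;
  // One comparison branch per (field, tag, type) triple in the list.
#define CASE(field, tag, ctype) \
  if (a.type == tag) return a.val.field == b.val.field;
  FOREACH_MEMBER(CASE)
#undef CASE
  return false;  // unreachable for valid types
}

int main() {
  Val a, b;
  a.type = b.type = 0;
  a.val.i32 = b.val.i32 = 7;
  std::printf("%d\n", Equal(a, b));  // prints 1
}

Note that for the floating members this is value comparison, so two NaN values compare unequal; the same caveat applies to WasmVal::operator==.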
template <typename T>
- inline T to() {
+ inline T to() const {
UNREACHABLE();
}
template <typename T>
- inline T to_unchecked() {
+ inline T to_unchecked() const {
UNREACHABLE();
}
};
-#define DECLARE_CAST(field, localtype, ctype) \
- template <> \
- inline ctype WasmVal::to_unchecked() { \
- return val.field; \
- } \
- template <> \
- inline ctype WasmVal::to() { \
- CHECK_EQ(localtype, type); \
- return val.field; \
+#define DECLARE_CAST(field, localtype, ctype) \
+ template <> \
+ inline ctype WasmVal::to_unchecked() const { \
+ return val.field; \
+ } \
+ template <> \
+ inline ctype WasmVal::to() const { \
+ CHECK_EQ(localtype, type); \
+ return val.field; \
}
FOREACH_UNION_MEMBER(DECLARE_CAST)
#undef DECLARE_CAST
// Representation of frames within the interpreter.
+//
+// Layout of a frame:
+// -----------------
+// stack slot #N   ‾\.
+// ...              |  stack entries: GetStackHeight(); GetStackValue()
+// stack slot #0   _/·
+// local #L        ‾\.
+// ...              |  locals: GetLocalCount(); GetLocalValue()
+// local #P+1       |
+// param #P         |  ‾\.
+// ...              |   |  parameters: GetParameterCount(); GetLocalValue()
+// param #0        _/· _/·
+// -----------------
+//
class InterpretedFrame {
public:
- const WasmFunction* function() const { return function_; }
- int pc() const { return pc_; }
+ const WasmFunction* function() const;
+ int pc() const;
- //==========================================================================
- // Stack frame inspection.
- //==========================================================================
int GetParameterCount() const;
- WasmVal GetLocalVal(int index) const;
- WasmVal GetExprVal(int pc) const;
- void SetLocalVal(int index, WasmVal val);
- void SetExprVal(int pc, WasmVal val);
+ int GetLocalCount() const;
+ int GetStackHeight() const;
+ WasmVal GetLocalValue(int index) const;
+ WasmVal GetStackValue(int index) const;
private:
friend class WasmInterpreter;
-
- InterpretedFrame(const WasmFunction* function, int pc, int fp, int sp)
- : function_(function), pc_(pc), fp_(fp), sp_(sp) {}
-
- const WasmFunction* function_;
- int pc_;
- int fp_;
- int sp_;
+ // Don't instantiate InterpretedFrames; they will be allocated as
+ // InterpretedFrameImpl in the interpreter implementation.
+ InterpretedFrame() = delete;
+ DISALLOW_COPY_AND_ASSIGN(InterpretedFrame);
};
// An interpreter capable of executing WASM.
@@ -154,8 +184,8 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
pc_t GetBreakpointPc();
// TODO(clemensh): Make this uint32_t.
int GetFrameCount();
- const InterpretedFrame GetFrame(int index);
- InterpretedFrame GetMutableFrame(int index);
+ // The InterpretedFrame is only valid as long as the Thread is paused.
+ std::unique_ptr<InterpretedFrame> GetFrame(int index);
WasmVal GetReturnValue(int index = 0);
TrapReason GetTrapReason();
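
A hedged usage sketch of the reworked inspection API (names as declared above; this only compiles inside the V8 tree, and DumpFrames is a hypothetical helper):

    // Walk a paused thread's frames: parameters first, then the other
    // locals, then the operand stack - see the layout comment above.
    void DumpFrames(WasmInterpreter::Thread* thread) {
      for (int i = 0; i < thread->GetFrameCount(); ++i) {
        std::unique_ptr<InterpretedFrame> frame = thread->GetFrame(i);
        for (int p = 0; p < frame->GetParameterCount(); ++p) {
          WasmVal param = frame->GetLocalValue(p);  // params are locals 0..P
          USE(param);
        }
        for (int s = 0; s < frame->GetStackHeight(); ++s) {
          WasmVal entry = frame->GetStackValue(s);
          USE(entry);
        }
      }  // drop the frames before resuming; they are only valid while paused
    }
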
@@ -226,6 +256,8 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
size_t GetMemorySize();
WasmVal ReadMemory(size_t offset);
void WriteMemory(size_t offset, WasmVal val);
+ // Update the memory region, e.g. after external GrowMemory.
+ void UpdateMemory(byte* mem_start, uint32_t mem_size);
//==========================================================================
// Testing functionality.
@@ -239,9 +271,8 @@ class V8_EXPORT_PRIVATE WasmInterpreter {
// Computes the control transfers for the given bytecode. Used internally in
// the interpreter, but exposed for testing.
- static ControlTransferMap ComputeControlTransfersForTesting(Zone* zone,
- const byte* start,
- const byte* end);
+ static ControlTransferMap ComputeControlTransfersForTesting(
+ Zone* zone, const WasmModule* module, const byte* start, const byte* end);
private:
Zone zone_;
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index ab6c71fd37..3dde623594 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -4,9 +4,6 @@
#include "src/api-natives.h"
#include "src/api.h"
-#include "src/asmjs/asm-js.h"
-#include "src/asmjs/asm-typer.h"
-#include "src/asmjs/asm-wasm-builder.h"
#include "src/assert-scope.h"
#include "src/ast/ast.h"
#include "src/execution.h"
@@ -32,6 +29,17 @@ namespace v8 {
namespace {
+#define ASSIGN(type, var, expr) \
+ Local<type> var; \
+ do { \
+ if (!expr.ToLocal(&var)) { \
+ DCHECK(i_isolate->has_scheduled_exception()); \
+ return; \
+ } else { \
+ DCHECK(!i_isolate->has_scheduled_exception()); \
+ } \
+ } while (false)
+
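Expanded by hand, one use of the macro reads as follows; the do/while(false) makes the multi-statement body act as a single statement, while the Local is declared outside the loop so it remains in scope afterwards:

    // ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
    // expands, roughly, to:
    Local<Promise::Resolver> resolver;
    do {
      if (!Promise::Resolver::New(context).ToLocal(&resolver)) {
        DCHECK(i_isolate->has_scheduled_exception());
        return;  // bail out; an exception is already scheduled
      } else {
        DCHECK(!i_isolate->has_scheduled_exception());
      }
    } while (false);
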
// TODO(wasm): move brand check to the respective types, and don't throw
// in it, rather, use a provided ErrorThrower, or let caller handle it.
static bool HasBrand(i::Handle<i::Object> value, i::Handle<i::Symbol> sym) {
@@ -117,31 +125,58 @@ i::wasm::ModuleWireBytes GetFirstArgumentAsBytes(
return i::wasm::ModuleWireBytes(start, start + length);
}
-i::MaybeHandle<i::JSReceiver> GetSecondArgumentAsImports(
- const v8::FunctionCallbackInfo<v8::Value>& args, ErrorThrower* thrower) {
- if (args.Length() < 2) return {};
- if (args[1]->IsUndefined()) return {};
+i::MaybeHandle<i::JSReceiver> GetValueAsImports(Local<Value> arg,
+ ErrorThrower* thrower) {
+ if (arg->IsUndefined()) return {};
- if (!args[1]->IsObject()) {
+ if (!arg->IsObject()) {
thrower->TypeError("Argument 1 must be an object");
return {};
}
- Local<Object> obj = Local<Object>::Cast(args[1]);
+ Local<Object> obj = Local<Object>::Cast(arg);
return i::Handle<i::JSReceiver>::cast(v8::Utils::OpenHandle(*obj));
}
+void RejectResponseAPI(const v8::FunctionCallbackInfo<v8::Value>& args,
+ ErrorThrower* thrower) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+ HandleScope scope(isolate);
+ Local<Context> context = isolate->GetCurrentContext();
+
+ ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
+ Local<Promise> module_promise = resolver->GetPromise();
+ args.GetReturnValue().Set(module_promise);
+ thrower->TypeError(
+ "Argument 0 must be provided and must be a Response or Response promise");
+ auto maybe = resolver->Reject(context, Utils::ToLocal(thrower->Reify()));
+ CHECK_IMPLIES(!maybe.FromMaybe(false), i_isolate->has_scheduled_exception());
+}
+
+void WebAssemblyCompileStreaming(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
+ if (!i_isolate->wasm_compile_callback()(args)) {
+ ErrorThrower thrower(i_isolate, "WebAssembly.compileStreaming()");
+ RejectResponseAPI(args, &thrower);
+ }
+}
+
// WebAssembly.compile(bytes) -> Promise
void WebAssemblyCompile(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
if (i_isolate->wasm_compile_callback()(args)) return;
HandleScope scope(isolate);
ErrorThrower thrower(i_isolate, "WebAssembly.compile()");
Local<Context> context = isolate->GetCurrentContext();
- v8::Local<v8::Promise::Resolver> resolver;
- if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return;
+ ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
return_value.Set(resolver->GetPromise());
@@ -167,11 +202,10 @@ void WebAssemblyValidate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
if (!thrower.error() &&
- i::wasm::SyncValidate(reinterpret_cast<i::Isolate*>(isolate), &thrower,
- bytes)) {
+ i::wasm::SyncValidate(reinterpret_cast<i::Isolate*>(isolate), bytes)) {
return_value.Set(v8::True(isolate));
} else {
- if (thrower.wasm_error()) thrower.Reify(); // Clear error.
+ if (thrower.wasm_error()) thrower.Reset(); // Clear error.
return_value.Set(v8::False(isolate));
}
}
@@ -253,10 +287,91 @@ void WebAssemblyModuleCustomSections(
args.GetReturnValue().Set(Utils::ToLocal(custom_sections));
}
+MaybeLocal<Value> WebAssemblyInstantiateImpl(Isolate* isolate,
+ Local<Value> module,
+ Local<Value> ffi) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+
+ ErrorThrower thrower(i_isolate, "WebAssembly Instantiation");
+ i::MaybeHandle<i::JSReceiver> maybe_imports =
+ GetValueAsImports(ffi, &thrower);
+ if (thrower.error()) return {};
+
+ i::Handle<i::WasmModuleObject> module_obj =
+ i::Handle<i::WasmModuleObject>::cast(
+ Utils::OpenHandle(Object::Cast(*module)));
+ i::MaybeHandle<i::Object> instance_object =
+ i::wasm::SyncInstantiate(i_isolate, &thrower, module_obj, maybe_imports,
+ i::MaybeHandle<i::JSArrayBuffer>());
+
+ if (instance_object.is_null()) {
+ // TODO(wasm): this *should* mean there's an error to throw, but
+ // we sometimes exit the instantiation pipeline without throwing.
+ // v8:6232.
+ return {};
+ }
+ return Utils::ToLocal(instance_object.ToHandleChecked());
+}
+
+// Entered as an internal implementation detail of sync and async instantiate.
+// args[0] *must* be a WebAssembly.Module.
+void WebAssemblyInstantiateImplCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 1);
+ v8::Isolate* isolate = args.GetIsolate();
+ MicrotasksScope does_not_run_microtasks(isolate,
+ MicrotasksScope::kDoNotRunMicrotasks);
+
+ HandleScope scope(args.GetIsolate());
+ Local<Value> module = args[0];
+ Local<Value> ffi = args.Data();
+ Local<Value> instance;
+ if (WebAssemblyInstantiateImpl(isolate, module, ffi).ToLocal(&instance)) {
+ args.GetReturnValue().Set(instance);
+ }
+}
+
+void WebAssemblyInstantiateToPairCallback(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ DCHECK_GE(args.Length(), 1);
+ Isolate* isolate = args.GetIsolate();
+ MicrotasksScope does_not_run_microtasks(isolate,
+ MicrotasksScope::kDoNotRunMicrotasks);
+
+ HandleScope scope(args.GetIsolate());
+
+ Local<Context> context = isolate->GetCurrentContext();
+ Local<Value> module = args[0];
+
+ const uint8_t* instance_str = reinterpret_cast<const uint8_t*>("instance");
+ const uint8_t* module_str = reinterpret_cast<const uint8_t*>("module");
+ Local<Value> instance;
+ if (!WebAssemblyInstantiateImpl(isolate, module, args.Data())
+ .ToLocal(&instance)) {
+ return;
+ }
+
+ Local<Object> ret = Object::New(isolate);
+ Local<String> instance_name =
+ String::NewFromOneByte(isolate, instance_str,
+ NewStringType::kInternalized)
+ .ToLocalChecked();
+ Local<String> module_name =
+ String::NewFromOneByte(isolate, module_str, NewStringType::kInternalized)
+ .ToLocalChecked();
+
+ CHECK(ret->CreateDataProperty(context, instance_name, instance).IsJust());
+ CHECK(ret->CreateDataProperty(context, module_name, module).IsJust());
+ args.GetReturnValue().Set(ret);
+}
+
// new WebAssembly.Instance(module, imports) -> WebAssembly.Instance
void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ Isolate* isolate = args.GetIsolate();
+ MicrotasksScope does_not_run_microtasks(isolate,
+ MicrotasksScope::kDoNotRunMicrotasks);
+
HandleScope scope(args.GetIsolate());
- v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
if (i_isolate->wasm_instance_callback()(args)) return;
@@ -265,14 +380,41 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
auto maybe_module = GetFirstArgumentAsModule(args, &thrower);
if (thrower.error()) return;
- auto maybe_imports = GetSecondArgumentAsImports(args, &thrower);
- if (thrower.error()) return;
+ // If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
+ // We'll check for that in WebAssemblyInstantiateImpl.
+ Local<Value> data = args[1];
- i::MaybeHandle<i::Object> instance_object = i::wasm::SyncInstantiate(
- i_isolate, &thrower, maybe_module.ToHandleChecked(), maybe_imports,
- i::MaybeHandle<i::JSArrayBuffer>());
- if (instance_object.is_null()) return;
- args.GetReturnValue().Set(Utils::ToLocal(instance_object.ToHandleChecked()));
+ Local<Value> instance;
+ if (WebAssemblyInstantiateImpl(isolate, args[0], data).ToLocal(&instance)) {
+ args.GetReturnValue().Set(instance);
+ }
+}
+
+void WebAssemblyInstantiateStreaming(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Isolate* isolate = args.GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ // We use i_isolate in the DCHECKs inside the ASSIGN statements.
+ USE(i_isolate);
+ MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
+ HandleScope scope(isolate);
+
+ Local<Context> context = isolate->GetCurrentContext();
+ ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
+ Local<Value> first_arg_value = args[0];
+
+ ASSIGN(Function, compileStreaming,
+ Function::New(context, WebAssemblyCompileStreaming));
+ ASSIGN(Value, compile_retval,
+ compileStreaming->Call(context, args.Holder(), 1, &first_arg_value));
+ Local<Promise> module_promise = Local<Promise>::Cast(compile_retval);
+
+ DCHECK(!module_promise.IsEmpty());
+ Local<Value> data = args[1];
+ ASSIGN(Function, instantiate_impl,
+ Function::New(context, WebAssemblyInstantiateToPairCallback, data));
+ ASSIGN(Promise, result, module_promise->Then(context, instantiate_impl));
+ args.GetReturnValue().Set(result);
}
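
This is promise chaining done from C++: compileStreaming yields a promise of a WebAssembly.Module, and Then() attaches the pair-building instantiator, with the imports smuggled in through Function::New's data slot. A hedged distillation of the pattern (error handling elided; imports stands for args[1]):

    Local<Function> on_module =
        Function::New(context, WebAssemblyInstantiateToPairCallback, imports)
            .ToLocalChecked();
    Local<Promise> result =
        module_promise->Then(context, on_module).ToLocalChecked();
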
// WebAssembly.instantiate(module, imports) -> WebAssembly.Instance
@@ -281,6 +423,7 @@ void WebAssemblyInstance(const v8::FunctionCallbackInfo<v8::Value>& args) {
void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Isolate* isolate = args.GetIsolate();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ MicrotasksScope runs_microtasks(isolate, MicrotasksScope::kRunMicrotasks);
if (i_isolate->wasm_instantiate_callback()(args)) return;
ErrorThrower thrower(i_isolate, "WebAssembly.instantiate()");
@@ -290,10 +433,9 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
Local<Context> context = isolate->GetCurrentContext();
i::Handle<i::Context> i_context = Utils::OpenHandle(*context);
- v8::Local<v8::Promise::Resolver> resolver;
- if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return;
- v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(resolver->GetPromise());
+ ASSIGN(Promise::Resolver, resolver, Promise::Resolver::New(context));
+ Local<Promise> module_promise = resolver->GetPromise();
+ args.GetReturnValue().Set(module_promise);
if (args.Length() < 1) {
thrower.TypeError(
@@ -305,7 +447,8 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- i::Handle<i::Object> first_arg = Utils::OpenHandle(*args[0]);
+ Local<Value> first_arg_value = args[0];
+ i::Handle<i::Object> first_arg = Utils::OpenHandle(*first_arg_value);
if (!first_arg->IsJSObject()) {
thrower.TypeError(
"Argument 0 must be a buffer source or a WebAssembly.Module object");
@@ -315,31 +458,27 @@ void WebAssemblyInstantiate(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
- auto maybe_imports = GetSecondArgumentAsImports(args, &thrower);
- if (thrower.error()) {
- auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false),
- i_isolate->has_scheduled_exception());
- return;
- }
- i::Handle<i::JSPromise> promise = Utils::OpenHandle(*resolver->GetPromise());
+ FunctionCallback instantiator = nullptr;
if (HasBrand(first_arg, i::Handle<i::Symbol>(i_context->wasm_module_sym()))) {
- // WebAssembly.instantiate(module, imports) -> WebAssembly.Instance
- auto module_object = GetFirstArgumentAsModule(args, &thrower);
- i::wasm::AsyncInstantiate(i_isolate, promise,
- module_object.ToHandleChecked(), maybe_imports);
+ module_promise = resolver->GetPromise();
+ if (!resolver->Resolve(context, first_arg_value).IsJust()) return;
+ instantiator = WebAssemblyInstantiateImplCallback;
} else {
- // WebAssembly.instantiate(bytes, imports) -> {module, instance}
- auto bytes = GetFirstArgumentAsBytes(args, &thrower);
- if (thrower.error()) {
- auto maybe = resolver->Reject(context, Utils::ToLocal(thrower.Reify()));
- CHECK_IMPLIES(!maybe.FromMaybe(false),
- i_isolate->has_scheduled_exception());
- return;
- }
- i::wasm::AsyncCompileAndInstantiate(i_isolate, promise, bytes,
- maybe_imports);
+ ASSIGN(Function, async_compile, Function::New(context, WebAssemblyCompile));
+ ASSIGN(Value, async_compile_retval,
+ async_compile->Call(context, args.Holder(), 1, &first_arg_value));
+ module_promise = Local<Promise>::Cast(async_compile_retval);
+ instantiator = WebAssemblyInstantiateToPairCallback;
}
+ DCHECK(!module_promise.IsEmpty());
+ DCHECK_NOT_NULL(instantiator);
+ // If args.Length < 2, this will be undefined - see FunctionCallbackInfo.
+ // We'll check for that in WebAssemblyInstantiateImpl.
+ Local<Value> data = args[1];
+ ASSIGN(Function, instantiate_impl,
+ Function::New(context, instantiator, data));
+ ASSIGN(Promise, result, module_promise->Then(context, instantiate_impl));
+ args.GetReturnValue().Set(result);
}
bool GetIntegerProperty(v8::Isolate* isolate, ErrorThrower* thrower,
@@ -522,8 +661,7 @@ void WebAssemblyTableGrow(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
int new_size = static_cast<int>(new_size64);
- i::WasmTableObject::Grow(i_isolate, receiver,
- static_cast<uint32_t>(new_size - old_size));
+ receiver->grow(i_isolate, static_cast<uint32_t>(new_size - old_size));
if (new_size != old_size) {
i::Handle<i::FixedArray> new_array =
@@ -778,8 +916,7 @@ void WasmJs::Install(Isolate* isolate) {
// Setup WebAssembly
Handle<String> name = v8_str(isolate, "WebAssembly");
Handle<JSFunction> cons = factory->NewFunction(name);
- JSFunction::SetInstancePrototype(
- cons, Handle<Object>(context->initial_object_prototype(), isolate));
+ JSFunction::SetPrototype(cons, isolate->initial_object_prototype());
cons->shared()->set_instance_class_name(*name);
Handle<JSObject> webassembly = factory->NewJSObject(cons, TENURED);
PropertyAttributes attributes = static_cast<PropertyAttributes>(DONT_ENUM);
@@ -789,8 +926,12 @@ void WasmJs::Install(Isolate* isolate) {
JSObject::AddProperty(webassembly, factory->to_string_tag_symbol(),
v8_str(isolate, "WebAssembly"), ro_attributes);
InstallFunc(isolate, webassembly, "compile", WebAssemblyCompile, 1);
+ InstallFunc(isolate, webassembly, "compileStreaming",
+ WebAssemblyCompileStreaming, 1);
InstallFunc(isolate, webassembly, "validate", WebAssemblyValidate, 1);
InstallFunc(isolate, webassembly, "instantiate", WebAssemblyInstantiate, 1);
+ InstallFunc(isolate, webassembly, "instantiateStreaming",
+ WebAssemblyInstantiateStreaming, 1);
// Setup Module
Handle<JSFunction> module_constructor =
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index a3d52eba79..07a58b7ea8 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -12,26 +12,18 @@
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-decoder.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/v8memory.h"
-#if DEBUG
-#define TRACE(...) \
- do { \
- if (FLAG_trace_wasm_encoder) PrintF(__VA_ARGS__); \
- } while (false)
-#else
-#define TRACE(...)
-#endif
-
namespace v8 {
namespace internal {
namespace wasm {
+namespace {
+
// Emit a section code and the size as a padded varint that can be patched
// later.
size_t EmitSection(SectionCode code, ZoneBuffer& buffer) {
@@ -48,14 +40,14 @@ void FixupSection(ZoneBuffer& buffer, size_t start) {
kPaddedVarInt32Size));
}
+} // namespace
+
WasmFunctionBuilder::WasmFunctionBuilder(WasmModuleBuilder* builder)
: builder_(builder),
locals_(builder->zone()),
signature_index_(0),
func_index_(static_cast<uint32_t>(builder->functions_.size())),
- body_(builder->zone()),
- name_(builder->zone()),
- exported_names_(builder->zone()),
+ body_(builder->zone(), 256),
i32_temps_(builder->zone()),
i64_temps_(builder->zone()),
f32_temps_(builder->zone()),
@@ -63,21 +55,9 @@ WasmFunctionBuilder::WasmFunctionBuilder(WasmModuleBuilder* builder)
direct_calls_(builder->zone()),
asm_offsets_(builder->zone(), 8) {}
-void WasmFunctionBuilder::EmitVarInt(int32_t val) {
- byte buffer[5];
- byte* ptr = buffer;
- LEBHelper::write_i32v(&ptr, val);
- DCHECK_GE(5, ptr - buffer);
- body_.insert(body_.end(), buffer, ptr);
-}
+void WasmFunctionBuilder::EmitI32V(int32_t val) { body_.write_i32v(val); }
-void WasmFunctionBuilder::EmitVarUint(uint32_t val) {
- byte buffer[5];
- byte* ptr = buffer;
- LEBHelper::write_u32v(&ptr, val);
- DCHECK_GE(5, ptr - buffer);
- body_.insert(body_.end(), buffer, ptr);
-}
+void WasmFunctionBuilder::EmitU32V(uint32_t val) { body_.write_u32v(val); }
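
EmitI32V/EmitU32V now delegate to the buffer's LEB128 ("varint") writers. For reference, a self-contained sketch of the unsigned encoding those helpers implement (standard LEB128, as in the wasm binary format):

    #include <cstdint>
    #include <vector>

    void WriteU32V(std::vector<uint8_t>& out, uint32_t value) {
      do {
        uint8_t b = value & 0x7f;   // low 7 bits of the remaining value
        value >>= 7;
        if (value != 0) b |= 0x80;  // continuation bit: more bytes follow
        out.push_back(b);
      } while (value != 0);
    }
    // Example: 300 encodes as {0xAC, 0x02}. A uint32_t needs at most
    // 5 bytes, which is where kMaxVarInt32Size comes from.
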
void WasmFunctionBuilder::SetSignature(FunctionSig* sig) {
DCHECK(!locals_.has_sig());
@@ -91,52 +71,62 @@ uint32_t WasmFunctionBuilder::AddLocal(ValueType type) {
}
void WasmFunctionBuilder::EmitGetLocal(uint32_t local_index) {
- EmitWithVarUint(kExprGetLocal, local_index);
+ EmitWithU32V(kExprGetLocal, local_index);
}
void WasmFunctionBuilder::EmitSetLocal(uint32_t local_index) {
- EmitWithVarUint(kExprSetLocal, local_index);
+ EmitWithU32V(kExprSetLocal, local_index);
}
void WasmFunctionBuilder::EmitTeeLocal(uint32_t local_index) {
- EmitWithVarUint(kExprTeeLocal, local_index);
+ EmitWithU32V(kExprTeeLocal, local_index);
}
void WasmFunctionBuilder::EmitCode(const byte* code, uint32_t code_size) {
- for (size_t i = 0; i < code_size; ++i) {
- body_.push_back(code[i]);
- }
+ body_.write(code, code_size);
}
-void WasmFunctionBuilder::Emit(WasmOpcode opcode) {
- body_.push_back(static_cast<byte>(opcode));
-}
+void WasmFunctionBuilder::Emit(WasmOpcode opcode) { body_.write_u8(opcode); }
void WasmFunctionBuilder::EmitWithU8(WasmOpcode opcode, const byte immediate) {
- body_.push_back(static_cast<byte>(opcode));
- body_.push_back(immediate);
+ body_.write_u8(opcode);
+ body_.write_u8(immediate);
}
void WasmFunctionBuilder::EmitWithU8U8(WasmOpcode opcode, const byte imm1,
const byte imm2) {
- body_.push_back(static_cast<byte>(opcode));
- body_.push_back(imm1);
- body_.push_back(imm2);
+ body_.write_u8(opcode);
+ body_.write_u8(imm1);
+ body_.write_u8(imm2);
}
-void WasmFunctionBuilder::EmitWithVarInt(WasmOpcode opcode, int32_t immediate) {
- body_.push_back(static_cast<byte>(opcode));
- EmitVarInt(immediate);
+void WasmFunctionBuilder::EmitWithI32V(WasmOpcode opcode, int32_t immediate) {
+ body_.write_u8(opcode);
+ body_.write_i32v(immediate);
}
-void WasmFunctionBuilder::EmitWithVarUint(WasmOpcode opcode,
- uint32_t immediate) {
- body_.push_back(static_cast<byte>(opcode));
- EmitVarUint(immediate);
+void WasmFunctionBuilder::EmitWithU32V(WasmOpcode opcode, uint32_t immediate) {
+ body_.write_u8(opcode);
+ body_.write_u32v(immediate);
}
void WasmFunctionBuilder::EmitI32Const(int32_t value) {
- EmitWithVarInt(kExprI32Const, value);
+ EmitWithI32V(kExprI32Const, value);
+}
+
+void WasmFunctionBuilder::EmitI64Const(int64_t value) {
+ body_.write_u8(kExprI64Const);
+ body_.write_i64v(value);
+}
+
+void WasmFunctionBuilder::EmitF32Const(float value) {
+ body_.write_u8(kExprF32Const);
+ body_.write_f32(value);
+}
+
+void WasmFunctionBuilder::EmitF64Const(double value) {
+ body_.write_u8(kExprF64Const);
+ body_.write_f64(value);
}
void WasmFunctionBuilder::EmitDirectCallIndex(uint32_t index) {
@@ -144,19 +134,11 @@ void WasmFunctionBuilder::EmitDirectCallIndex(uint32_t index) {
call.offset = body_.size();
call.direct_index = index;
direct_calls_.push_back(call);
- byte code[] = {U32V_5(0)};
- EmitCode(code, sizeof(code));
-}
-
-void WasmFunctionBuilder::ExportAs(Vector<const char> name) {
- exported_names_.push_back(ZoneVector<char>(
- name.start(), name.start() + name.length(), builder_->zone()));
+ byte placeholder_bytes[kMaxVarInt32Size] = {0};
+ EmitCode(placeholder_bytes, arraysize(placeholder_bytes));
}
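
The placeholder works because LEB128 has a padded form: reserve five bytes now, patch the real call index in place later without shifting the body. A hedged standalone sketch of such a padded, fixed-width encoding (what the later patch_u32v call must produce; not V8's exact code):

    #include <cstdint>

    // Writes exactly 5 bytes regardless of the value's magnitude.
    void PatchPaddedU32(uint8_t* p, uint32_t value) {
      for (int i = 0; i < 4; ++i) {
        p[i] = 0x80 | (value & 0x7f);  // keep the continuation bit set
        value >>= 7;
      }
      p[4] = value & 0x7f;  // final byte clears the continuation bit
    }
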
-void WasmFunctionBuilder::SetName(Vector<const char> name) {
- name_.resize(name.length());
- memcpy(name_.data(), name.start(), name.length());
-}
+void WasmFunctionBuilder::SetName(Vector<const char> name) { name_ = name; }
void WasmFunctionBuilder::AddAsmWasmOffset(int call_position,
int to_number_position) {
@@ -185,31 +167,15 @@ void WasmFunctionBuilder::SetAsmFunctionStartPosition(int position) {
last_asm_source_position_ = position;
}
-void WasmFunctionBuilder::StashCode(std::vector<byte>* dst, size_t position) {
- if (dst == nullptr) {
- body_.resize(position);
- return;
- }
+void WasmFunctionBuilder::DeleteCodeAfter(size_t position) {
DCHECK_LE(position, body_.size());
- size_t len = body_.size() - position;
- dst->resize(len);
- memcpy(dst->data(), body_.data() + position, len);
- body_.resize(position);
+ body_.Truncate(position);
}
void WasmFunctionBuilder::WriteSignature(ZoneBuffer& buffer) const {
buffer.write_u32v(signature_index_);
}
-void WasmFunctionBuilder::WriteExports(ZoneBuffer& buffer) const {
- for (auto name : exported_names_) {
- buffer.write_size(name.size());
- buffer.write(reinterpret_cast<const byte*>(name.data()), name.size());
- buffer.write_u8(kExternalFunction);
- buffer.write_size(func_index_ + builder_->function_imports_.size());
- }
-}
-
void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
size_t locals_size = locals_.Size();
buffer.write_size(locals_size + body_.size());
@@ -219,7 +185,7 @@ void WasmFunctionBuilder::WriteBody(ZoneBuffer& buffer) const {
(*ptr) += locals_size; // UGLY: manual bump of position pointer
if (body_.size() > 0) {
size_t base = buffer.offset();
- buffer.write(&body_[0], body_.size());
+ buffer.write(body_.begin(), body_.size());
for (DirectCallIndex call : direct_calls_) {
buffer.patch_u32v(
base + call.offset,
@@ -250,6 +216,7 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
: zone_(zone),
signatures_(zone),
function_imports_(zone),
+ function_exports_(zone),
global_imports_(zone),
functions_(zone),
data_segments_(zone),
@@ -304,9 +271,13 @@ uint32_t WasmModuleBuilder::AddSignature(FunctionSig* sig) {
}
uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
- uint32_t ret = static_cast<uint32_t>(indirect_functions_.size());
+ uint32_t index = static_cast<uint32_t>(indirect_functions_.size());
+ DCHECK_GE(FLAG_wasm_max_table_size, index);
+ if (count > FLAG_wasm_max_table_size - index) {
+ return std::numeric_limits<uint32_t>::max();
+ }
indirect_functions_.resize(indirect_functions_.size() + count);
- return ret;
+ return index;
}
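
Note the overflow-safe shape of the new check: it compares count against the remaining headroom instead of computing index + count, which could wrap around. The same idiom in isolation (hypothetical standalone names):

    #include <cstdint>
    #include <limits>

    // Returns the first reserved index, or uint32_t max on overflow.
    // Precondition: current_size <= max (the DCHECK above).
    uint32_t Reserve(uint32_t current_size, uint32_t count, uint32_t max) {
      if (count > max - current_size) {  // never forms current_size + count
        return std::numeric_limits<uint32_t>::max();
      }
      return current_size;
    }
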
void WasmModuleBuilder::SetIndirectFunction(uint32_t indirect,
@@ -314,16 +285,15 @@ void WasmModuleBuilder::SetIndirectFunction(uint32_t indirect,
indirect_functions_[indirect] = direct;
}
-uint32_t WasmModuleBuilder::AddImport(const char* name, int name_length,
+uint32_t WasmModuleBuilder::AddImport(Vector<const char> name,
FunctionSig* sig) {
- function_imports_.push_back({AddSignature(sig), name, name_length});
+ function_imports_.push_back({name, AddSignature(sig)});
return static_cast<uint32_t>(function_imports_.size() - 1);
}
-uint32_t WasmModuleBuilder::AddGlobalImport(const char* name, int name_length,
+uint32_t WasmModuleBuilder::AddGlobalImport(Vector<const char> name,
ValueType type) {
- global_imports_.push_back(
- {WasmOpcodes::ValueTypeCodeFor(type), name, name_length});
+ global_imports_.push_back({name, WasmOpcodes::ValueTypeCodeFor(type)});
return static_cast<uint32_t>(global_imports_.size() - 1);
}
@@ -331,6 +301,11 @@ void WasmModuleBuilder::MarkStartFunction(WasmFunctionBuilder* function) {
start_function_index_ = function->func_index();
}
+void WasmModuleBuilder::AddExport(Vector<const char> name,
+ WasmFunctionBuilder* function) {
+ function_exports_.push_back({name, function->func_index()});
+}
+
uint32_t WasmModuleBuilder::AddGlobal(ValueType type, bool exported,
bool mutability,
const WasmInitExpr& init) {
@@ -339,10 +314,7 @@ uint32_t WasmModuleBuilder::AddGlobal(ValueType type, bool exported,
}
void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
- uint32_t exports = 0;
-
// == Emit magic =============================================================
- TRACE("emit magic\n");
buffer.write_u32(kWasmMagic);
buffer.write_u32(kWasmVersion);
@@ -370,19 +342,15 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
size_t start = EmitSection(kImportSectionCode, buffer);
buffer.write_size(global_imports_.size() + function_imports_.size());
for (auto import : global_imports_) {
- buffer.write_u32v(0); // module name length
- buffer.write_u32v(import.name_length); // field name length
- buffer.write(reinterpret_cast<const byte*>(import.name), // field name
- import.name_length);
+ buffer.write_u32v(0); // module name (length)
+ buffer.write_string(import.name); // field name
buffer.write_u8(kExternalGlobal);
buffer.write_u8(import.type_code);
buffer.write_u8(0); // immutable
}
for (auto import : function_imports_) {
- buffer.write_u32v(0); // module name length
- buffer.write_u32v(import.name_length); // field name length
- buffer.write(reinterpret_cast<const byte*>(import.name), // field name
- import.name_length);
+ buffer.write_u32v(0); // module name (length)
+ buffer.write_string(import.name); // field name
buffer.write_u8(kExternalFunction);
buffer.write_u32v(import.sig_index);
}
@@ -396,8 +364,7 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_size(functions_.size());
for (auto function : functions_) {
function->WriteSignature(buffer);
- exports += static_cast<uint32_t>(function->exported_names_.size());
- if (!function->name_.empty()) ++num_function_names;
+ if (!function->name_.is_empty()) ++num_function_names;
}
FixupSection(buffer, start);
}
@@ -432,59 +399,51 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
buffer.write_u8(WasmOpcodes::ValueTypeCodeFor(global.type));
buffer.write_u8(global.mutability ? 1 : 0);
switch (global.init.kind) {
- case WasmInitExpr::kI32Const: {
+ case WasmInitExpr::kI32Const:
DCHECK_EQ(kWasmI32, global.type);
- const byte code[] = {WASM_I32V_5(global.init.val.i32_const)};
- buffer.write(code, sizeof(code));
+ buffer.write_u8(kExprI32Const);
+ buffer.write_i32v(global.init.val.i32_const);
break;
- }
- case WasmInitExpr::kI64Const: {
+ case WasmInitExpr::kI64Const:
DCHECK_EQ(kWasmI64, global.type);
- const byte code[] = {WASM_I64V_10(global.init.val.i64_const)};
- buffer.write(code, sizeof(code));
+ buffer.write_u8(kExprI64Const);
+ buffer.write_i64v(global.init.val.i64_const);
break;
- }
- case WasmInitExpr::kF32Const: {
+ case WasmInitExpr::kF32Const:
DCHECK_EQ(kWasmF32, global.type);
- const byte code[] = {WASM_F32(global.init.val.f32_const)};
- buffer.write(code, sizeof(code));
+ buffer.write_u8(kExprF32Const);
+ buffer.write_f32(global.init.val.f32_const);
break;
- }
- case WasmInitExpr::kF64Const: {
+ case WasmInitExpr::kF64Const:
DCHECK_EQ(kWasmF64, global.type);
- const byte code[] = {WASM_F64(global.init.val.f64_const)};
- buffer.write(code, sizeof(code));
+ buffer.write_u8(kExprF64Const);
+ buffer.write_f64(global.init.val.f64_const);
break;
- }
- case WasmInitExpr::kGlobalIndex: {
- const byte code[] = {kExprGetGlobal,
- U32V_5(global.init.val.global_index)};
- buffer.write(code, sizeof(code));
+ case WasmInitExpr::kGlobalIndex:
+ buffer.write_u8(kExprGetGlobal);
+ buffer.write_u32v(global.init.val.global_index);
break;
- }
default: {
// No initializer, emit a default value.
switch (global.type) {
- case kWasmI32: {
- const byte code[] = {WASM_I32V_1(0)};
- buffer.write(code, sizeof(code));
+ case kWasmI32:
+ buffer.write_u8(kExprI32Const);
+ // LEB encoding of 0.
+ buffer.write_u8(0);
break;
- }
- case kWasmI64: {
- const byte code[] = {WASM_I64V_1(0)};
- buffer.write(code, sizeof(code));
+ case kWasmI64:
+ buffer.write_u8(kExprI64Const);
+ // LEB encoding of 0.
+ buffer.write_u8(0);
break;
- }
- case kWasmF32: {
- const byte code[] = {WASM_F32(0.0)};
- buffer.write(code, sizeof(code));
+ case kWasmF32:
+ buffer.write_u8(kExprF32Const);
+ buffer.write_f32(0.f);
break;
- }
- case kWasmF64: {
- const byte code[] = {WASM_F64(0.0)};
- buffer.write(code, sizeof(code));
+ case kWasmF64:
+ buffer.write_u8(kExprF64Const);
+ buffer.write_f64(0.);
break;
- }
default:
UNREACHABLE();
}
@@ -496,10 +455,15 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
}
// == emit exports ===========================================================
- if (exports > 0) {
+ if (!function_exports_.empty()) {
size_t start = EmitSection(kExportSectionCode, buffer);
- buffer.write_u32v(exports);
- for (auto function : functions_) function->WriteExports(buffer);
+ buffer.write_size(function_exports_.size());
+ for (auto function_export : function_exports_) {
+ buffer.write_string(function_export.name);
+ buffer.write_u8(kExternalFunction);
+ buffer.write_size(function_export.function_index +
+ function_imports_.size());
+ }
FixupSection(buffer, start);
}
@@ -573,21 +537,17 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer& buffer) const {
uint32_t function_index = 0;
for (; function_index < num_imports; ++function_index) {
const WasmFunctionImport* import = &function_imports_[function_index];
- DCHECK_NOT_NULL(import->name);
+ DCHECK(!import->name.is_empty());
buffer.write_u32v(function_index);
- uint32_t name_len = static_cast<uint32_t>(import->name_length);
- buffer.write_u32v(name_len);
- buffer.write(reinterpret_cast<const byte*>(import->name), name_len);
+ buffer.write_string(import->name);
}
if (num_function_names > 0) {
for (auto function : functions_) {
DCHECK_EQ(function_index,
function->func_index() + function_imports_.size());
- if (!function->name_.empty()) {
+ if (!function->name_.is_empty()) {
buffer.write_u32v(function_index);
- buffer.write_size(function->name_.size());
- buffer.write(reinterpret_cast<const byte*>(function->name_.data()),
- function->name_.size());
+ buffer.write_string(function->name_);
}
++function_index;
}
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index 61dd269020..a75df4313d 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -9,7 +9,7 @@
#include "src/zone/zone-containers.h"
#include "src/wasm/leb-helper.h"
-#include "src/wasm/wasm-macro-gen.h"
+#include "src/wasm/local-decl-encoder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"
@@ -20,7 +20,7 @@ namespace wasm {
class ZoneBuffer : public ZoneObject {
public:
- static const uint32_t kInitialSize = 4096;
+ static constexpr size_t kInitialSize = 1024;
explicit ZoneBuffer(Zone* zone, size_t initial = kInitialSize)
: zone_(zone), buffer_(reinterpret_cast<byte*>(zone->New(initial))) {
pos_ = buffer_;
@@ -44,6 +44,12 @@ class ZoneBuffer : public ZoneObject {
pos_ += 4;
}
+ void write_u64(uint64_t x) {
+ EnsureSpace(8);
+ WriteLittleEndianValue<uint64_t>(pos_, x);
+ pos_ += 8;
+ }
+
void write_u32v(uint32_t val) {
EnsureSpace(kMaxVarInt32Size);
LEBHelper::write_u32v(&pos_, val);
@@ -54,18 +60,37 @@ class ZoneBuffer : public ZoneObject {
LEBHelper::write_i32v(&pos_, val);
}
+ void write_u64v(uint64_t val) {
+ EnsureSpace(kMaxVarInt64Size);
+ LEBHelper::write_u64v(&pos_, val);
+ }
+
+ void write_i64v(int64_t val) {
+ EnsureSpace(kMaxVarInt64Size);
+ LEBHelper::write_i64v(&pos_, val);
+ }
+
void write_size(size_t val) {
EnsureSpace(kMaxVarInt32Size);
DCHECK_EQ(val, static_cast<uint32_t>(val));
LEBHelper::write_u32v(&pos_, static_cast<uint32_t>(val));
}
+ void write_f32(float val) { write_u32(bit_cast<uint32_t>(val)); }
+
+ void write_f64(double val) { write_u64(bit_cast<uint64_t>(val)); }
+
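write_f32/write_f64 reinterpret the float's bit pattern as an integer and emit it little-endian. A sketch of the memcpy-based bit_cast they rely on (assumed to match V8's bit_cast semantics; C++20 later standardized this as std::bit_cast):

    #include <cstdint>
    #include <cstring>

    inline uint32_t BitCastF32(float v) {
      static_assert(sizeof(uint32_t) == sizeof(float), "size mismatch");
      uint32_t out;
      std::memcpy(&out, &v, sizeof(out));  // well-defined, unlike a pointer cast
      return out;
    }
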
void write(const byte* data, size_t size) {
EnsureSpace(size);
memcpy(pos_, data, size);
pos_ += size;
}
+ void write_string(Vector<const char> name) {
+ write_size(name.length());
+ write(reinterpret_cast<const byte*>(name.start()), name.length());
+ }
+
size_t reserve_u32v() {
size_t off = offset();
EnsureSpace(kMaxVarInt32Size);
@@ -88,6 +113,11 @@ class ZoneBuffer : public ZoneObject {
}
}
+ void patch_u8(size_t offset, byte val) {
+ DCHECK_GE(size(), offset);
+ buffer_[offset] = val;
+ }
+
size_t offset() const { return static_cast<size_t>(pos_ - buffer_); }
size_t size() const { return static_cast<size_t>(pos_ - buffer_); }
const byte* begin() const { return buffer_; }
@@ -95,7 +125,7 @@ class ZoneBuffer : public ZoneObject {
void EnsureSpace(size_t size) {
if ((pos_ + size) > end_) {
- size_t new_size = 4096 + size + (end_ - buffer_) * 3;
+ size_t new_size = size + (end_ - buffer_) * 2;
byte* new_buffer = reinterpret_cast<byte*>(zone_->New(new_size));
memcpy(new_buffer, buffer_, (pos_ - buffer_));
pos_ = new_buffer + (pos_ - buffer_);
@@ -105,6 +135,11 @@ class ZoneBuffer : public ZoneObject {
DCHECK(pos_ + size <= end_);
}
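
The new growth policy at least doubles the capacity on each spill, so building a body of N bytes copies O(N) bytes in total. A standalone sketch of the grow step (the real buffer allocates from a Zone, which frees nothing until the whole zone dies):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    void Grow(unsigned char*& buf, size_t& cap, size_t used, size_t need) {
      size_t new_cap = need + cap * 2;  // mirrors: size + (end_ - buffer_) * 2
      unsigned char* nb = static_cast<unsigned char*>(std::malloc(new_cap));
      std::memcpy(nb, buf, used);  // old contents survive the move
      std::free(buf);              // the zone version skips this; zones are bulk-freed
      buf = nb;
      cap = new_cap;
    }
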
+ void Truncate(size_t size) {
+ DCHECK_GE(offset(), size);
+ pos_ = buffer_ + size;
+ }
+
byte** pos_ptr() { return &pos_; }
private:
@@ -121,40 +156,43 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
// Building methods.
void SetSignature(FunctionSig* sig);
uint32_t AddLocal(ValueType type);
- void EmitVarInt(int32_t val);
- void EmitVarUint(uint32_t val);
+ void EmitI32V(int32_t val);
+ void EmitU32V(uint32_t val);
void EmitCode(const byte* code, uint32_t code_size);
void Emit(WasmOpcode opcode);
void EmitGetLocal(uint32_t index);
void EmitSetLocal(uint32_t index);
void EmitTeeLocal(uint32_t index);
void EmitI32Const(int32_t val);
+ void EmitI64Const(int64_t val);
+ void EmitF32Const(float val);
+ void EmitF64Const(double val);
void EmitWithU8(WasmOpcode opcode, const byte immediate);
void EmitWithU8U8(WasmOpcode opcode, const byte imm1, const byte imm2);
- void EmitWithVarInt(WasmOpcode opcode, int32_t immediate);
- void EmitWithVarUint(WasmOpcode opcode, uint32_t immediate);
+ void EmitWithI32V(WasmOpcode opcode, int32_t immediate);
+ void EmitWithU32V(WasmOpcode opcode, uint32_t immediate);
void EmitDirectCallIndex(uint32_t index);
- void ExportAs(Vector<const char> name);
void SetName(Vector<const char> name);
void AddAsmWasmOffset(int call_position, int to_number_position);
void SetAsmFunctionStartPosition(int position);
size_t GetPosition() const { return body_.size(); }
- void FixupByte(size_t position, byte value) { body_[position] = value; }
- void StashCode(std::vector<byte>* dst, size_t position);
+ void FixupByte(size_t position, byte value) {
+ body_.patch_u8(position, value);
+ }
+ void DeleteCodeAfter(size_t position);
void WriteSignature(ZoneBuffer& buffer) const;
- void WriteExports(ZoneBuffer& buffer) const;
void WriteBody(ZoneBuffer& buffer) const;
void WriteAsmWasmOffsetTable(ZoneBuffer& buffer) const;
+ WasmModuleBuilder* builder() const { return builder_; }
uint32_t func_index() { return func_index_; }
FunctionSig* signature();
private:
explicit WasmFunctionBuilder(WasmModuleBuilder* builder);
friend class WasmModuleBuilder;
- friend class WasmTemporary;
struct DirectCallIndex {
size_t offset;
@@ -165,9 +203,8 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
LocalDeclEncoder locals_;
uint32_t signature_index_;
uint32_t func_index_;
- ZoneVector<uint8_t> body_;
- ZoneVector<char> name_;
- ZoneVector<ZoneVector<char>> exported_names_;
+ ZoneBuffer body_;
+ Vector<const char> name_;
ZoneVector<uint32_t> i32_temps_;
ZoneVector<uint32_t> i64_temps_;
ZoneVector<uint32_t> f32_temps_;
@@ -181,64 +218,22 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
uint32_t asm_func_start_source_position_ = 0;
};
-class WasmTemporary {
- public:
- WasmTemporary(WasmFunctionBuilder* builder, ValueType type) {
- switch (type) {
- case kWasmI32:
- temporary_ = &builder->i32_temps_;
- break;
- case kWasmI64:
- temporary_ = &builder->i64_temps_;
- break;
- case kWasmF32:
- temporary_ = &builder->f32_temps_;
- break;
- case kWasmF64:
- temporary_ = &builder->f64_temps_;
- break;
- default:
- UNREACHABLE();
- temporary_ = nullptr;
- }
- if (temporary_->size() == 0) {
- // Allocate a new temporary.
- index_ = builder->AddLocal(type);
- } else {
- // Reuse a previous temporary.
- index_ = temporary_->back();
- temporary_->pop_back();
- }
- }
- ~WasmTemporary() {
- temporary_->push_back(index_); // return the temporary to the list.
- }
- uint32_t index() { return index_; }
-
- private:
- ZoneVector<uint32_t>* temporary_;
- uint32_t index_;
-};
-
class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
public:
explicit WasmModuleBuilder(Zone* zone);
// Building methods.
- uint32_t AddImport(const char* name, int name_length, FunctionSig* sig);
- void SetImportName(uint32_t index, const char* name, int name_length) {
- function_imports_[index].name = name;
- function_imports_[index].name_length = name_length;
- }
+ uint32_t AddImport(Vector<const char> name, FunctionSig* sig);
WasmFunctionBuilder* AddFunction(FunctionSig* sig = nullptr);
uint32_t AddGlobal(ValueType type, bool exported, bool mutability = true,
const WasmInitExpr& init = WasmInitExpr());
- uint32_t AddGlobalImport(const char* name, int name_length, ValueType type);
+ uint32_t AddGlobalImport(Vector<const char> name, ValueType type);
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
uint32_t AddSignature(FunctionSig* sig);
uint32_t AllocateIndirectFunctions(uint32_t count);
void SetIndirectFunction(uint32_t indirect, uint32_t direct);
void MarkStartFunction(WasmFunctionBuilder* builder);
+ void AddExport(Vector<const char> name, WasmFunctionBuilder* builder);
// Writing methods.
void WriteTo(ZoneBuffer& buffer) const;
@@ -257,15 +252,18 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
private:
struct WasmFunctionImport {
+ Vector<const char> name;
uint32_t sig_index;
- const char* name;
- int name_length;
+ };
+
+ struct WasmFunctionExport {
+ Vector<const char> name;
+ uint32_t function_index;
};
struct WasmGlobalImport {
+ Vector<const char> name;
ValueTypeCode type_code;
- const char* name;
- int name_length;
};
struct WasmGlobal {
@@ -284,6 +282,7 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
Zone* zone_;
ZoneVector<FunctionSig*> signatures_;
ZoneVector<WasmFunctionImport> function_imports_;
+ ZoneVector<WasmFunctionExport> function_exports_;
ZoneVector<WasmGlobalImport> global_imports_;
ZoneVector<WasmFunctionBuilder*> functions_;
ZoneVector<WasmDataSegment> data_segments_;
diff --git a/deps/v8/src/wasm/wasm-module.cc b/deps/v8/src/wasm/wasm-module.cc
index 65cd79f9ee..bd44955735 100644
--- a/deps/v8/src/wasm/wasm-module.cc
+++ b/deps/v8/src/wasm/wasm-module.cc
@@ -4,9 +4,10 @@
#include <memory>
+#include "src/asmjs/asm-js.h"
#include "src/assembler-inl.h"
-#include "src/base/adapters.h"
#include "src/base/atomic-utils.h"
+#include "src/base/utils/random-number-generator.h"
#include "src/code-stubs.h"
#include "src/compiler/wasm-compiler.h"
#include "src/debug/interface-types.h"
@@ -18,7 +19,6 @@
#include "src/trap-handler/trap-handler.h"
#include "src/v8.h"
-#include "src/asmjs/asm-wasm-builder.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-code-specialization.h"
@@ -55,40 +55,28 @@ byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
}
-static void MemoryFinalizer(const v8::WeakCallbackInfo<void>& data) {
- DisallowHeapAllocation no_gc;
- JSArrayBuffer** p = reinterpret_cast<JSArrayBuffer**>(data.GetParameter());
- JSArrayBuffer* buffer = *p;
-
- if (!buffer->was_neutered()) {
- void* memory = buffer->backing_store();
- DCHECK(memory != nullptr);
- base::OS::Free(memory,
- RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize()));
-
- data.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(
- -buffer->byte_length()->Number());
+static void RecordStats(Isolate* isolate, Code* code, bool is_sync) {
+ if (is_sync) {
+ // TODO(karlschimpf): Make this work when asynchronous.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=6361
+ isolate->counters()->wasm_generated_code_size()->Increment(
+ code->body_size());
+ isolate->counters()->wasm_reloc_size()->Increment(
+ code->relocation_info()->length());
}
-
- GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
}
-static void RecordStats(Isolate* isolate, Code* code) {
- isolate->counters()->wasm_generated_code_size()->Increment(code->body_size());
- isolate->counters()->wasm_reloc_size()->Increment(
- code->relocation_info()->length());
-}
-
-static void RecordStats(Isolate* isolate, Handle<FixedArray> functions) {
+static void RecordStats(Isolate* isolate, Handle<FixedArray> functions,
+ bool is_sync) {
DisallowHeapAllocation no_gc;
for (int i = 0; i < functions->length(); ++i) {
- RecordStats(isolate, Code::cast(functions->get(i)));
+ RecordStats(isolate, Code::cast(functions->get(i)), is_sync);
}
}
void* TryAllocateBackingStore(Isolate* isolate, size_t size,
- bool enable_guard_regions, bool& is_external) {
- is_external = false;
+ bool enable_guard_regions, void*& allocation_base,
+ size_t& allocation_length) {
// TODO(eholk): Right now enable_guard_regions has no effect on 32-bit
// systems. It may be safer to fail instead, given that other code might do
// things that would be unsafe if they expected guard pages where there
@@ -99,26 +87,30 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
// We always allocate the largest possible offset into the heap, so the
// addressable memory after the guard page can be made inaccessible.
- const size_t alloc_size =
- RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
+ allocation_length = RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
DCHECK_EQ(0, size % base::OS::CommitPageSize());
// AllocateGuarded makes the whole region inaccessible by default.
- void* memory = base::OS::AllocateGuarded(alloc_size);
- if (memory == nullptr) {
+ allocation_base =
+ isolate->array_buffer_allocator()->Reserve(allocation_length);
+ if (allocation_base == nullptr) {
return nullptr;
}
+ void* memory = allocation_base;
+
// Make the part we care about accessible.
- base::OS::Unprotect(memory, size);
+ isolate->array_buffer_allocator()->SetProtection(
+ memory, size, v8::ArrayBuffer::Allocator::Protection::kReadWrite);
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(size);
- is_external = true;
return memory;
} else {
void* memory = isolate->array_buffer_allocator()->Allocate(size);
+ allocation_base = memory;
+ allocation_length = size;
return memory;
}
}
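
The guard-region scheme reserves the entire addressable wasm heap up front, inaccessible, and then unprotects only the live prefix, so out-of-bounds accesses fault instead of corrupting memory. A hedged POSIX-flavored sketch of the idea (mmap/mprotect stand in for the v8::ArrayBuffer::Allocator calls above):

    #include <cstddef>
    #include <sys/mman.h>

    void* ReserveWithGuards(size_t reserve_bytes, size_t live_bytes) {
      void* base = mmap(nullptr, reserve_bytes, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) return nullptr;
      // Accesses past {live_bytes} hit PROT_NONE pages and trap.
      if (mprotect(base, live_bytes, PROT_READ | PROT_WRITE) != 0) {
        munmap(base, reserve_bytes);
        return nullptr;
      }
      return base;
    }
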
@@ -135,13 +127,12 @@ Handle<Script> CreateWasmScript(Isolate* isolate,
const ModuleWireBytes& wire_bytes) {
Handle<Script> script =
isolate->factory()->NewScript(isolate->factory()->empty_string());
- FixedArray* array = isolate->native_context()->embedder_data();
- script->set_context_data(array->get(v8::Context::kDebugIdIndex));
+ script->set_context_data(isolate->native_context()->debug_context_id());
script->set_type(Script::TYPE_WASM);
int hash = StringHasher::HashSequentialString(
- reinterpret_cast<const char*>(wire_bytes.start()), wire_bytes.length(),
- kZeroHashSeed);
+ reinterpret_cast<const char*>(wire_bytes.start()),
+ static_cast<int>(wire_bytes.length()), kZeroHashSeed);
const int kBufferSize = 32;
char buffer[kBufferSize];
@@ -302,30 +293,109 @@ bool compile_lazy(const WasmModule* module) {
// A helper for compiling an entire module.
class CompilationHelper {
public:
- CompilationHelper(Isolate* isolate, WasmModule* module)
- : isolate_(isolate), module_(module) {}
+ // The compilation helper takes ownership of the {WasmModule}.
+ // In {CompileToModuleObject}, it will transfer ownership to the generated
+ // {WasmModuleWrapper}. If this method is not called, ownership may be
+ // reclaimed by explicitly releasing the {module_} field.
+ CompilationHelper(Isolate* isolate, std::unique_ptr<WasmModule> module,
+ bool is_sync)
+ : isolate_(isolate),
+ module_(std::move(module)),
+ is_sync_(is_sync),
+ executed_units_(
+ isolate->random_number_generator(),
+ (isolate->heap()->memory_allocator()->code_range()->valid()
+ ? isolate->heap()->memory_allocator()->code_range()->size()
+ : isolate->heap()->code_space()->Capacity()) /
+ 2),
+ num_background_tasks_(Min(
+ static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+ V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())),
+ stopped_compilation_tasks_(num_background_tasks_) {}
+
+ bool GetNextUncompiledFunctionId(size_t* index) {
+ DCHECK_NOT_NULL(index);
+ // - 1 because AtomicIncrement returns the value after the atomic increment.
+ *index = next_unit_.Increment(1) - 1;
+ return *index < compilation_units_.size();
+ }
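
This one method is the entire work-distribution scheme: every background task (and the main thread) claims the next compilation unit with a single atomic increment. The same idiom with std::atomic, whose fetch_add returns the pre-increment value, so no "- 1" correction is needed:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> next_unit{0};

    // Claims indices past the end too; callers treat those as "no more work".
    bool ClaimNextIndex(size_t total_units, size_t* index) {
      *index = next_unit.fetch_add(1);  // returns the value before the add
      return *index < total_units;
    }
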
// The actual runnable task that performs compilations in the background.
class CompilationTask : public CancelableTask {
public:
CompilationHelper* helper_;
explicit CompilationTask(CompilationHelper* helper)
- : CancelableTask(helper->isolate_), helper_(helper) {}
+ : CancelableTask(helper->isolate_, &helper->background_task_manager_),
+ helper_(helper) {}
void RunInternal() override {
- while (helper_->FetchAndExecuteCompilationUnit()) {
+ size_t index = 0;
+ while (helper_->executed_units_.CanAcceptWork() &&
+ helper_->GetNextUncompiledFunctionId(&index)) {
+ helper_->CompileAndSchedule(index);
}
- helper_->module_->pending_tasks.get()->Signal();
+ helper_->OnBackgroundTaskStopped();
}
};
+ void OnBackgroundTaskStopped() {
+ base::LockGuard<base::Mutex> guard(&tasks_mutex_);
+ ++stopped_compilation_tasks_;
+ DCHECK_LE(stopped_compilation_tasks_, num_background_tasks_);
+ }
+
+ void CompileAndSchedule(size_t index) {
+ DisallowHeapAllocation no_allocation;
+ DisallowHandleAllocation no_handles;
+ DisallowHandleDereference no_deref;
+ DisallowCodeDependencyChange no_dependency_change;
+ DCHECK_LT(index, compilation_units_.size());
+
+ std::unique_ptr<compiler::WasmCompilationUnit> unit =
+ std::move(compilation_units_.at(index));
+ unit->ExecuteCompilation();
+ {
+ base::LockGuard<base::Mutex> guard(&result_mutex_);
+ executed_units_.Schedule(std::move(unit));
+ }
+ }
+
+ class CodeGenerationSchedule {
+ public:
+ explicit CodeGenerationSchedule(
+ base::RandomNumberGenerator* random_number_generator,
+ size_t max_memory = 0);
+
+ void Schedule(std::unique_ptr<compiler::WasmCompilationUnit>&& item);
+
+ bool IsEmpty() const { return schedule_.empty(); }
+
+ std::unique_ptr<compiler::WasmCompilationUnit> GetNext();
+
+ bool CanAcceptWork() const;
+
+ void EnableThrottling() { throttle_ = true; }
+
+ private:
+ size_t GetRandomIndexInSchedule();
+
+ base::RandomNumberGenerator* random_number_generator_ = nullptr;
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>> schedule_;
+ const size_t max_memory_;
+ bool throttle_ = false;
+ base::AtomicNumber<size_t> allocated_memory_{0};
+ };
+
Isolate* isolate_;
- WasmModule* module_;
- std::vector<compiler::WasmCompilationUnit*> compilation_units_;
- std::queue<compiler::WasmCompilationUnit*> executed_units_;
+ std::unique_ptr<WasmModule> module_;
+ bool is_sync_;
+ std::vector<std::unique_ptr<compiler::WasmCompilationUnit>>
+ compilation_units_;
+ CodeGenerationSchedule executed_units_;
base::Mutex result_mutex_;
base::AtomicNumber<size_t> next_unit_;
- size_t num_background_tasks_ = 0;
+ const size_t num_background_tasks_ = 0;
+ CancelableTaskManager background_task_manager_;
// Run by each compilation task and by the main thread.
bool FetchAndExecuteCompilationUnit() {
@@ -340,10 +410,11 @@ class CompilationHelper {
return false;
}
- compiler::WasmCompilationUnit* unit = compilation_units_.at(index);
+ std::unique_ptr<compiler::WasmCompilationUnit> unit =
+ std::move(compilation_units_.at(index));
unit->ExecuteCompilation();
base::LockGuard<base::Mutex> guard(&result_mutex_);
- executed_units_.push(unit);
+ executed_units_.Schedule(std::move(unit));
return true;
}
@@ -356,30 +427,21 @@ class CompilationHelper {
compilation_units_.reserve(funcs_to_compile);
for (uint32_t i = start; i < num_funcs; ++i) {
const WasmFunction* func = &functions[i];
+ constexpr bool is_sync = true;
compilation_units_.push_back(
- new compiler::WasmCompilationUnit(isolate_, &module_env, func));
+ std::unique_ptr<compiler::WasmCompilationUnit>(
+ new compiler::WasmCompilationUnit(isolate_, &module_env, func,
+ !is_sync)));
}
return funcs_to_compile;
}
- void InitializeHandles() {
- for (auto unit : compilation_units_) {
- unit->InitializeHandles();
- }
- }
-
- uint32_t* StartCompilationTasks() {
- num_background_tasks_ =
- Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
- V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
- uint32_t* task_ids = new uint32_t[num_background_tasks_];
- for (size_t i = 0; i < num_background_tasks_; ++i) {
- CompilationTask* task = new CompilationTask(this);
- task_ids[i] = task->id();
+ void RestartCompilationTasks() {
+ base::LockGuard<base::Mutex> guard(&tasks_mutex_);
+ for (; stopped_compilation_tasks_ > 0; --stopped_compilation_tasks_) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
+ new CompilationTask(this), v8::Platform::kShortRunningTask);
}
- return task_ids;
}
void WaitForCompilationTasks(uint32_t* task_ids) {
@@ -393,27 +455,29 @@ class CompilationHelper {
}
}
- void FinishCompilationUnits(std::vector<Handle<Code>>& results,
- ErrorThrower* thrower) {
+ size_t FinishCompilationUnits(std::vector<Handle<Code>>& results,
+ ErrorThrower* thrower) {
+ size_t finished = 0;
while (true) {
int func_index = -1;
Handle<Code> result = FinishCompilationUnit(thrower, &func_index);
if (func_index < 0) break;
results[func_index] = result;
+ ++finished;
}
+ RestartCompilationTasks();
+ return finished;
}
Handle<Code> FinishCompilationUnit(ErrorThrower* thrower, int* func_index) {
- compiler::WasmCompilationUnit* unit = nullptr;
+ std::unique_ptr<compiler::WasmCompilationUnit> unit;
{
base::LockGuard<base::Mutex> guard(&result_mutex_);
- if (executed_units_.empty()) return Handle<Code>::null();
- unit = executed_units_.front();
- executed_units_.pop();
+ if (executed_units_.IsEmpty()) return Handle<Code>::null();
+ unit = executed_units_.GetNext();
}
*func_index = unit->func_index();
Handle<Code> result = unit->FinishCompilation(thrower);
- delete unit;
return result;
}
@@ -446,32 +510,34 @@ class CompilationHelper {
// 1) The main thread allocates a compilation unit for each wasm function
// and stores them in the vector {compilation_units}.
InitializeParallelCompilation(module->functions, *module_env);
- InitializeHandles();
- // Objects for the synchronization with the background threads.
- base::AtomicNumber<size_t> next_unit(
- static_cast<size_t>(FLAG_skip_compiling_wasm_funcs));
+ executed_units_.EnableThrottling();
// 2) The main thread spawns {CompilationTask} instances which run on
// the background threads.
- std::unique_ptr<uint32_t[]> task_ids(StartCompilationTasks());
-
- // 3.a) The background threads and the main thread pick one compilation
- // unit at a time and execute the parallel phase of the compilation
- // unit. After finishing the execution of the parallel phase, the
- // result is enqueued in {executed_units}.
- while (FetchAndExecuteCompilationUnit()) {
+ RestartCompilationTasks();
+
+ size_t finished_functions = 0;
+ while (finished_functions < compilation_units_.size()) {
+ // 3.a) The background threads and the main thread pick one compilation
+ // unit at a time and execute the parallel phase of the compilation
+ // unit. After finishing the execution of the parallel phase, the
+ // result is enqueued in {executed_units}.
+ size_t index = 0;
+ if (GetNextUncompiledFunctionId(&index)) {
+ CompileAndSchedule(index);
+ }
// 3.b) If {executed_units} contains a compilation unit, the main thread
// dequeues it and finishes the compilation unit. Compilation units
// are finished concurrently to the background threads to save
// memory.
- FinishCompilationUnits(results, thrower);
+ finished_functions += FinishCompilationUnits(results, thrower);
}
// 4) After the parallel phase of all compilation units has started, the
- // main thread waits for all {CompilationTask} instances to finish.
- WaitForCompilationTasks(task_ids.get());
- // Finish the compilation of the remaining compilation units.
- FinishCompilationUnits(results, thrower);
+ // main thread waits for all {CompilationTask} instances to finish -
+ // which happens once they all realize there's no next work item to
+ // process.
+ background_task_manager_.CancelAndWait();
}
void CompileSequentially(ModuleBytesEnv* module_env,
@@ -504,11 +570,7 @@ class CompilationHelper {
Handle<Script> asm_js_script,
Vector<const byte> asm_js_offset_table_bytes) {
Factory* factory = isolate_->factory();
- // The {module_wrapper} will take ownership of the {WasmModule} object,
- // and it will be destroyed when the GC reclaims the wrapper object.
- Handle<WasmModuleWrapper> module_wrapper =
- WasmModuleWrapper::New(isolate_, module_);
- WasmInstance temp_instance(module_);
+ WasmInstance temp_instance(module_.get());
temp_instance.context = isolate_->native_context();
temp_instance.mem_size = WasmModule::kPageSize * module_->min_mem_pages;
temp_instance.mem_start = nullptr;
@@ -528,12 +590,30 @@ class CompilationHelper {
signature_tables->set(i, *temp_instance.signature_tables[i]);
}
- HistogramTimerScope wasm_compile_module_time_scope(
- module_->is_wasm()
- ? isolate_->counters()->wasm_compile_wasm_module_time()
- : isolate_->counters()->wasm_compile_asm_module_time());
+ if (is_sync_) {
+ // TODO(karlschimpf): Make this work when asynchronous.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=6361
+ HistogramTimerScope wasm_compile_module_time_scope(
+ module_->is_wasm()
+ ? isolate_->counters()->wasm_compile_wasm_module_time()
+ : isolate_->counters()->wasm_compile_asm_module_time());
+ return CompileToModuleObjectInternal(
+ thrower, wire_bytes, asm_js_script, asm_js_offset_table_bytes,
+ factory, &temp_instance, &function_tables, &signature_tables);
+ }
+ return CompileToModuleObjectInternal(
+ thrower, wire_bytes, asm_js_script, asm_js_offset_table_bytes, factory,
+ &temp_instance, &function_tables, &signature_tables);
+ }
- ModuleBytesEnv module_env(module_, &temp_instance, wire_bytes);
+ private:
+ MaybeHandle<WasmModuleObject> CompileToModuleObjectInternal(
+ ErrorThrower* thrower, const ModuleWireBytes& wire_bytes,
+ Handle<Script> asm_js_script,
+ Vector<const byte> asm_js_offset_table_bytes, Factory* factory,
+ WasmInstance* temp_instance, Handle<FixedArray>* function_tables,
+ Handle<FixedArray>* signature_tables) {
+ ModuleBytesEnv module_env(module_.get(), temp_instance, wire_bytes);
// The {code_table} array contains import wrappers and functions (which
// are both included in {functions.size()}, and export wrappers.
@@ -543,7 +623,7 @@ class CompilationHelper {
factory->NewFixedArray(static_cast<int>(code_table_size), TENURED);
// Check whether lazy compilation is enabled for this module.
- bool lazy_compile = compile_lazy(module_);
+ bool lazy_compile = compile_lazy(module_.get());
// If lazy compile: Initialize the code table with the lazy compile builtin.
// Otherwise: Initialize with the illegal builtin. All call sites will be
@@ -554,41 +634,42 @@ class CompilationHelper {
for (int i = 0, e = static_cast<int>(module_->functions.size()); i < e;
++i) {
code_table->set(i, *init_builtin);
- temp_instance.function_code[i] = init_builtin;
+ temp_instance->function_code[i] = init_builtin;
}
- (module_->is_wasm() ? isolate_->counters()->wasm_functions_per_wasm_module()
- : isolate_->counters()->wasm_functions_per_asm_module())
- ->AddSample(static_cast<int>(module_->functions.size()));
+ if (is_sync_)
+ // TODO(karlschimpf): Make this work when asynchronous.
+ // https://bugs.chromium.org/p/v8/issues/detail?id=6361
+ (module_->is_wasm()
+ ? isolate_->counters()->wasm_functions_per_wasm_module()
+ : isolate_->counters()->wasm_functions_per_asm_module())
+ ->AddSample(static_cast<int>(module_->functions.size()));
if (!lazy_compile) {
- CompilationHelper helper(isolate_, module_);
size_t funcs_to_compile =
module_->functions.size() - module_->num_imported_functions;
if (!FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks != 0 &&
funcs_to_compile > 1) {
// Avoid a race condition by collecting results into a second vector.
- std::vector<Handle<Code>> results(temp_instance.function_code);
- helper.CompileInParallel(&module_env, results, thrower);
- temp_instance.function_code.swap(results);
+ std::vector<Handle<Code>> results(temp_instance->function_code);
+ CompileInParallel(&module_env, results, thrower);
+ temp_instance->function_code.swap(results);
} else {
- helper.CompileSequentially(&module_env, temp_instance.function_code,
- thrower);
+ CompileSequentially(&module_env, temp_instance->function_code, thrower);
}
if (thrower->error()) return {};
}
// At this point, compilation has completed. Update the code table.
for (size_t i = FLAG_skip_compiling_wasm_funcs;
- i < temp_instance.function_code.size(); ++i) {
- Code* code = *temp_instance.function_code[i];
+ i < temp_instance->function_code.size(); ++i) {
+ Code* code = *temp_instance->function_code[i];
code_table->set(static_cast<int>(i), code);
- RecordStats(isolate_, code);
+ RecordStats(isolate_, code, is_sync_);
}
// Create heap objects for script, module bytes and asm.js offset table to
- // be
- // stored in the shared module data.
+ // be stored in the shared module data.
Handle<Script> script;
Handle<ByteArray> asm_js_offset_table;
if (asm_js_script.is_null()) {
@@ -609,6 +690,12 @@ class CompilationHelper {
.ToHandleChecked();
DCHECK(module_bytes->IsSeqOneByteString());
+ // The {module_wrapper} will take ownership of the {WasmModule} object,
+ // and it will be destroyed when the GC reclaims the wrapper object.
+ Handle<WasmModuleWrapper> module_wrapper =
+ WasmModuleWrapper::New(isolate_, module_.release());
+ WasmModule* module = module_wrapper->get();
+
// Create the shared module data.
// TODO(clemensh): For the same module (same bytes / same hash), we should
// only have one WasmSharedModuleData. Otherwise, we might only set
@@ -624,7 +711,7 @@ class CompilationHelper {
// serializable. Instantiation may occur off a deserialized version of this
// object.
Handle<WasmCompiledModule> compiled_module = WasmCompiledModule::New(
- isolate_, shared, code_table, function_tables, signature_tables);
+ isolate_, shared, code_table, *function_tables, *signature_tables);
// If we created a wasm script, finish it now and make it public to the
// debugger.
@@ -636,24 +723,64 @@ class CompilationHelper {
// Compile JS->WASM wrappers for exported functions.
JSToWasmWrapperCache js_to_wasm_cache;
int func_index = 0;
- for (auto exp : module_->export_table) {
+ for (auto exp : module->export_table) {
if (exp.kind != kExternalFunction) continue;
Handle<Code> wasm_code = EnsureExportedLazyDeoptData(
isolate_, Handle<WasmInstanceObject>::null(), code_table, exp.index);
Handle<Code> wrapper_code =
- js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(isolate_, module_,
+ js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(isolate_, module,
wasm_code, exp.index);
int export_index =
- static_cast<int>(module_->functions.size() + func_index);
+ static_cast<int>(module->functions.size() + func_index);
code_table->set(export_index, *wrapper_code);
- RecordStats(isolate_, *wrapper_code);
+ RecordStats(isolate_, *wrapper_code, is_sync_);
func_index++;
}
return WasmModuleObject::New(isolate_, compiled_module);
-}
+ }
+ size_t stopped_compilation_tasks_ = 0;
+ base::Mutex tasks_mutex_;
};
+CompilationHelper::CodeGenerationSchedule::CodeGenerationSchedule(
+ base::RandomNumberGenerator* random_number_generator, size_t max_memory)
+ : random_number_generator_(random_number_generator),
+ max_memory_(max_memory) {
+ DCHECK_NOT_NULL(random_number_generator_);
+ DCHECK_GT(max_memory_, 0);
+}
+
+void CompilationHelper::CodeGenerationSchedule::Schedule(
+ std::unique_ptr<compiler::WasmCompilationUnit>&& item) {
+ size_t cost = item->memory_cost();
+ schedule_.push_back(std::move(item));
+ allocated_memory_.Increment(cost);
+}
+
+bool CompilationHelper::CodeGenerationSchedule::CanAcceptWork() const {
+ return (!throttle_ || allocated_memory_.Value() <= max_memory_);
+}
+
+std::unique_ptr<compiler::WasmCompilationUnit>
+CompilationHelper::CodeGenerationSchedule::GetNext() {
+ DCHECK(!IsEmpty());
+ size_t index = GetRandomIndexInSchedule();
+ auto ret = std::move(schedule_[index]);
+ std::swap(schedule_[schedule_.size() - 1], schedule_[index]);
+ schedule_.pop_back();
+ allocated_memory_.Decrement(ret->memory_cost());
+ return ret;
+}
+
+size_t CompilationHelper::CodeGenerationSchedule::GetRandomIndexInSchedule() {
+ double factor = random_number_generator_->NextDouble();
+ size_t index = (size_t)(factor * schedule_.size());
+ DCHECK_GE(index, 0);
+ DCHECK_LT(index, schedule_.size());
+ return index;
+}
+
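GetNext() above removes a random element in O(1): swap the chosen slot with the last one, then pop_back(). A minimal sketch of that swap-and-pop pattern, with std::mt19937 standing in for V8's base::RandomNumberGenerator and the memory accounting left out:

    #include <cstddef>
    #include <random>
    #include <utility>
    #include <vector>

    // Removes and returns a uniformly random element; {items} must be non-empty
    // (mirroring the DCHECK(!IsEmpty()) above). The survivors' order changes,
    // which is exactly the point: compilation order gets randomized.
    template <typename T>
    T TakeRandom(std::vector<T>& items, std::mt19937& rng) {
      std::uniform_int_distribution<size_t> dist(0, items.size() - 1);
      size_t index = dist(rng);
      std::swap(items[index], items.back());
      T taken = std::move(items.back());
      items.pop_back();
      return taken;
    }

    int main() {
      std::mt19937 rng(42);
      std::vector<int> units{1, 2, 3, 4};
      while (!units.empty()) TakeRandom(units, rng);
    }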
static void MemoryInstanceFinalizer(Isolate* isolate,
WasmInstanceObject* instance) {
DisallowHeapAllocation no_gc;
@@ -810,26 +937,18 @@ void RecordLazyCodeStats(Isolate* isolate, Code* code) {
} // namespace
Handle<JSArrayBuffer> wasm::SetupArrayBuffer(Isolate* isolate,
+ void* allocation_base,
+ size_t allocation_length,
void* backing_store, size_t size,
bool is_external,
bool enable_guard_regions) {
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
- JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store,
+ JSArrayBuffer::Setup(buffer, isolate, is_external, allocation_base,
+ allocation_length, backing_store,
static_cast<int>(size));
buffer->set_is_neuterable(false);
buffer->set_is_wasm_buffer(true);
buffer->set_has_guard_region(enable_guard_regions);
-
- if (enable_guard_regions) {
- // We mark the buffer as external if we allocated it here with guard
- // pages. That means we need to arrange for it to be freed.
-
- // TODO(eholk): Finalizers may not run when the main thread is shutting
- // down, which means we may leak memory here.
- Handle<Object> global_handle = isolate->global_handles()->Create(*buffer);
- GlobalHandles::MakeWeak(global_handle.location(), global_handle.location(),
- &MemoryFinalizer, v8::WeakCallbackType::kFinalizer);
- }
return buffer;
}
@@ -842,9 +961,10 @@ Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
enable_guard_regions = enable_guard_regions && kGuardRegionsSupported;
- bool is_external; // Set by TryAllocateBackingStore
- void* memory =
- TryAllocateBackingStore(isolate, size, enable_guard_regions, is_external);
+ void* allocation_base = nullptr; // Set by TryAllocateBackingStore
+ size_t allocation_length = 0; // Set by TryAllocateBackingStore
+ void* memory = TryAllocateBackingStore(isolate, size, enable_guard_regions,
+ allocation_base, allocation_length);
if (memory == nullptr) {
return Handle<JSArrayBuffer>::null();
@@ -858,8 +978,9 @@ Handle<JSArrayBuffer> wasm::NewArrayBuffer(Isolate* isolate, size_t size,
}
#endif
- return SetupArrayBuffer(isolate, memory, size, is_external,
- enable_guard_regions);
+ const bool is_external = false;
+ return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
+ size, is_external, enable_guard_regions);
}
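The reason NewArrayBuffer now threads an {allocation_base}/{allocation_length} pair through in addition to {backing_store}/{size} is that with guard regions the OS-level allocation is larger than the buffer contents: a big region is reserved and only the accessible prefix is committed, so freeing needs the outer pair. A hedged, POSIX-only sketch of that shape (mmap/mprotect with hypothetical names; V8's actual TryAllocateBackingStore differs):

    #include <cstddef>
    #include <sys/mman.h>

    // Reserve {reserved} inaccessible bytes, then make only the first {size}
    // bytes usable; accesses past {size} trap. The caller must later unmap
    // {allocation_base}/{allocation_length}, not just the visible prefix.
    void* AllocateWithGuards(size_t size, size_t reserved,
                             void** allocation_base, size_t* allocation_length) {
      void* base = mmap(nullptr, reserved, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) return nullptr;
      if (size > 0 && mprotect(base, size, PROT_READ | PROT_WRITE) != 0) {
        munmap(base, reserved);
        return nullptr;
      }
      *allocation_base = base;        // what must eventually be freed
      *allocation_length = reserved;  // the full reservation, not just {size}
      return base;                    // the backing store the buffer exposes
    }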
void wasm::UnpackAndRegisterProtectedInstructions(
@@ -925,8 +1046,8 @@ WasmInstanceObject* wasm::GetOwningWasmInstance(Code* code) {
return WasmInstanceObject::cast(cell->value());
}
-WasmModule::WasmModule(Zone* owned)
- : owned_zone(owned), pending_tasks(new base::Semaphore(0)) {}
+WasmModule::WasmModule(std::unique_ptr<Zone> owned)
+ : signature_zone(std::move(owned)), pending_tasks(new base::Semaphore(0)) {}
namespace {
@@ -1003,7 +1124,7 @@ void UpdateDispatchTablesInternal(Isolate* isolate,
// a dangling pointer in the signature map.
Handle<WasmInstanceObject> instance(
WasmInstanceObject::cast(dispatch_tables->get(i)), isolate);
- auto func_table = instance->module()->function_tables[table_index];
+ auto& func_table = instance->module()->function_tables[table_index];
uint32_t sig_index = func_table.map.FindOrInsert(function->sig);
signature_table->set(index, Smi::FromInt(static_cast<int>(sig_index)));
function_table->set(index, *code);
@@ -1136,7 +1257,7 @@ class InstantiationHelper {
UNREACHABLE();
}
}
- RecordStats(isolate_, code_table);
+ RecordStats(isolate_, code_table, is_sync_);
} else {
// There was no owner, so we can reuse the original.
compiled_module_ = original;
@@ -1384,7 +1505,7 @@ class InstantiationHelper {
WasmSharedModuleData::SetBreakpointsOnNewInstance(
compiled_module_->shared(), instance);
- if (FLAG_wasm_interpret_all) {
+ if (FLAG_wasm_interpret_all && module_->is_wasm()) {
Handle<WasmDebugInfo> debug_info =
WasmInstanceObject::GetOrCreateDebugInfo(instance);
std::vector<int> func_indexes;
@@ -1413,7 +1534,7 @@ class InstantiationHelper {
Handle<WasmExportedFunction> startup_fct = WasmExportedFunction::New(
isolate_, instance, MaybeHandle<String>(), start_index,
static_cast<int>(sig->parameter_count()), wrapper_code);
- RecordStats(isolate_, *startup_code);
+ RecordStats(isolate_, *startup_code, is_sync_);
// Call the JS function.
Handle<Object> undefined = factory->undefined_value();
MaybeHandle<Object> retval =
@@ -1447,6 +1568,7 @@ class InstantiationHelper {
Isolate* isolate_;
WasmModule* const module_;
+ constexpr static bool is_sync_ = true;
ErrorThrower* thrower_;
Handle<WasmModuleObject> module_object_;
Handle<JSReceiver> ffi_; // TODO(titzer): Use MaybeHandle
@@ -1618,7 +1740,7 @@ class InstantiationHelper {
return -1;
}
code_table->set(num_imported_functions, *import_wrapper);
- RecordStats(isolate_, *import_wrapper);
+ RecordStats(isolate_, *import_wrapper, is_sync_);
num_imported_functions++;
break;
}
@@ -1737,7 +1859,7 @@ class InstantiationHelper {
module_name, import_name);
return -1;
}
- if (module_->is_asm_js() && FLAG_fast_validate_asm) {
+ if (module_->is_asm_js()) {
if (module_->globals[import.index].type == kWasmI32) {
value = Object::ToInt32(isolate_, value).ToHandleChecked();
} else {
@@ -1858,47 +1980,38 @@ class InstantiationHelper {
isolate_->factory()->InternalizeUtf8String("exports");
JSObject::AddProperty(instance, exports_name, exports_object, NONE);
- Handle<String> foreign_init_name =
- isolate_->factory()->InternalizeUtf8String(
- wasm::AsmWasmBuilder::foreign_init_name);
Handle<String> single_function_name =
- isolate_->factory()->InternalizeUtf8String(
- wasm::AsmWasmBuilder::single_function_name);
+ isolate_->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName);
PropertyDescriptor desc;
desc.set_writable(module_->is_asm_js());
desc.set_enumerable(true);
-
- // Count up export indexes.
- int export_index = 0;
- for (auto exp : module_->export_table) {
- if (exp.kind == kExternalFunction) {
- ++export_index;
- }
- }
+ desc.set_configurable(module_->is_asm_js());
// Store weak references to all exported functions.
Handle<FixedArray> weak_exported_functions;
if (compiled_module->has_weak_exported_functions()) {
weak_exported_functions = compiled_module->weak_exported_functions();
} else {
+ int export_count = 0;
+ for (WasmExport& exp : module_->export_table) {
+ if (exp.kind == kExternalFunction) ++export_count;
+ }
weak_exported_functions =
- isolate_->factory()->NewFixedArray(export_index);
+ isolate_->factory()->NewFixedArray(export_count);
compiled_module->set_weak_exported_functions(weak_exported_functions);
}
- DCHECK_EQ(export_index, weak_exported_functions->length());
- // Process each export in the export table (go in reverse so asm.js
- // can skip duplicates).
- for (auto exp : base::Reversed(module_->export_table)) {
+ // Process each export in the export table.
+ int export_index = 0; // Index into {weak_exported_functions}.
+ for (WasmExport& exp : module_->export_table) {
Handle<String> name =
WasmCompiledModule::ExtractUtf8StringFromModuleBytes(
isolate_, compiled_module_, exp.name_offset, exp.name_length)
.ToHandleChecked();
Handle<JSObject> export_to;
if (module_->is_asm_js() && exp.kind == kExternalFunction &&
- (String::Equals(name, foreign_init_name) ||
- String::Equals(name, single_function_name))) {
+ String::Equals(name, single_function_name)) {
export_to = instance;
} else {
export_to = exports_object;
@@ -1909,7 +2022,7 @@ class InstantiationHelper {
// Wrap and export the code as a JSFunction.
WasmFunction& function = module_->functions[exp.index];
int func_index =
- static_cast<int>(module_->functions.size() + --export_index);
+ static_cast<int>(module_->functions.size() + export_index);
Handle<JSFunction> js_function = js_wrappers_[exp.index];
if (js_function.is_null()) {
// Wrap the exported code as a JSFunction.
@@ -1933,6 +2046,7 @@ class InstantiationHelper {
isolate_->factory()->NewWeakCell(js_function);
DCHECK_GT(weak_exported_functions->length(), export_index);
weak_exported_functions->set(export_index, *weak_export);
+ export_index++;
break;
}
case kExternalTable: {
@@ -1954,9 +2068,11 @@ class InstantiationHelper {
Handle<WasmMemoryObject> memory_object;
if (!instance->has_memory_object()) {
// If there was no imported WebAssembly.Memory object, create one.
- Handle<JSArrayBuffer> buffer(instance->memory_buffer(), isolate_);
memory_object = WasmMemoryObject::New(
- isolate_, buffer,
+ isolate_,
+ (instance->has_memory_buffer())
+ ? handle(instance->memory_buffer())
+ : Handle<JSArrayBuffer>::null(),
(module_->max_mem_pages != 0) ? module_->max_mem_pages : -1);
instance->set_memory_object(*memory_object);
} else {
@@ -1998,13 +2114,6 @@ class InstantiationHelper {
break;
}
- // Skip duplicates for asm.js.
- if (module_->is_asm_js()) {
- v8::Maybe<bool> status = JSReceiver::HasOwnProperty(export_to, name);
- if (status.FromMaybe(false)) {
- continue;
- }
- }
v8::Maybe<bool> status = JSReceiver::DefineOwnProperty(
isolate_, export_to, name, &desc, Object::THROW_ON_ERROR);
if (!status.IsJust()) {
@@ -2013,6 +2122,7 @@ class InstantiationHelper {
return;
}
}
+ DCHECK_EQ(export_index, weak_exported_functions->length());
if (module_->is_wasm()) {
v8::Maybe<bool> success = JSReceiver::SetIntegrityLevel(
@@ -2094,10 +2204,8 @@ class InstantiationHelper {
Handle<FixedArray> all_dispatch_tables;
if (!table_instance.table_object.is_null()) {
// Get the existing dispatch table(s) with the WebAssembly.Table object.
- all_dispatch_tables = WasmTableObject::AddDispatchTable(
- isolate_, table_instance.table_object,
- Handle<WasmInstanceObject>::null(), index,
- Handle<FixedArray>::null(), Handle<FixedArray>::null());
+ all_dispatch_tables =
+ handle(table_instance.table_object->dispatch_tables());
}
// Count the number of table exports for each function (needed for lazy
@@ -2216,58 +2324,22 @@ void wasm::DetachWebAssemblyMemoryBuffer(Isolate* isolate,
? static_cast<uint32_t>(buffer->byte_length()->Number())
: 0;
if (buffer.is_null() || byte_length == 0) return;
- const bool has_guard_regions = buffer->has_guard_region();
const bool is_external = buffer->is_external();
- void* backing_store = buffer->backing_store();
DCHECK(!buffer->is_neuterable());
- if (!has_guard_regions && !is_external) {
+ if (!is_external) {
buffer->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*buffer);
+ if (free_memory) {
+ // We need to free the memory before neutering the buffer because
+ // FreeBackingStore reads buffer->allocation_base(), which is nulled out
+ // by Neuter. This means there is a dangling pointer until we neuter the
+ // buffer. Since there is no way for the user to directly call
+ // FreeBackingStore, we can ensure this is safe.
+ buffer->FreeBackingStore();
+ }
}
buffer->set_is_neuterable(true);
buffer->Neuter();
- // Neuter but do not free, as when pages == 0, the backing store is being used
- // by the new buffer.
- if (!free_memory) return;
- if (has_guard_regions) {
- base::OS::Free(backing_store, RoundUp(i::wasm::kWasmMaxHeapOffset,
- base::OS::CommitPageSize()));
- reinterpret_cast<v8::Isolate*>(isolate)
- ->AdjustAmountOfExternalAllocatedMemory(-byte_length);
- } else if (!has_guard_regions && !is_external) {
- isolate->array_buffer_allocator()->Free(backing_store, byte_length);
- }
-}
-
-void wasm::GrowDispatchTables(Isolate* isolate,
- Handle<FixedArray> dispatch_tables,
- uint32_t old_size, uint32_t count) {
- DCHECK_EQ(0, dispatch_tables->length() % 4);
-
- Zone specialization_zone(isolate->allocator(), ZONE_NAME);
- for (int i = 0; i < dispatch_tables->length(); i += 4) {
- Handle<FixedArray> old_function_table(
- FixedArray::cast(dispatch_tables->get(i + 2)));
- Handle<FixedArray> old_signature_table(
- FixedArray::cast(dispatch_tables->get(i + 3)));
- Handle<FixedArray> new_function_table =
- isolate->factory()->CopyFixedArrayAndGrow(old_function_table, count);
- Handle<FixedArray> new_signature_table =
- isolate->factory()->CopyFixedArrayAndGrow(old_signature_table, count);
-
- // Update dispatch tables with new function/signature tables
- dispatch_tables->set(i + 2, *new_function_table);
- dispatch_tables->set(i + 3, *new_signature_table);
-
- // Patch the code of the respective instance.
- CodeSpecialization code_specialization(isolate, &specialization_zone);
- code_specialization.PatchTableSize(old_size, old_size + count);
- code_specialization.RelocateObject(old_function_table, new_function_table);
- code_specialization.RelocateObject(old_signature_table,
- new_signature_table);
- code_specialization.ApplyToWholeInstance(
- WasmInstanceObject::cast(dispatch_tables->get(i)));
- }
}
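The comment above pins down an ordering constraint: FreeBackingStore() reads allocation_base(), and Neuter() nulls that field, so the free has to come first. A reduced model with hypothetical types, just to make the dependency visible:

    #include <cstddef>
    #include <cstdlib>

    // Neuter() wipes the allocation info; FreeBackingStore() must run before it
    // or the free would see a null base and leak the memory.
    struct Buffer {
      void* allocation_base = nullptr;
      size_t allocation_length = 0;
      void FreeBackingStore() { std::free(allocation_base); }
      void Neuter() { allocation_base = nullptr; allocation_length = 0; }
    };

    void Detach(Buffer* buffer, bool free_memory) {
      if (free_memory) buffer->FreeBackingStore();  // must precede Neuter()
      buffer->Neuter();
    }

    int main() {
      Buffer b{std::malloc(16), 16};
      Detach(&b, true);
    }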
void testing::ValidateInstancesChain(Isolate* isolate,
@@ -2480,14 +2552,19 @@ Handle<JSArray> wasm::GetCustomSections(Isolate* isolate,
if (!name->Equals(*section_name.ToHandleChecked())) continue;
// Make a copy of the payload data in the section.
- bool is_external; // Set by TryAllocateBackingStore
+ void* allocation_base = nullptr; // Set by TryAllocateBackingStore
+ size_t allocation_length = 0; // Set by TryAllocateBackingStore
+ const bool enable_guard_regions = false;
void* memory = TryAllocateBackingStore(isolate, section.payload_length,
- false, is_external);
+ enable_guard_regions,
+ allocation_base, allocation_length);
Handle<Object> section_data = factory->undefined_value();
if (memory) {
Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
- JSArrayBuffer::Setup(buffer, isolate, is_external, memory,
+ const bool is_external = false;
+ JSArrayBuffer::Setup(buffer, isolate, is_external, allocation_base,
+ allocation_length, memory,
static_cast<int>(section.payload_length));
DisallowHeapAllocation no_gc; // for raw access to string bytes.
Handle<SeqOneByteString> module_bytes(compiled_module->module_bytes(),
@@ -2517,12 +2594,10 @@ Handle<JSArray> wasm::GetCustomSections(Isolate* isolate,
return array_object;
}
-bool wasm::SyncValidate(Isolate* isolate, ErrorThrower* thrower,
- const ModuleWireBytes& bytes) {
+bool wasm::SyncValidate(Isolate* isolate, const ModuleWireBytes& bytes) {
if (bytes.start() == nullptr || bytes.length() == 0) return false;
ModuleResult result =
DecodeWasmModule(isolate, bytes.start(), bytes.end(), true, kWasmOrigin);
- if (result.val) delete result.val;
return result.ok();
}
@@ -2534,13 +2609,14 @@ MaybeHandle<WasmModuleObject> wasm::SyncCompileTranslatedAsmJs(
ModuleResult result = DecodeWasmModule(isolate, bytes.start(), bytes.end(),
false, kAsmJsOrigin);
if (result.failed()) {
- // TODO(titzer): use Result<std::unique_ptr<const WasmModule*>>?
- if (result.val) delete result.val;
thrower->CompileFailed("Wasm decoding failed", result);
return {};
}
- CompilationHelper helper(isolate, const_cast<WasmModule*>(result.val));
+ // Transfer ownership to the {WasmModuleWrapper} generated in
+ // {CompileToModuleObject}.
+ constexpr bool is_sync = true;
+ CompilationHelper helper(isolate, std::move(result.val), is_sync);
return helper.CompileToModuleObject(thrower, bytes, asm_js_script,
asm_js_offset_table_bytes);
}
@@ -2556,12 +2632,14 @@ MaybeHandle<WasmModuleObject> wasm::SyncCompile(Isolate* isolate,
ModuleResult result =
DecodeWasmModule(isolate, bytes.start(), bytes.end(), false, kWasmOrigin);
if (result.failed()) {
- if (result.val) delete result.val;
thrower->CompileFailed("Wasm decoding failed", result);
return {};
}
- CompilationHelper helper(isolate, const_cast<WasmModule*>(result.val));
+ // Transfer ownership to the {WasmModuleWrapper} generated in
+ // {CompileToModuleObject}.
+ constexpr bool is_sync = true;
+ CompilationHelper helper(isolate, std::move(result.val), is_sync);
return helper.CompileToModuleObject(thrower, bytes, Handle<Script>(),
Vector<const byte>());
}
@@ -2577,11 +2655,11 @@ MaybeHandle<WasmInstanceObject> wasm::SyncInstantiate(
namespace {
void RejectPromise(Isolate* isolate, Handle<Context> context,
- ErrorThrower* thrower, Handle<JSPromise> promise) {
+ ErrorThrower& thrower, Handle<JSPromise> promise) {
v8::Local<v8::Promise::Resolver> resolver =
v8::Utils::PromiseToLocal(promise).As<v8::Promise::Resolver>();
auto maybe = resolver->Reject(v8::Utils::ToLocal(context),
- v8::Utils::ToLocal(thrower->Reify()));
+ v8::Utils::ToLocal(thrower.Reify()));
CHECK_IMPLIES(!maybe.FromMaybe(false), isolate->has_scheduled_exception());
}
@@ -2603,51 +2681,13 @@ void wasm::AsyncInstantiate(Isolate* isolate, Handle<JSPromise> promise,
MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
isolate, &thrower, module_object, imports, Handle<JSArrayBuffer>::null());
if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), &thrower, promise);
+ RejectPromise(isolate, handle(isolate->context()), thrower, promise);
return;
}
ResolvePromise(isolate, handle(isolate->context()), promise,
instance_object.ToHandleChecked());
}
-void wasm::AsyncCompileAndInstantiate(Isolate* isolate,
- Handle<JSPromise> promise,
- const ModuleWireBytes& bytes,
- MaybeHandle<JSReceiver> imports) {
- ErrorThrower thrower(isolate, nullptr);
-
- // Compile the module.
- MaybeHandle<WasmModuleObject> module_object =
- SyncCompile(isolate, &thrower, bytes);
- if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), &thrower, promise);
- return;
- }
- Handle<WasmModuleObject> module = module_object.ToHandleChecked();
-
- // Instantiate the module.
- MaybeHandle<WasmInstanceObject> instance_object = SyncInstantiate(
- isolate, &thrower, module, imports, Handle<JSArrayBuffer>::null());
- if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), &thrower, promise);
- return;
- }
-
- Handle<JSFunction> object_function =
- Handle<JSFunction>(isolate->native_context()->object_function(), isolate);
- Handle<JSObject> ret =
- isolate->factory()->NewJSObject(object_function, TENURED);
- Handle<String> module_property_name =
- isolate->factory()->InternalizeUtf8String("module");
- Handle<String> instance_property_name =
- isolate->factory()->InternalizeUtf8String("instance");
- JSObject::AddProperty(ret, module_property_name, module, NONE);
- JSObject::AddProperty(ret, instance_property_name,
- instance_object.ToHandleChecked(), NONE);
-
- ResolvePromise(isolate, handle(isolate->context()), promise, ret);
-}
-
// Encapsulates all the state and steps of an asynchronous compilation.
// An asynchronous compile job consists of a number of tasks that are executed
// as foreground and background tasks. Any phase that touches the V8 heap or
@@ -2660,9 +2700,12 @@ void wasm::AsyncCompileAndInstantiate(Isolate* isolate,
// immediately before returning. Thus we handle the predictable mode specially,
// e.g. when we synchronize tasks or when we delete the AsyncCompileJob.
class AsyncCompileJob {
+ // TODO(ahaas): Fix https://bugs.chromium.org/p/v8/issues/detail?id=6263 to
+ // make sure that d8 does not shut down before the AsyncCompileJob is
+ // finished.
public:
explicit AsyncCompileJob(Isolate* isolate, std::unique_ptr<byte[]> bytes_copy,
- int length, Handle<Context> context,
+ size_t length, Handle<Context> context,
Handle<JSPromise> promise)
: isolate_(isolate),
bytes_copy_(std::move(bytes_copy)),
@@ -2674,8 +2717,8 @@ class AsyncCompileJob {
deferred_handles_.push_back(deferred.Detach());
}
- bool Start() {
- return DoAsync(&AsyncCompileJob::DecodeModule); // --
+ void Start() {
+ DoAsync<DecodeModule>(); // --
}
~AsyncCompileJob() {
@@ -2688,357 +2731,442 @@ class AsyncCompileJob {
ModuleWireBytes wire_bytes_;
Handle<Context> context_;
Handle<JSPromise> module_promise_;
- WasmModule* module_ = nullptr;
- ModuleResult result_;
- std::unique_ptr<CompilationHelper> helper_ = nullptr;
- std::unique_ptr<ModuleBytesEnv> module_bytes_env_ = nullptr;
+ std::unique_ptr<CompilationHelper> helper_;
+ std::unique_ptr<ModuleBytesEnv> module_bytes_env_;
- volatile bool failed_ = false;
+ bool failed_ = false;
std::vector<DeferredHandles*> deferred_handles_;
- Handle<WasmModuleWrapper> module_wrapper_;
Handle<WasmModuleObject> module_object_;
Handle<FixedArray> function_tables_;
Handle<FixedArray> signature_tables_;
Handle<WasmCompiledModule> compiled_module_;
Handle<FixedArray> code_table_;
std::unique_ptr<WasmInstance> temp_instance_ = nullptr;
- std::unique_ptr<uint32_t[]> task_ids_ = nullptr;
size_t outstanding_units_ = 0;
size_t num_background_tasks_ = 0;
void ReopenHandlesInDeferredScope() {
DeferredHandleScope deferred(isolate_);
- module_wrapper_ = handle(*module_wrapper_, isolate_);
function_tables_ = handle(*function_tables_, isolate_);
signature_tables_ = handle(*signature_tables_, isolate_);
code_table_ = handle(*code_table_, isolate_);
temp_instance_->ReopenHandles(isolate_);
- helper_->InitializeHandles();
+ for (auto& unit : helper_->compilation_units_) {
+ unit->ReopenCentryStub();
+ }
deferred_handles_.push_back(deferred.Detach());
}
+ void AsyncCompileFailed(ErrorThrower& thrower) {
+ RejectPromise(isolate_, context_, thrower, module_promise_);
+ // The AsyncCompileJob is finished, we resolved the promise, we do not need
+ // the data anymore. We can delete the AsyncCompileJob object.
+ if (!FLAG_verify_predictable) delete this;
+ }
+
+ void AsyncCompileSucceeded(Handle<Object> result) {
+ ResolvePromise(isolate_, context_, module_promise_, result);
+ // The AsyncCompileJob is finished, we resolved the promise, we do not need
+ // the data anymore. We can delete the AsyncCompileJob object.
+ if (!FLAG_verify_predictable) delete this;
+ }
+
+ enum TaskType { SYNC, ASYNC };
+
+ // A closure to run a compilation step (either as foreground or background
+ // task) and schedule the next step(s), if any.
+ class CompileTask : NON_EXPORTED_BASE(public v8::Task) {
+ public:
+ AsyncCompileJob* job_ = nullptr;
+ CompileTask() {}
+ void Run() override = 0; // Force sub-classes to override Run().
+ };
+
+ class AsyncCompileTask : public CompileTask {};
+
+ class SyncCompileTask : public CompileTask {
+ public:
+ void Run() final {
+ SaveContext saved_context(job_->isolate_);
+ job_->isolate_->set_context(*job_->context_);
+ RunImpl();
+ }
+
+ protected:
+ virtual void RunImpl() = 0;
+ };
+
+ template <typename Task, typename... Args>
+ void DoSync(Args&&... args) {
+ static_assert(std::is_base_of<SyncCompileTask, Task>::value,
+ "Scheduled type must be sync");
+ Task* task = new Task(std::forward<Args>(args)...);
+ task->job_ = this;
+ V8::GetCurrentPlatform()->CallOnForegroundThread(
+ reinterpret_cast<v8::Isolate*>(isolate_), task);
+ }
+
+ template <typename Task, typename... Args>
+ void DoAsync(Args&&... args) {
+ static_assert(std::is_base_of<AsyncCompileTask, Task>::value,
+ "Scheduled type must be async");
+ Task* task = new Task(std::forward<Args>(args)...);
+ task->job_ = this;
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
+
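DoSync/DoAsync above form a small compile-time-checked dispatcher: the static_assert makes it impossible to post a foreground step class to the background queue, or vice versa. A standalone sketch of the same shape, with an immediate Run() call standing in for v8::Platform's two task queues:

    #include <memory>
    #include <type_traits>
    #include <utility>

    struct SyncTask  { virtual ~SyncTask() = default;  virtual void Run() = 0; };
    struct AsyncTask { virtual ~AsyncTask() = default; virtual void Run() = 0; };

    // Each scheduler constructs the step with forwarded arguments and rejects,
    // at compile time, a step bound to the wrong thread.
    template <typename Task, typename... Args>
    void DoSync(Args&&... args) {
      static_assert(std::is_base_of<SyncTask, Task>::value,
                    "Scheduled type must be sync");
      std::make_unique<Task>(std::forward<Args>(args)...)->Run();  // would go to the foreground queue
    }

    template <typename Task, typename... Args>
    void DoAsync(Args&&... args) {
      static_assert(std::is_base_of<AsyncTask, Task>::value,
                    "Scheduled type must be async");
      std::make_unique<Task>(std::forward<Args>(args)...)->Run();  // would go to a worker thread
    }

    struct DecodeStep : AsyncTask { void Run() override {} };
    int main() { DoAsync<DecodeStep>(); }  // DoSync<DecodeStep>() would not compile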
//==========================================================================
// Step 1: (async) Decode the module.
//==========================================================================
- bool DecodeModule() {
- {
- DisallowHandleAllocation no_handle;
- DisallowHeapAllocation no_allocation;
- // Decode the module bytes.
- TRACE_COMPILE("(1) Decoding module...\n");
- result_ = DecodeWasmModule(isolate_, wire_bytes_.start(),
- wire_bytes_.end(), true, kWasmOrigin);
- }
- if (result_.failed()) {
- // Decoding failure; reject the promise and clean up.
- if (result_.val) delete result_.val;
- return DoSync(&AsyncCompileJob::DecodeFail);
- } else {
- // Decode passed.
- module_ = const_cast<WasmModule*>(result_.val);
- return DoSync(&AsyncCompileJob::PrepareAndStartCompile);
+ class DecodeModule : public AsyncCompileTask {
+ void Run() override {
+ ModuleResult result;
+ {
+ DisallowHandleAllocation no_handle;
+ DisallowHeapAllocation no_allocation;
+ // Decode the module bytes.
+ TRACE_COMPILE("(1) Decoding module...\n");
+ constexpr bool is_sync = true;
+ result = DecodeWasmModule(job_->isolate_, job_->wire_bytes_.start(),
+ job_->wire_bytes_.end(), false, kWasmOrigin,
+ !is_sync);
+ }
+ if (result.failed()) {
+ // Decoding failure; reject the promise and clean up.
+ job_->DoSync<DecodeFail>(std::move(result));
+ } else {
+ // Decode passed.
+ job_->DoSync<PrepareAndStartCompile>(std::move(result.val));
+ }
}
- }
+ };
//==========================================================================
// Step 1b: (sync) Fail decoding the module.
//==========================================================================
- bool DecodeFail() {
- HandleScope scope(isolate_);
- ErrorThrower thrower(isolate_, nullptr);
- thrower.CompileFailed("Wasm decoding failed", result_);
- RejectPromise(isolate_, context_, &thrower, module_promise_);
- return false;
- }
+ class DecodeFail : public SyncCompileTask {
+ public:
+ explicit DecodeFail(ModuleResult result) : result_(std::move(result)) {}
+
+ private:
+ ModuleResult result_;
+ void RunImpl() override {
+ TRACE_COMPILE("(1b) Decoding failed.\n");
+ HandleScope scope(job_->isolate_);
+ ErrorThrower thrower(job_->isolate_, "AsyncCompile");
+ thrower.CompileFailed("Wasm decoding failed", result_);
+ // {job_} is deleted in AsyncCompileFailed, therefore the {return}.
+ return job_->AsyncCompileFailed(thrower);
+ }
+ };
//==========================================================================
// Step 2 (sync): Create heap-allocated data and start compile.
//==========================================================================
- bool PrepareAndStartCompile() {
- TRACE_COMPILE("(2) Prepare and start compile...\n");
- HandleScope scope(isolate_);
+ class PrepareAndStartCompile : public SyncCompileTask {
+ public:
+ explicit PrepareAndStartCompile(std::unique_ptr<WasmModule> module)
+ : module_(std::move(module)) {}
+
+ private:
+ std::unique_ptr<WasmModule> module_;
+ void RunImpl() override {
+ TRACE_COMPILE("(2) Prepare and start compile...\n");
+ HandleScope scope(job_->isolate_);
+
+ Factory* factory = job_->isolate_->factory();
+ job_->temp_instance_.reset(new WasmInstance(module_.get()));
+ job_->temp_instance_->context = job_->context_;
+ job_->temp_instance_->mem_size =
+ WasmModule::kPageSize * module_->min_mem_pages;
+ job_->temp_instance_->mem_start = nullptr;
+ job_->temp_instance_->globals_start = nullptr;
+
+ // Initialize the indirect tables with placeholders.
+ int function_table_count =
+ static_cast<int>(module_->function_tables.size());
+ job_->function_tables_ =
+ factory->NewFixedArray(function_table_count, TENURED);
+ job_->signature_tables_ =
+ factory->NewFixedArray(function_table_count, TENURED);
+ for (int i = 0; i < function_table_count; ++i) {
+ job_->temp_instance_->function_tables[i] =
+ factory->NewFixedArray(1, TENURED);
+ job_->temp_instance_->signature_tables[i] =
+ factory->NewFixedArray(1, TENURED);
+ job_->function_tables_->set(i,
+ *job_->temp_instance_->function_tables[i]);
+ job_->signature_tables_->set(
+ i, *job_->temp_instance_->signature_tables[i]);
+ }
- Factory* factory = isolate_->factory();
- // The {module_wrapper} will take ownership of the {WasmModule} object,
- // and it will be destroyed when the GC reclaims the wrapper object.
- module_wrapper_ = WasmModuleWrapper::New(isolate_, module_);
- temp_instance_ = std::unique_ptr<WasmInstance>(new WasmInstance(module_));
- temp_instance_->context = context_;
- temp_instance_->mem_size = WasmModule::kPageSize * module_->min_mem_pages;
- temp_instance_->mem_start = nullptr;
- temp_instance_->globals_start = nullptr;
+ // The {code_table} array contains import wrappers and functions (which
+ // are both included in {functions.size()}), and export wrappers.
+ // The results of compilation will be written into it.
+ int code_table_size = static_cast<int>(module_->functions.size() +
+ module_->num_exported_functions);
+ job_->code_table_ = factory->NewFixedArray(code_table_size, TENURED);
+
+ // Initialize {code_table_} with the illegal builtin. All call sites
+ // will be patched at instantiation.
+ Handle<Code> illegal_builtin = job_->isolate_->builtins()->Illegal();
+ // TODO(wasm): Fix this for lazy compilation.
+ for (uint32_t i = 0; i < module_->functions.size(); ++i) {
+ job_->code_table_->set(static_cast<int>(i), *illegal_builtin);
+ job_->temp_instance_->function_code[i] = illegal_builtin;
+ }
- // Initialize the indirect tables with placeholders.
- int function_table_count =
- static_cast<int>(module_->function_tables.size());
- function_tables_ = factory->NewFixedArray(function_table_count, TENURED);
- signature_tables_ = factory->NewFixedArray(function_table_count, TENURED);
- for (int i = 0; i < function_table_count; ++i) {
- temp_instance_->function_tables[i] = factory->NewFixedArray(1, TENURED);
- temp_instance_->signature_tables[i] = factory->NewFixedArray(1, TENURED);
- function_tables_->set(i, *temp_instance_->function_tables[i]);
- signature_tables_->set(i, *temp_instance_->signature_tables[i]);
- }
+ job_->isolate_->counters()->wasm_functions_per_wasm_module()->AddSample(
+ static_cast<int>(module_->functions.size()));
+
+ // Transfer ownership of the {WasmModule} to the {CompilationHelper}, but
+ // keep a pointer.
+ WasmModule* module = module_.get();
+ constexpr bool is_sync = true;
+ job_->helper_.reset(
+ new CompilationHelper(job_->isolate_, std::move(module_), !is_sync));
+
+ DCHECK_LE(module->num_imported_functions, module->functions.size());
+ size_t num_functions =
+ module->functions.size() - module->num_imported_functions;
+ if (num_functions == 0) {
+ job_->ReopenHandlesInDeferredScope();
+ // Degenerate case of an empty module.
+ job_->DoSync<FinishCompile>();
+ return;
+ }
- // The {code_table} array contains import wrappers and functions (which
- // are both included in {functions.size()}, and export wrappers.
- // The results of compilation will be written into it.
- int code_table_size = static_cast<int>(module_->functions.size() +
- module_->num_exported_functions);
- code_table_ = factory->NewFixedArray(code_table_size, TENURED);
-
- // Initialize {code_table_} with the illegal builtin. All call sites
- // will be patched at instantiation.
- Handle<Code> illegal_builtin = isolate_->builtins()->Illegal();
- // TODO(wasm): Fix this for lazy compilation.
- for (uint32_t i = 0; i < module_->functions.size(); ++i) {
- code_table_->set(static_cast<int>(i), *illegal_builtin);
- temp_instance_->function_code[i] = illegal_builtin;
- }
-
- isolate_->counters()->wasm_functions_per_wasm_module()->AddSample(
- static_cast<int>(module_->functions.size()));
-
- helper_.reset(new CompilationHelper(isolate_, module_));
-
- DCHECK_LE(module_->num_imported_functions, module_->functions.size());
- size_t num_functions =
- module_->functions.size() - module_->num_imported_functions;
- if (num_functions == 0) {
- ReopenHandlesInDeferredScope();
- // Degenerate case of an empty module.
- return DoSync(&AsyncCompileJob::FinishCompile);
- }
-
- // Start asynchronous compilation tasks.
- num_background_tasks_ =
- Max(static_cast<size_t>(1),
- Min(num_functions,
- Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
- V8::GetCurrentPlatform()
- ->NumberOfAvailableBackgroundThreads())));
- module_bytes_env_ = std::unique_ptr<ModuleBytesEnv>(
- new ModuleBytesEnv(module_, temp_instance_.get(), wire_bytes_));
- outstanding_units_ = helper_->InitializeParallelCompilation(
- module_->functions, *module_bytes_env_);
-
- // Reopen all handles which should survive in the DeferredHandleScope.
- ReopenHandlesInDeferredScope();
- task_ids_ =
- std::unique_ptr<uint32_t[]>(new uint32_t[num_background_tasks_]);
- for (size_t i = 0; i < num_background_tasks_; ++i) {
- DoAsync(&AsyncCompileJob::ExecuteCompilationUnits, &(task_ids_.get())[i]);
+ // Start asynchronous compilation tasks.
+ job_->num_background_tasks_ =
+ Max(static_cast<size_t>(1),
+ Min(num_functions,
+ Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
+ V8::GetCurrentPlatform()
+ ->NumberOfAvailableBackgroundThreads())));
+ job_->module_bytes_env_.reset(new ModuleBytesEnv(
+ module, job_->temp_instance_.get(), job_->wire_bytes_));
+ job_->outstanding_units_ = job_->helper_->InitializeParallelCompilation(
+ module->functions, *job_->module_bytes_env_);
+
+ // Reopen all handles which should survive in the DeferredHandleScope.
+ job_->ReopenHandlesInDeferredScope();
+ for (size_t i = 0; i < job_->num_background_tasks_; ++i) {
+ job_->DoAsync<ExecuteCompilationUnits>();
+ }
}
- return true;
- }
+ };
//==========================================================================
// Step 3 (async x K tasks): Execute compilation units.
//==========================================================================
- bool ExecuteCompilationUnits() {
- TRACE_COMPILE("(3) Compiling...\n");
- while (!failed_) {
- {
- DisallowHandleAllocation no_handle;
- DisallowHeapAllocation no_allocation;
- if (!helper_->FetchAndExecuteCompilationUnit()) break;
+ class ExecuteCompilationUnits : public AsyncCompileTask {
+ void Run() override {
+ TRACE_COMPILE("(3) Compiling...\n");
+ for (;;) {
+ {
+ DisallowHandleAllocation no_handle;
+ DisallowHeapAllocation no_allocation;
+ if (!job_->helper_->FetchAndExecuteCompilationUnit()) break;
+ }
+ // TODO(ahaas): Create one FinishCompilationUnit job for all compilation
+ // units.
+ job_->DoSync<FinishCompilationUnit>();
+ // TODO(ahaas): Limit the number of outstanding compilation units to be
+ // finished to reduce memory overhead.
}
- // TODO(ahaas): Create one FinishCompilationUnit job for all compilation
- // units.
- DoSync(&AsyncCompileJob::FinishCompilationUnit);
- // TODO(ahaas): Limit the number of outstanding compilation units to be
- // finished to reduce memory overhead.
- }
- // Special handling for predictable mode, see above.
- if (!FLAG_verify_predictable)
- helper_->module_->pending_tasks.get()->Signal();
- return true;
- }
+ // Special handling for predictable mode, see above.
+ if (!FLAG_verify_predictable)
+ job_->helper_->module_->pending_tasks.get()->Signal();
+ }
+ };
//==========================================================================
// Step 4 (sync x each function): Finish a single compilation unit.
//==========================================================================
- bool FinishCompilationUnit() {
- TRACE_COMPILE("(4a) Finishing compilation unit...\n");
- HandleScope scope(isolate_);
- if (failed_) return true; // already failed
-
- int func_index = -1;
- ErrorThrower thrower(isolate_, nullptr);
- Handle<Code> result = helper_->FinishCompilationUnit(&thrower, &func_index);
- if (thrower.error()) {
- RejectPromise(isolate_, context_, &thrower, module_promise_);
- failed_ = true;
- } else {
- DCHECK(func_index >= 0);
- code_table_->set(func_index, *(result));
- }
- if (failed_ || --outstanding_units_ == 0) {
- // All compilation units are done. We still need to wait for the
- // background tasks to shut down and only then is it safe to finish the
- // compile and delete this job. We can wait for that to happen also
- // in a background task.
- DoAsync(&AsyncCompileJob::WaitForBackgroundTasks);
+ class FinishCompilationUnit : public SyncCompileTask {
+ void RunImpl() override {
+ TRACE_COMPILE("(4a) Finishing compilation unit...\n");
+ HandleScope scope(job_->isolate_);
+ if (job_->failed_) return; // already failed
+
+ int func_index = -1;
+ ErrorThrower thrower(job_->isolate_, "AsyncCompile");
+ Handle<Code> result =
+ job_->helper_->FinishCompilationUnit(&thrower, &func_index);
+ if (thrower.error()) {
+ job_->failed_ = true;
+ } else {
+ DCHECK(func_index >= 0);
+ job_->code_table_->set(func_index, *(result));
+ }
+ if (thrower.error() || --job_->outstanding_units_ == 0) {
+ // All compilation units are done. We still need to wait for the
+ // background tasks to shut down and only then is it safe to finish the
+ // compile and delete this job. We can wait for that to happen also
+ // in a background task.
+ job_->DoAsync<WaitForBackgroundTasks>(std::move(thrower));
+ }
}
- return true;
- }
+ };
//==========================================================================
// Step 4b (async): Wait for all background tasks to finish.
//==========================================================================
- bool WaitForBackgroundTasks() {
- TRACE_COMPILE("(4b) Waiting for background tasks...\n");
- // Special handling for predictable mode, see above.
- if (!FLAG_verify_predictable) {
- for (size_t i = 0; i < num_background_tasks_; ++i) {
- // If the task has not started yet, then we abort it. Otherwise we wait
- // for it to finish.
-
- if (isolate_->cancelable_task_manager()->TryAbort(task_ids_.get()[i]) !=
- CancelableTaskManager::kTaskAborted) {
- module_->pending_tasks.get()->Wait();
+ class WaitForBackgroundTasks : public AsyncCompileTask {
+ public:
+ explicit WaitForBackgroundTasks(ErrorThrower thrower)
+ : thrower_(std::move(thrower)) {}
+
+ private:
+ ErrorThrower thrower_;
+
+ void Run() override {
+ TRACE_COMPILE("(4b) Waiting for background tasks...\n");
+ // Bump next_unit_, such that background tasks stop processing the queue.
+ job_->helper_->next_unit_.SetValue(
+ job_->helper_->compilation_units_.size());
+ // Special handling for predictable mode, see above.
+ if (!FLAG_verify_predictable) {
+ for (size_t i = 0; i < job_->num_background_tasks_; ++i) {
+ // We wait for it to finish.
+ job_->helper_->module_->pending_tasks.get()->Wait();
}
}
+ if (thrower_.error()) {
+ job_->DoSync<FailCompile>(std::move(thrower_));
+ } else {
+ job_->DoSync<FinishCompile>();
+ }
}
- if (failed_) {
- // If {failed_}, we've already rejected the promise and there
- // is nothing more to do.
- return false;
- } else {
- // Otherwise, post a synchronous task to finish the compile.
- DoSync(&AsyncCompileJob::FinishCompile);
- return true;
- }
- }
+ };
//==========================================================================
- // Step 5 (sync): Finish heap-allocated data structures.
+ // Step 5a (sync): Fail compilation (reject promise).
//==========================================================================
- bool FinishCompile() {
- TRACE_COMPILE("(5) Finish compile...\n");
- HandleScope scope(isolate_);
- SaveContext saved_context(isolate_);
- isolate_->set_context(*context_);
- // At this point, compilation has completed. Update the code table.
- for (size_t i = FLAG_skip_compiling_wasm_funcs;
- i < temp_instance_->function_code.size(); ++i) {
- Code* code = Code::cast(code_table_->get(static_cast<int>(i)));
- RecordStats(isolate_, code);
- }
-
- // Create heap objects for script and module bytes to be stored in the
- // shared module data. Asm.js is not compiled asynchronously.
- Handle<Script> script = CreateWasmScript(isolate_, wire_bytes_);
- Handle<ByteArray> asm_js_offset_table;
- // TODO(wasm): Improve efficiency of storing module wire bytes.
- // 1. Only store relevant sections, not function bodies
- // 2. Don't make a second copy of the bytes here; reuse the copy made
- // for asynchronous compilation and store it as an external one
- // byte string for serialization/deserialization.
- Handle<String> module_bytes =
- isolate_->factory()
- ->NewStringFromOneByte({wire_bytes_.start(), wire_bytes_.length()},
- TENURED)
- .ToHandleChecked();
- DCHECK(module_bytes->IsSeqOneByteString());
-
- // Create the shared module data.
- // TODO(clemensh): For the same module (same bytes / same hash), we should
- // only have one WasmSharedModuleData. Otherwise, we might only set
- // breakpoints on a (potentially empty) subset of the instances.
+ class FailCompile : public SyncCompileTask {
+ public:
+ explicit FailCompile(ErrorThrower thrower) : thrower_(std::move(thrower)) {}
- Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
- isolate_, module_wrapper_, Handle<SeqOneByteString>::cast(module_bytes),
- script, asm_js_offset_table);
+ private:
+ ErrorThrower thrower_;
- // Create the compiled module object and populate with compiled functions
- // and information needed at instantiation time. This object needs to be
- // serializable. Instantiation may occur off a deserialized version of this
- // object.
- compiled_module_ = WasmCompiledModule::New(
- isolate_, shared, code_table_, function_tables_, signature_tables_);
+ void RunImpl() override {
+ TRACE_COMPILE("(5a) Fail compilation...\n");
+ HandleScope scope(job_->isolate_);
+ return job_->AsyncCompileFailed(thrower_);
+ }
+ };
- // Finish the WASM script now and make it public to the debugger.
- script->set_wasm_compiled_module(*compiled_module_);
- isolate_->debug()->OnAfterCompile(script);
+ //==========================================================================
+ // Step 5b (sync): Finish heap-allocated data structures.
+ //==========================================================================
+ class FinishCompile : public SyncCompileTask {
+ void RunImpl() override {
+ TRACE_COMPILE("(5b) Finish compile...\n");
+ HandleScope scope(job_->isolate_);
+ // At this point, compilation has completed. Update the code table.
+ constexpr bool is_sync = true;
+ for (size_t i = FLAG_skip_compiling_wasm_funcs;
+ i < job_->temp_instance_->function_code.size(); ++i) {
+ Code* code = Code::cast(job_->code_table_->get(static_cast<int>(i)));
+ RecordStats(job_->isolate_, code, !is_sync);
+ }
- DeferredHandleScope deferred(isolate_);
- compiled_module_ = handle(*compiled_module_, isolate_);
- deferred_handles_.push_back(deferred.Detach());
- // TODO(wasm): compiling wrappers should be made async as well.
- return DoSync(&AsyncCompileJob::CompileWrappers);
- }
+ // Create heap objects for script and module bytes to be stored in the
+ // shared module data. Asm.js is not compiled asynchronously.
+ Handle<Script> script =
+ CreateWasmScript(job_->isolate_, job_->wire_bytes_);
+ Handle<ByteArray> asm_js_offset_table;
+ // TODO(wasm): Improve efficiency of storing module wire bytes.
+ // 1. Only store relevant sections, not function bodies
+ // 2. Don't make a second copy of the bytes here; reuse the copy made
+ // for asynchronous compilation and store it as an external one
+ // byte string for serialization/deserialization.
+ Handle<String> module_bytes =
+ job_->isolate_->factory()
+ ->NewStringFromOneByte(
+ {job_->wire_bytes_.start(), job_->wire_bytes_.length()},
+ TENURED)
+ .ToHandleChecked();
+ DCHECK(module_bytes->IsSeqOneByteString());
+
+ // The {module_wrapper} will take ownership of the {WasmModule} object,
+ // and it will be destroyed when the GC reclaims the wrapper object.
+ Handle<WasmModuleWrapper> module_wrapper = WasmModuleWrapper::New(
+ job_->isolate_, job_->helper_->module_.release());
+
+ // Create the shared module data.
+ // TODO(clemensh): For the same module (same bytes / same hash), we should
+ // only have one WasmSharedModuleData. Otherwise, we might only set
+ // breakpoints on a (potentially empty) subset of the instances.
+
+ Handle<WasmSharedModuleData> shared = WasmSharedModuleData::New(
+ job_->isolate_, module_wrapper,
+ Handle<SeqOneByteString>::cast(module_bytes), script,
+ asm_js_offset_table);
+
+ // Create the compiled module object and populate with compiled functions
+ // and information needed at instantiation time. This object needs to be
+ // serializable. Instantiation may occur off a deserialized version of
+ // this object.
+ job_->compiled_module_ = WasmCompiledModule::New(
+ job_->isolate_, shared, job_->code_table_, job_->function_tables_,
+ job_->signature_tables_);
+
+ // Finish the WASM script now and make it public to the debugger.
+ script->set_wasm_compiled_module(*job_->compiled_module_);
+ job_->isolate_->debug()->OnAfterCompile(script);
+
+ DeferredHandleScope deferred(job_->isolate_);
+ job_->compiled_module_ = handle(*job_->compiled_module_, job_->isolate_);
+ job_->deferred_handles_.push_back(deferred.Detach());
+ // TODO(wasm): compiling wrappers should be made async as well.
+ job_->DoSync<CompileWrappers>();
+ }
+ };
//==========================================================================
// Step 6 (sync): Compile JS->WASM wrappers.
//==========================================================================
- bool CompileWrappers() {
- TRACE_COMPILE("(6) Compile wrappers...\n");
- // Compile JS->WASM wrappers for exported functions.
- HandleScope scope(isolate_);
- JSToWasmWrapperCache js_to_wasm_cache;
- int func_index = 0;
- for (auto exp : module_->export_table) {
- if (exp.kind != kExternalFunction) continue;
- Handle<Code> wasm_code(Code::cast(code_table_->get(exp.index)), isolate_);
- Handle<Code> wrapper_code =
- js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(isolate_, module_,
- wasm_code, exp.index);
- int export_index =
- static_cast<int>(module_->functions.size() + func_index);
- code_table_->set(export_index, *wrapper_code);
- RecordStats(isolate_, *wrapper_code);
- func_index++;
- }
+ class CompileWrappers : public SyncCompileTask {
+ void RunImpl() override {
+ TRACE_COMPILE("(6) Compile wrappers...\n");
+ // Compile JS->WASM wrappers for exported functions.
+ HandleScope scope(job_->isolate_);
+ JSToWasmWrapperCache js_to_wasm_cache;
+ int func_index = 0;
+ constexpr bool is_sync = true;
+ WasmModule* module = job_->compiled_module_->module();
+ for (auto exp : module->export_table) {
+ if (exp.kind != kExternalFunction) continue;
+ Handle<Code> wasm_code(Code::cast(job_->code_table_->get(exp.index)),
+ job_->isolate_);
+ Handle<Code> wrapper_code =
+ js_to_wasm_cache.CloneOrCompileJSToWasmWrapper(
+ job_->isolate_, module, wasm_code, exp.index);
+ int export_index =
+ static_cast<int>(module->functions.size() + func_index);
+ job_->code_table_->set(export_index, *wrapper_code);
+ RecordStats(job_->isolate_, *wrapper_code, !is_sync);
+ func_index++;
+ }
- return DoSync(&AsyncCompileJob::FinishModule);
- }
+ job_->DoSync<FinishModule>();
+ }
+ };
//==========================================================================
// Step 7 (sync): Finish the module and resolve the promise.
//==========================================================================
- bool FinishModule() {
- TRACE_COMPILE("(7) Finish module...\n");
- HandleScope scope(isolate_);
- SaveContext saved_context(isolate_);
- isolate_->set_context(*context_);
- Handle<WasmModuleObject> result =
- WasmModuleObject::New(isolate_, compiled_module_);
- ResolvePromise(isolate_, context_, module_promise_, result);
- return false; // no more work to do.
- }
-
- // Run the given member method as an asynchronous task.
- bool DoAsync(bool (AsyncCompileJob::*func)(), uint32_t* task_id = nullptr) {
- auto task = new Task(this, func);
- if (task_id) *task_id = task->id();
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- task, v8::Platform::kShortRunningTask);
- return true; // more work to do.
- }
-
- // Run the given member method as a synchronous task.
- bool DoSync(bool (AsyncCompileJob::*func)()) {
- V8::GetCurrentPlatform()->CallOnForegroundThread(
- reinterpret_cast<v8::Isolate*>(isolate_), new Task(this, func));
- return true; // more work to do.
- }
-
- // A helper closure to run a particular member method as a task.
- class Task : public CancelableTask {
- public:
- AsyncCompileJob* job_;
- bool (AsyncCompileJob::*func_)();
- explicit Task(AsyncCompileJob* job, bool (AsyncCompileJob::*func)())
- : CancelableTask(job->isolate_), job_(job), func_(func) {}
-
- void RunInternal() override {
- bool more = (job_->*func_)(); // run the task.
- if (!more) {
- // If no more work, then this job is done. Predictable mode is handled
- // specially though, see above.
- if (!FLAG_verify_predictable) delete job_;
- }
+ class FinishModule : public SyncCompileTask {
+ void RunImpl() override {
+ TRACE_COMPILE("(7) Finish module...\n");
+ HandleScope scope(job_->isolate_);
+ Handle<WasmModuleObject> result =
+ WasmModuleObject::New(job_->isolate_, job_->compiled_module_);
+ // {job_} is deleted in AsyncCompileSucceeded, therefore the {return}.
+ return job_->AsyncCompileSucceeded(result);
}
};
};
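Taken together, the class is a linear state machine: decode (async), prepare (sync), compile (async, K tasks), finish units (sync), wait (async), finish compile (sync), wrappers (sync), resolve or reject (sync), where the terminal steps delete the job. A toy sketch of that "each step posts its successor" shape, with a plain queue standing in for the platform's foreground task queue:

    #include <cstdio>
    #include <functional>
    #include <queue>
    #include <utility>

    std::queue<std::function<void()>> loop;  // stand-in event loop

    struct Job {
      void Step(int n) {
        std::printf("step %d\n", n);
        if (n < 7) {
          loop.push([this, n] { Step(n + 1); });  // schedule the successor
        } else {
          delete this;  // the terminal step resolves the promise and frees the job
        }
      }
    };

    int main() {
      (new Job())->Step(1);
      while (!loop.empty()) {
        auto task = std::move(loop.front());
        loop.pop();
        task();
      }
    }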
@@ -3051,7 +3179,7 @@ void wasm::AsyncCompile(Isolate* isolate, Handle<JSPromise> promise,
MaybeHandle<WasmModuleObject> module_object =
SyncCompile(isolate, &thrower, bytes);
if (thrower.error()) {
- RejectPromise(isolate, handle(isolate->context()), &thrower, promise);
+ RejectPromise(isolate, handle(isolate->context()), thrower, promise);
return;
}
Handle<WasmModuleObject> module = module_object.ToHandleChecked();
@@ -3119,13 +3247,8 @@ Handle<Code> wasm::CompileLazy(Isolate* isolate) {
bool patch_caller = caller_code->kind() == Code::JS_TO_WASM_FUNCTION ||
exp_deopt_data.is_null() || exp_deopt_data->length() <= 2;
- MaybeHandle<Code> maybe_compiled_code = WasmCompiledModule::CompileLazy(
+ Handle<Code> compiled_code = WasmCompiledModule::CompileLazy(
isolate, instance, caller_code, offset, func_index, patch_caller);
- if (maybe_compiled_code.is_null()) {
- DCHECK(isolate->has_pending_exception());
- return isolate->builtins()->Illegal();
- }
- Handle<Code> compiled_code = maybe_compiled_code.ToHandleChecked();
if (!exp_deopt_data.is_null() && exp_deopt_data->length() > 2) {
// See EnsureExportedLazyDeoptData: exp_deopt_data[2...(len-1)] are pairs of
// <export_table, index> followed by undefined values.
@@ -3148,14 +3271,15 @@ Handle<Code> wasm::CompileLazy(Isolate* isolate) {
return compiled_code;
}
-bool LazyCompilationOrchestrator::CompileFunction(
+void LazyCompilationOrchestrator::CompileFunction(
Isolate* isolate, Handle<WasmInstanceObject> instance, int func_index) {
Handle<WasmCompiledModule> compiled_module(instance->compiled_module(),
isolate);
if (Code::cast(compiled_module->code_table()->get(func_index))->kind() ==
Code::WASM_FUNCTION) {
- return true;
+ return;
}
+
size_t num_function_tables =
compiled_module->module()->function_tables.size();
// Store a vector of handles to be embedded in the generated code.
@@ -3191,10 +3315,14 @@ bool LazyCompilationOrchestrator::CompileFunction(
ErrorThrower thrower(isolate, "WasmLazyCompile");
compiler::WasmCompilationUnit unit(isolate, &module_env, body,
CStrVector(func_name.c_str()), func_index);
- unit.InitializeHandles();
unit.ExecuteCompilation();
Handle<Code> code = unit.FinishCompilation(&thrower);
+ // If there is a pending error, something really went wrong. The module was
+ // verified before starting execution with lazy compilation.
+ // This might be OOM, but then we cannot continue execution anyway.
+ CHECK(!thrower.error());
+
Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(2, TENURED);
Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
// TODO(wasm): Introduce constants for the indexes in wasm deopt data.
@@ -3202,11 +3330,6 @@ bool LazyCompilationOrchestrator::CompileFunction(
deopt_data->set(1, Smi::FromInt(func_index));
code->set_deoptimization_data(*deopt_data);
- if (thrower.error()) {
- if (!isolate->has_pending_exception()) isolate->Throw(*thrower.Reify());
- return false;
- }
-
DCHECK_EQ(Builtins::kWasmCompileLazy,
Code::cast(compiled_module->code_table()->get(func_index))
->builtin_index());
@@ -3235,10 +3358,9 @@ bool LazyCompilationOrchestrator::CompileFunction(
Assembler::FlushICache(isolate, code->instruction_start(),
code->instruction_size());
RecordLazyCodeStats(isolate, *code);
- return true;
}
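CompileFunction is now infallible by contract: the module was fully validated before execution began, so a per-function compile error at this point can only mean something unrecoverable (such as OOM), and CHECK aborts instead of surfacing a JS exception. The surrounding call pattern is the usual compile-on-first-call cache; a generic sketch with hypothetical names:

    #include <cassert>
    #include <functional>
    #include <unordered_map>
    #include <utility>

    using CompiledFn = std::function<int(int)>;

    // First call compiles and memoizes; later calls hit the cache. Compilation
    // is assumed infallible because validation already happened up front.
    struct LazyOrchestrator {
      std::unordered_map<int, CompiledFn> cache;
      CompiledFn& CompileLazy(int func_index) {
        auto it = cache.find(func_index);
        if (it == cache.end()) {
          CompiledFn fn = [func_index](int x) { return x + func_index; };
          assert(fn != nullptr);  // analogue of CHECK(!thrower.error())
          it = cache.emplace(func_index, std::move(fn)).first;
        }
        return it->second;
      }
    };

    int main() {
      LazyOrchestrator orch;
      return orch.CompileLazy(3)(4) == 7 ? 0 : 1;
    }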
-MaybeHandle<Code> LazyCompilationOrchestrator::CompileLazy(
+Handle<Code> LazyCompilationOrchestrator::CompileLazy(
Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> caller,
int call_offset, int exported_func_index, bool patch_caller) {
struct NonCompiledFunction {
@@ -3258,7 +3380,7 @@ MaybeHandle<Code> LazyCompilationOrchestrator::CompileLazy(
DisallowHeapAllocation no_gc;
SeqOneByteString* module_bytes = compiled_module->module_bytes();
SourcePositionTableIterator source_pos_iterator(
- caller->source_position_table());
+ caller->SourcePositionTable());
DCHECK_EQ(2, caller->deoptimization_data()->length());
int caller_func_index =
Smi::cast(caller->deoptimization_data()->get(1))->value();
@@ -3289,9 +3411,7 @@ MaybeHandle<Code> LazyCompilationOrchestrator::CompileLazy(
// TODO(clemensh): compile all functions in non_compiled_functions in
// background, wait for func_to_return_idx.
- if (!CompileFunction(isolate, instance, func_to_return_idx)) {
- return {};
- }
+ CompileFunction(isolate, instance, func_to_return_idx);
if (is_js_to_wasm || patch_caller) {
DisallowHeapAllocation no_gc;
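Net effect of the hunks above: lazy compilation is now infallible from the caller's point of view. Since the module is fully validated before execution starts, a failure in FinishCompilation can only mean OOM, so the MaybeHandle plumbing collapses into a CHECK. A standalone sketch of that control-flow change, using stand-in types rather than the real V8 ones:

    #include <cassert>
    #include <optional>

    // Stand-in for a compile step that can only fail on unvalidated input.
    std::optional<int> CompileMayFail(bool validated) {
      if (!validated) return std::nullopt;
      return 42;
    }

    // The module was validated up front, so failure here is a programming
    // error, not a recoverable condition -- assert and return a plain value,
    // mirroring CHECK(!thrower.error()) and the Handle<Code> return type.
    int CompileChecked() {
      std::optional<int> result = CompileMayFail(/*validated=*/true);
      assert(result.has_value());
      return *result;
    }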
diff --git a/deps/v8/src/wasm/wasm-module.h b/deps/v8/src/wasm/wasm-module.h
index 98f498b79c..4776298e9f 100644
--- a/deps/v8/src/wasm/wasm-module.h
+++ b/deps/v8/src/wasm/wasm-module.h
@@ -152,7 +152,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
static const uint32_t kPageSize = 0x10000; // Page size, 64kb.
static const uint32_t kMinMemPages = 1; // Minimum memory size = 64kb
- Zone* owned_zone;
+ std::unique_ptr<Zone> signature_zone;
uint32_t min_mem_pages = 0; // minimum size of the memory in 64k pages
uint32_t max_mem_pages = 0; // maximum size of the memory in 64k pages
bool has_max_mem = false; // true if a maximum memory size exists
@@ -185,10 +185,7 @@ struct V8_EXPORT_PRIVATE WasmModule {
std::unique_ptr<base::Semaphore> pending_tasks;
WasmModule() : WasmModule(nullptr) {}
- WasmModule(Zone* owned_zone);
- ~WasmModule() {
- if (owned_zone) delete owned_zone;
- }
+ WasmModule(std::unique_ptr<Zone> owned);
ModuleOrigin get_origin() const { return origin_; }
void set_origin(ModuleOrigin new_value) { origin_ = new_value; }
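The ownership change above is the classic raw-pointer-to-unique_ptr migration; a minimal sketch with stand-in types (not the real WasmModule):

    #include <memory>

    struct Zone {};  // arena allocator stand-in

    // Old shape: raw pointer plus a hand-written destructor.
    struct OldModule {
      Zone* owned_zone = nullptr;
      ~OldModule() { delete owned_zone; }  // easy to leak or double-delete
    };

    // New shape: std::unique_ptr documents and enforces ownership; the
    // implicit destructor is correct, and accidental copies no longer compile.
    struct NewModule {
      std::unique_ptr<Zone> signature_zone;
      explicit NewModule(std::unique_ptr<Zone> owned)
          : signature_zone(std::move(owned)) {}
    };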
@@ -294,7 +291,7 @@ struct V8_EXPORT_PRIVATE ModuleWireBytes {
const byte* start() const { return module_bytes_.start(); }
const byte* end() const { return module_bytes_.end(); }
- int length() const { return module_bytes_.length(); }
+ size_t length() const { return module_bytes_.length(); }
private:
const Vector<const byte> module_bytes_;
@@ -433,8 +430,10 @@ WasmInstanceObject* GetOwningWasmInstance(Code* code);
Handle<JSArrayBuffer> NewArrayBuffer(Isolate*, size_t size,
bool enable_guard_regions);
-Handle<JSArrayBuffer> SetupArrayBuffer(Isolate*, void* backing_store,
- size_t size, bool is_external,
+Handle<JSArrayBuffer> SetupArrayBuffer(Isolate*, void* allocation_base,
+ size_t allocation_length,
+ void* backing_store, size_t size,
+ bool is_external,
bool enable_guard_regions);
void DetachWebAssemblyMemoryBuffer(Isolate* isolate,
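A reading of the widened SetupArrayBuffer signature, assuming the usual guard-region layout (this split is implied by the call sites elsewhere in the patch, not spelled out here): with guard regions, the accessible wasm memory is a small slice of a much larger virtual reservation, and the embedder needs both extents to free the right range later.

    // Presumed layout (illustrative):
    //   allocation_base ........................ start of whole reservation
    //   allocation_base + allocation_length .... end of whole reservation
    //   backing_store .......................... start of accessible memory
    //   backing_store + size ................... end of accessible memory
    // Without guard regions the two pairs coincide:
    //   allocation_base == backing_store, allocation_length == size.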
@@ -444,13 +443,10 @@ void DetachWebAssemblyMemoryBuffer(Isolate* isolate,
void UpdateDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
int index, Handle<JSFunction> js_function);
-void GrowDispatchTables(Isolate* isolate, Handle<FixedArray> dispatch_tables,
- uint32_t old_size, uint32_t count);
-
//============================================================================
//== Compilation and instantiation ===========================================
//============================================================================
-V8_EXPORT_PRIVATE bool SyncValidate(Isolate* isolate, ErrorThrower* thrower,
+V8_EXPORT_PRIVATE bool SyncValidate(Isolate* isolate,
const ModuleWireBytes& bytes);
V8_EXPORT_PRIVATE MaybeHandle<WasmModuleObject> SyncCompileTranslatedAsmJs(
@@ -473,10 +469,6 @@ V8_EXPORT_PRIVATE void AsyncInstantiate(Isolate* isolate,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> imports);
-V8_EXPORT_PRIVATE void AsyncCompileAndInstantiate(
- Isolate* isolate, Handle<JSPromise> promise, const ModuleWireBytes& bytes,
- MaybeHandle<JSReceiver> imports);
-
#if V8_TARGET_ARCH_64_BIT
const bool kGuardRegionsSupported = true;
#else
@@ -509,13 +501,12 @@ Handle<Code> CompileLazy(Isolate* isolate);
// logic to actually orchestrate parallel execution of wasm compilation jobs.
// TODO(clemensh): Implement concurrent lazy compilation.
class LazyCompilationOrchestrator {
- bool CompileFunction(Isolate*, Handle<WasmInstanceObject>,
- int func_index) WARN_UNUSED_RESULT;
+ void CompileFunction(Isolate*, Handle<WasmInstanceObject>, int func_index);
public:
- MaybeHandle<Code> CompileLazy(Isolate*, Handle<WasmInstanceObject>,
- Handle<Code> caller, int call_offset,
- int exported_func_index, bool patch_caller);
+ Handle<Code> CompileLazy(Isolate*, Handle<WasmInstanceObject>,
+ Handle<Code> caller, int call_offset,
+ int exported_func_index, bool patch_caller);
};
namespace testing {
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index b83fd7ad4e..d43087b263 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -259,8 +259,6 @@ Handle<WasmTableObject> WasmTableObject::New(Isolate* isolate, uint32_t initial,
return Handle<WasmTableObject>::cast(table_obj);
}
-DEFINE_OBJ_GETTER(WasmTableObject, dispatch_tables, kDispatchTables, FixedArray)
-
Handle<FixedArray> WasmTableObject::AddDispatchTable(
Isolate* isolate, Handle<WasmTableObject> table_obj,
Handle<WasmInstanceObject> instance, int table_index,
@@ -290,6 +288,8 @@ Handle<FixedArray> WasmTableObject::AddDispatchTable(
DEFINE_OBJ_ACCESSORS(WasmTableObject, functions, kFunctions, FixedArray)
+DEFINE_OBJ_GETTER(WasmTableObject, dispatch_tables, kDispatchTables, FixedArray)
+
uint32_t WasmTableObject::current_length() { return functions()->length(); }
bool WasmTableObject::has_maximum_length() {
@@ -306,11 +306,36 @@ WasmTableObject* WasmTableObject::cast(Object* object) {
return reinterpret_cast<WasmTableObject*>(object);
}
-void WasmTableObject::Grow(Isolate* isolate, Handle<WasmTableObject> table,
- uint32_t count) {
- Handle<FixedArray> dispatch_tables(table->dispatch_tables());
- wasm::GrowDispatchTables(isolate, dispatch_tables,
- table->functions()->length(), count);
+void WasmTableObject::grow(Isolate* isolate, uint32_t count) {
+ Handle<FixedArray> dispatch_tables(
+ FixedArray::cast(GetEmbedderField(kDispatchTables)));
+ DCHECK_EQ(0, dispatch_tables->length() % 4);
+ uint32_t old_size = functions()->length();
+
+ Zone specialization_zone(isolate->allocator(), ZONE_NAME);
+ for (int i = 0; i < dispatch_tables->length(); i += 4) {
+ Handle<FixedArray> old_function_table(
+ FixedArray::cast(dispatch_tables->get(i + 2)));
+ Handle<FixedArray> old_signature_table(
+ FixedArray::cast(dispatch_tables->get(i + 3)));
+ Handle<FixedArray> new_function_table =
+ isolate->factory()->CopyFixedArrayAndGrow(old_function_table, count);
+ Handle<FixedArray> new_signature_table =
+ isolate->factory()->CopyFixedArrayAndGrow(old_signature_table, count);
+
+ // Update dispatch tables with new function/signature tables
+ dispatch_tables->set(i + 2, *new_function_table);
+ dispatch_tables->set(i + 3, *new_signature_table);
+
+ // Patch the code of the respective instance.
+ CodeSpecialization code_specialization(isolate, &specialization_zone);
+ code_specialization.PatchTableSize(old_size, old_size + count);
+ code_specialization.RelocateObject(old_function_table, new_function_table);
+ code_specialization.RelocateObject(old_signature_table,
+ new_signature_table);
+ code_specialization.ApplyToWholeInstance(
+ WasmInstanceObject::cast(dispatch_tables->get(i)));
+ }
}
namespace {
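WasmTableObject::grow above walks the dispatch-table array with a stride of four; the slot meanings can be read off the accesses (slot 1 is not touched here and is presumably the table's index within the instance, matching AddDispatchTable's table_index parameter):

    // Inferred per-instance layout of the dispatch_tables FixedArray:
    //   [i + 0]  WasmInstanceObject whose code gets re-specialized
    //   [i + 1]  table index within that instance (unused in grow)
    //   [i + 2]  function table   (FixedArray, copied and grown by count)
    //   [i + 3]  signature table  (FixedArray, copied and grown by count)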
@@ -382,8 +407,9 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
Handle<JSObject> memory_obj =
isolate->factory()->NewJSObject(memory_ctor, TENURED);
memory_obj->SetEmbedderField(kWrapperTracerHeader, Smi::kZero);
-
- memory_obj->SetEmbedderField(kArrayBuffer, *buffer);
+ if (buffer.is_null()) {
+   memory_obj->SetEmbedderField(kArrayBuffer,
+                                isolate->heap()->undefined_value());
+ } else {
+   memory_obj->SetEmbedderField(kArrayBuffer, *buffer);
+ }
Handle<Object> max = isolate->factory()->NewNumber(maximum);
memory_obj->SetEmbedderField(kMaximum, *max);
Handle<Symbol> memory_sym(isolate->native_context()->wasm_memory_sym());
@@ -391,7 +417,8 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
return Handle<WasmMemoryObject>::cast(memory_obj);
}
-DEFINE_OBJ_ACCESSORS(WasmMemoryObject, buffer, kArrayBuffer, JSArrayBuffer)
+DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmMemoryObject, buffer, kArrayBuffer,
+ JSArrayBuffer)
DEFINE_OPTIONAL_OBJ_ACCESSORS(WasmMemoryObject, instances_link, kInstancesLink,
WasmInstanceWrapper)
@@ -438,11 +465,11 @@ void WasmMemoryObject::ResetInstancesLink(Isolate* isolate) {
int32_t WasmMemoryObject::Grow(Isolate* isolate,
Handle<WasmMemoryObject> memory_object,
uint32_t pages) {
- Handle<JSArrayBuffer> old_buffer(memory_object->buffer(), isolate);
+ Handle<JSArrayBuffer> old_buffer;
uint32_t old_size = 0;
Address old_mem_start = nullptr;
- // Force byte_length to 0, if byte_length fails IsNumber() check.
- if (!old_buffer.is_null()) {
+ if (memory_object->has_buffer()) {
+ old_buffer = handle(memory_object->buffer());
old_size = old_buffer->byte_length()->Number();
old_mem_start = static_cast<Address>(old_buffer->backing_store());
}
@@ -452,9 +479,10 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
// Even for pages == 0, we need to attach a new JSArrayBuffer with the same
// backing store and neuter the old one to be spec compliant.
if (!old_buffer.is_null() && old_size != 0) {
- new_buffer = SetupArrayBuffer(isolate, old_buffer->backing_store(),
- old_size, old_buffer->is_external(),
- old_buffer->has_guard_region());
+ new_buffer = SetupArrayBuffer(
+ isolate, old_buffer->allocation_base(),
+ old_buffer->allocation_length(), old_buffer->backing_store(),
+ old_size, old_buffer->is_external(), old_buffer->has_guard_region());
memory_object->set_buffer(*new_buffer);
}
DCHECK_EQ(0, old_size % WasmModule::kPageSize);
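The re-wrap for pages == 0 exists purely for spec compliance: growing must detach the old ArrayBuffer even when no memory is added. A stand-in sketch of the detach-and-rewrap pattern (not the real JSArrayBuffer machinery):

    #include <cstddef>

    struct BufferView {
      void* backing_store = nullptr;
      size_t byte_length = 0;
    };

    // Hand the same backing store to a fresh view and neuter the old one, so
    // stale references observe length 0 instead of the grown memory.
    BufferView GrowInPlace(BufferView& old_view) {
      BufferView fresh{old_view.backing_store, old_view.byte_length};
      old_view.backing_store = nullptr;
      old_view.byte_length = 0;
      return fresh;
    }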
@@ -523,7 +551,7 @@ Handle<WasmDebugInfo> WasmInstanceObject::GetOrCreateDebugInfo(
Handle<WasmInstanceObject> instance) {
if (instance->has_debug_info()) return handle(instance->debug_info());
Handle<WasmDebugInfo> new_info = WasmDebugInfo::New(instance);
- instance->set_debug_info(*new_info);
+ DCHECK(instance->has_debug_info());
return new_info;
}
@@ -619,10 +647,9 @@ uint32_t WasmInstanceObject::GetMaxMemoryPages() {
}
uint32_t compiled_max_pages = compiled_module()->module()->max_mem_pages;
Isolate* isolate = GetIsolate();
- auto* histogram = (compiled_module()->module()->is_wasm()
- ? isolate->counters()->wasm_wasm_max_mem_pages_count()
- : isolate->counters()->wasm_asm_max_mem_pages_count());
- histogram->AddSample(compiled_max_pages);
+ assert(compiled_module()->module()->is_wasm());
+ isolate->counters()->wasm_wasm_max_mem_pages_count()->AddSample(
+ compiled_max_pages);
if (compiled_max_pages != 0) return compiled_max_pages;
return FLAG_wasm_max_mem_pages;
}
@@ -778,7 +805,9 @@ void WasmSharedModuleData::ReinitializeAfterDeserialization(
DecodeWasmModule(isolate, start, end, false, kWasmOrigin);
CHECK(result.ok());
CHECK_NOT_NULL(result.val);
- module = const_cast<WasmModule*>(result.val);
+ // Take ownership of the WasmModule and immediately transfer it to the
+ // WasmModuleWrapper below.
+ module = result.val.release();
}
Handle<WasmModuleWrapper> module_wrapper =
@@ -987,6 +1016,9 @@ void WasmCompiledModule::Reset(Isolate* isolate,
Object* fct_obj = compiled_module->ptr_to_code_table();
if (fct_obj != nullptr && fct_obj != undefined) {
uint32_t old_mem_size = compiled_module->mem_size();
+ // We use default_mem_size throughout, as the mem size of an uninstantiated
+ // module, because if we can statically prove a memory access is out of
+ // bounds, we'll codegen a trap. See {WasmGraphBuilder::BoundsCheckMem}.
uint32_t default_mem_size = compiled_module->default_mem_size();
Address old_mem_start = compiled_module->GetEmbeddedMemStartOrNull();
@@ -1062,7 +1094,7 @@ void WasmCompiledModule::InitId() {
void WasmCompiledModule::ResetSpecializationMemInfoIfNeeded() {
DisallowHeapAllocation no_gc;
if (has_embedded_mem_start()) {
- set_embedded_mem_size(0);
+ set_embedded_mem_size(default_mem_size());
set_embedded_mem_start(0);
}
}
@@ -1519,7 +1551,7 @@ MaybeHandle<FixedArray> WasmCompiledModule::CheckBreakPoints(int position) {
return isolate->debug()->GetHitBreakPointObjects(breakpoint_objects);
}
-MaybeHandle<Code> WasmCompiledModule::CompileLazy(
+Handle<Code> WasmCompiledModule::CompileLazy(
Isolate* isolate, Handle<WasmInstanceObject> instance, Handle<Code> caller,
int offset, int func_index, bool patch_caller) {
isolate->set_context(*instance->compiled_module()->native_context());
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 21299878b3..00dfc60f10 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -16,6 +16,8 @@ namespace internal {
namespace wasm {
class InterpretedFrame;
struct WasmModule;
+struct WasmInstance;
+class WasmInterpreter;
}
class WasmCompiledModule;
@@ -71,17 +73,16 @@ class WasmTableObject : public JSObject {
DECLARE_CASTS(WasmTableObject);
DECLARE_ACCESSORS(functions, FixedArray);
+ DECLARE_GETTER(dispatch_tables, FixedArray);
- FixedArray* dispatch_tables();
uint32_t current_length();
bool has_maximum_length();
int64_t maximum_length(); // Returns < 0 if no maximum.
+ void grow(Isolate* isolate, uint32_t count);
static Handle<WasmTableObject> New(Isolate* isolate, uint32_t initial,
int64_t maximum,
Handle<FixedArray>* js_functions);
- static void Grow(Isolate* isolate, Handle<WasmTableObject> table,
- uint32_t count);
static Handle<FixedArray> AddDispatchTable(
Isolate* isolate, Handle<WasmTableObject> table,
Handle<WasmInstanceObject> instance, int table_index,
@@ -102,7 +103,7 @@ class WasmMemoryObject : public JSObject {
};
DECLARE_CASTS(WasmMemoryObject);
- DECLARE_ACCESSORS(buffer, JSArrayBuffer);
+ DECLARE_OPTIONAL_ACCESSORS(buffer, JSArrayBuffer);
DECLARE_OPTIONAL_ACCESSORS(instances_link, WasmInstanceWrapper);
void AddInstance(Isolate* isolate, Handle<WasmInstanceObject> object);
@@ -519,12 +520,10 @@ class WasmCompiledModule : public FixedArray {
// call / exported function), func_index must be set. Otherwise it can be -1.
// If patch_caller is set, then all direct calls to functions which were
// already lazily compiled are patched (at least the given call site).
- // Returns the Code to be called at the given call site, or an empty Handle if
- // an error occured during lazy compilation. In this case, an exception has
- // been set on the isolate.
- static MaybeHandle<Code> CompileLazy(Isolate*, Handle<WasmInstanceObject>,
- Handle<Code> caller, int offset,
- int func_index, bool patch_caller);
+ // Returns the Code to be called at the given call site.
+ static Handle<Code> CompileLazy(Isolate*, Handle<WasmInstanceObject>,
+ Handle<Code> caller, int offset,
+ int func_index, bool patch_caller);
void ReplaceCodeTableForTesting(Handle<FixedArray> testing_table) {
set_code_table(testing_table);
@@ -549,6 +548,13 @@ class WasmDebugInfo : public FixedArray {
static Handle<WasmDebugInfo> New(Handle<WasmInstanceObject>);
+ // Set up a WasmDebugInfo with an existing WasmInstance struct.
+ // Returns a pointer to the interpreter instantiated inside this
+ // WasmDebugInfo.
+ // Use for testing only.
+ static wasm::WasmInterpreter* SetupForTesting(Handle<WasmInstanceObject>,
+ wasm::WasmInstance*);
+
static bool IsDebugInfo(Object*);
static WasmDebugInfo* cast(Object*);
@@ -579,7 +585,7 @@ class WasmDebugInfo : public FixedArray {
Address frame_pointer);
std::unique_ptr<wasm::InterpretedFrame> GetInterpretedFrame(
- Address frame_pointer, int idx);
+ Address frame_pointer, int frame_index);
// Unwind the interpreted stack belonging to the passed interpreter entry
// frame.
@@ -593,6 +599,17 @@ class WasmDebugInfo : public FixedArray {
// Update the memory view of the interpreter after executing GrowMemory in
// compiled code.
void UpdateMemory(JSArrayBuffer* new_memory);
+
+ // Get scope details for a specific interpreted frame.
+ // This returns a JSArray of length two: One entry for the global scope, one
+ // for the local scope. Both elements are JSArrays of size
+ // ScopeIterator::kScopeDetailsSize and layout as described in debug-scopes.h.
+ // The global scope contains information about globals and the memory.
+ // The local scope contains information about parameters, locals, and stack
+ // values.
+ static Handle<JSArray> GetScopeDetails(Handle<WasmDebugInfo>,
+ Address frame_pointer,
+ int frame_index);
};
class WasmInstanceWrapper : public FixedArray {
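A hypothetical consumer of the new GetScopeDetails entry point, using only what the comment above guarantees (a two-element JSArray; per-element layout per debug-scopes.h) and assuming the handles are in scope:

    Handle<JSArray> scopes =
        WasmDebugInfo::GetScopeDetails(debug_info, frame_pointer, frame_index);
    Handle<Object> global_scope =   // globals and the memory
        JSReceiver::GetElement(isolate, scopes, 0).ToHandleChecked();
    Handle<Object> local_scope =    // parameters, locals, stack values
        JSReceiver::GetElement(isolate, scopes, 1).ToHandleChecked();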
@@ -649,8 +666,11 @@ class WasmInstanceWrapper : public FixedArray {
};
};
+#undef DECLARE_CASTS
+#undef DECLARE_GETTER
#undef DECLARE_ACCESSORS
#undef DECLARE_OPTIONAL_ACCESSORS
+#undef DECLARE_OPTIONAL_GETTER
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/wasm/wasm-opcodes.cc b/deps/v8/src/wasm/wasm-opcodes.cc
index 10dcfe59a7..355cdf40b5 100644
--- a/deps/v8/src/wasm/wasm-opcodes.cc
+++ b/deps/v8/src/wasm/wasm-opcodes.cc
@@ -175,16 +175,11 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIMD_OP(Sub, "sub")
CASE_SIMD_OP(Mul, "mul")
CASE_F32x4_OP(Abs, "abs")
- CASE_F32x4_OP(Sqrt, "sqrt")
- CASE_F32x4_OP(Div, "div")
+ CASE_F32x4_OP(AddHoriz, "add_horizontal")
CASE_F32x4_OP(RecipApprox, "recip_approx")
- CASE_F32x4_OP(RecipRefine, "recip_refine")
CASE_F32x4_OP(RecipSqrtApprox, "recip_sqrt_approx")
- CASE_F32x4_OP(RecipSqrtRefine, "recip_sqrt_refine")
CASE_F32x4_OP(Min, "min")
CASE_F32x4_OP(Max, "max")
- CASE_F32x4_OP(MinNum, "min_num")
- CASE_F32x4_OP(MaxNum, "max_num")
CASE_F32x4_OP(Lt, "lt")
CASE_F32x4_OP(Le, "le")
CASE_F32x4_OP(Gt, "gt")
@@ -209,6 +204,8 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_SIGN_OP(SIMDI, Ge, "ge")
CASE_SIGN_OP(SIMDI, Shr, "shr")
CASE_SIMDI_OP(Shl, "shl")
+ CASE_I32x4_OP(AddHoriz, "add_horizontal")
+ CASE_I16x8_OP(AddHoriz, "add_horizontal")
CASE_SIGN_OP(I16x8, AddSaturate, "add_saturate")
CASE_SIGN_OP(I8x16, AddSaturate, "add_saturate")
CASE_SIGN_OP(I16x8, SubSaturate, "sub_saturate")
@@ -217,15 +214,12 @@ const char* WasmOpcodes::OpcodeName(WasmOpcode opcode) {
CASE_S128_OP(Or, "or")
CASE_S128_OP(Xor, "xor")
CASE_S128_OP(Not, "not")
- CASE_S32x4_OP(Select, "select")
- CASE_S32x4_OP(Swizzle, "swizzle")
CASE_S32x4_OP(Shuffle, "shuffle")
- CASE_S16x8_OP(Select, "select")
- CASE_S16x8_OP(Swizzle, "swizzle")
+ CASE_S32x4_OP(Select, "select")
CASE_S16x8_OP(Shuffle, "shuffle")
- CASE_S8x16_OP(Select, "select")
- CASE_S8x16_OP(Swizzle, "swizzle")
+ CASE_S16x8_OP(Select, "select")
CASE_S8x16_OP(Shuffle, "shuffle")
+ CASE_S8x16_OP(Select, "select")
CASE_S1x4_OP(And, "and")
CASE_S1x4_OP(Or, "or")
CASE_S1x4_OP(Xor, "xor")
@@ -271,6 +265,30 @@ bool WasmOpcodes::IsPrefixOpcode(WasmOpcode opcode) {
}
}
+bool WasmOpcodes::IsControlOpcode(WasmOpcode opcode) {
+ switch (opcode) {
+#define CHECK_OPCODE(name, opcode, _) \
+ case kExpr##name: \
+ return true;
+ FOREACH_CONTROL_OPCODE(CHECK_OPCODE)
+#undef CHECK_OPCODE
+ default:
+ return false;
+ }
+}
+
+bool WasmOpcodes::IsUnconditionalJump(WasmOpcode opcode) {
+ switch (opcode) {
+ case kExprUnreachable:
+ case kExprBr:
+ case kExprBrTable:
+ case kExprReturn:
+ return true;
+ default:
+ return false;
+ }
+}
+
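Sketch of the intended decoder-side use of IsUnconditionalJump (illustrative; Decode is a hypothetical stand-in, not the real function-body decoder): once such an opcode is seen, everything up to the enclosing block's end is statically dead.

    void DecodeBlock(const std::vector<WasmOpcode>& opcodes) {
      bool live = true;
      for (WasmOpcode opcode : opcodes) {
        if (opcode == kExprEnd) live = true;  // leaving the dead region
        if (live) Decode(opcode);             // hypothetical helper
        if (WasmOpcodes::IsUnconditionalJump(opcode)) live = false;
      }
    }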
std::ostream& operator<<(std::ostream& os, const FunctionSig& sig) {
if (sig.return_count() == 0) os << "v";
for (auto ret : sig.returns()) {
diff --git a/deps/v8/src/wasm/wasm-opcodes.h b/deps/v8/src/wasm/wasm-opcodes.h
index 22a84e519a..a1a84366a2 100644
--- a/deps/v8/src/wasm/wasm-opcodes.h
+++ b/deps/v8/src/wasm/wasm-opcodes.h
@@ -286,19 +286,14 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(F32x4Splat, 0xe500, s_f) \
V(F32x4Abs, 0xe503, s_s) \
V(F32x4Neg, 0xe504, s_s) \
- V(F32x4Sqrt, 0xe505, s_s) \
V(F32x4RecipApprox, 0xe506, s_s) \
V(F32x4RecipSqrtApprox, 0xe507, s_s) \
V(F32x4Add, 0xe508, s_ss) \
+ V(F32x4AddHoriz, 0xe5b9, s_ss) \
V(F32x4Sub, 0xe509, s_ss) \
V(F32x4Mul, 0xe50a, s_ss) \
- V(F32x4Div, 0xe50b, s_ss) \
V(F32x4Min, 0xe50c, s_ss) \
V(F32x4Max, 0xe50d, s_ss) \
- V(F32x4MinNum, 0xe50e, s_ss) \
- V(F32x4MaxNum, 0xe50f, s_ss) \
- V(F32x4RecipRefine, 0xe592, s_ss) \
- V(F32x4RecipSqrtRefine, 0xe593, s_ss) \
V(F32x4Eq, 0xe510, s1x4_ss) \
V(F32x4Ne, 0xe511, s1x4_ss) \
V(F32x4Lt, 0xe512, s1x4_ss) \
@@ -310,6 +305,7 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(I32x4Splat, 0xe51b, s_i) \
V(I32x4Neg, 0xe51e, s_s) \
V(I32x4Add, 0xe51f, s_ss) \
+ V(I32x4AddHoriz, 0xe5ba, s_ss) \
V(I32x4Sub, 0xe520, s_ss) \
V(I32x4Mul, 0xe521, s_ss) \
V(I32x4MinS, 0xe522, s_ss) \
@@ -336,6 +332,7 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(I16x8Neg, 0xe53b, s_s) \
V(I16x8Add, 0xe53c, s_ss) \
V(I16x8AddSaturateS, 0xe53d, s_ss) \
+ V(I16x8AddHoriz, 0xe5bb, s_ss) \
V(I16x8Sub, 0xe53e, s_ss) \
V(I16x8SubSaturateS, 0xe53f, s_ss) \
V(I16x8Mul, 0xe540, s_ss) \
@@ -391,14 +388,8 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(S128Xor, 0xe578, s_ss) \
V(S128Not, 0xe579, s_s) \
V(S32x4Select, 0xe52c, s_s1x4ss) \
- V(S32x4Swizzle, 0xe52d, s_s) \
- V(S32x4Shuffle, 0xe52e, s_ss) \
V(S16x8Select, 0xe54b, s_s1x8ss) \
- V(S16x8Swizzle, 0xe54c, s_s) \
- V(S16x8Shuffle, 0xe54d, s_ss) \
V(S8x16Select, 0xe56a, s_s1x16ss) \
- V(S8x16Swizzle, 0xe56b, s_s) \
- V(S8x16Shuffle, 0xe56c, s_ss) \
V(S1x4And, 0xe580, s1x4_s1x4s1x4) \
V(S1x4Or, 0xe581, s1x4_s1x4s1x4) \
V(S1x4Xor, 0xe582, s1x4_s1x4s1x4) \
@@ -437,6 +428,11 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(I8x16ShrS, 0xe563, _) \
V(I8x16ShrU, 0xe571, _)
+#define FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
+ V(S32x4Shuffle, 0xe52d, s_ss) \
+ V(S16x8Shuffle, 0xe54c, s_ss) \
+ V(S8x16Shuffle, 0xe56b, s_ss)
+
#define FOREACH_ATOMIC_OPCODE(V) \
V(I32AtomicAdd8S, 0xe601, i_ii) \
V(I32AtomicAdd8U, 0xe602, i_ii) \
@@ -475,16 +471,17 @@ constexpr WasmCodePosition kNoCodePosition = -1;
V(I32AtomicXor, 0xe623, i_ii)
// All opcodes.
-#define FOREACH_OPCODE(V) \
- FOREACH_CONTROL_OPCODE(V) \
- FOREACH_MISC_OPCODE(V) \
- FOREACH_SIMPLE_OPCODE(V) \
- FOREACH_STORE_MEM_OPCODE(V) \
- FOREACH_LOAD_MEM_OPCODE(V) \
- FOREACH_MISC_MEM_OPCODE(V) \
- FOREACH_ASMJS_COMPAT_OPCODE(V) \
- FOREACH_SIMD_0_OPERAND_OPCODE(V) \
- FOREACH_SIMD_1_OPERAND_OPCODE(V) \
+#define FOREACH_OPCODE(V) \
+ FOREACH_CONTROL_OPCODE(V) \
+ FOREACH_MISC_OPCODE(V) \
+ FOREACH_SIMPLE_OPCODE(V) \
+ FOREACH_STORE_MEM_OPCODE(V) \
+ FOREACH_LOAD_MEM_OPCODE(V) \
+ FOREACH_MISC_MEM_OPCODE(V) \
+ FOREACH_ASMJS_COMPAT_OPCODE(V) \
+ FOREACH_SIMD_0_OPERAND_OPCODE(V) \
+ FOREACH_SIMD_1_OPERAND_OPCODE(V) \
+ FOREACH_SIMD_MASK_OPERAND_OPCODE(V) \
FOREACH_ATOMIC_OPCODE(V)
// All signatures.
@@ -581,6 +578,10 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
static FunctionSig* AsmjsSignature(WasmOpcode opcode);
static FunctionSig* AtomicSignature(WasmOpcode opcode);
static bool IsPrefixOpcode(WasmOpcode opcode);
+ static bool IsControlOpcode(WasmOpcode opcode);
+ // Check whether the given opcode always jumps, i.e. all instructions after
+ // this one in the current block are dead. Returns false for |end|.
+ static bool IsUnconditionalJump(WasmOpcode opcode);
static int TrapReasonToMessageId(TrapReason reason);
static const char* TrapReasonMessage(TrapReason reason);
@@ -644,66 +645,28 @@ class V8_EXPORT_PRIVATE WasmOpcodes {
}
static ValueType ValueTypeFor(MachineType type) {
- if (type == MachineType::Int8()) {
- return kWasmI32;
- } else if (type == MachineType::Uint8()) {
- return kWasmI32;
- } else if (type == MachineType::Int16()) {
- return kWasmI32;
- } else if (type == MachineType::Uint16()) {
- return kWasmI32;
- } else if (type == MachineType::Int32()) {
- return kWasmI32;
- } else if (type == MachineType::Uint32()) {
- return kWasmI32;
- } else if (type == MachineType::Int64()) {
- return kWasmI64;
- } else if (type == MachineType::Uint64()) {
- return kWasmI64;
- } else if (type == MachineType::Float32()) {
- return kWasmF32;
- } else if (type == MachineType::Float64()) {
- return kWasmF64;
- } else if (type == MachineType::Simd128()) {
- return kWasmS128;
- } else if (type == MachineType::Simd1x4()) {
- return kWasmS1x4;
- } else if (type == MachineType::Simd1x8()) {
- return kWasmS1x8;
- } else if (type == MachineType::Simd1x16()) {
- return kWasmS1x16;
- } else {
- UNREACHABLE();
- return kWasmI32;
- }
- }
-
- static WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
- if (type == MachineType::Int8()) {
- return store ? kExprI32StoreMem8 : kExprI32LoadMem8S;
- } else if (type == MachineType::Uint8()) {
- return store ? kExprI32StoreMem8 : kExprI32LoadMem8U;
- } else if (type == MachineType::Int16()) {
- return store ? kExprI32StoreMem16 : kExprI32LoadMem16S;
- } else if (type == MachineType::Uint16()) {
- return store ? kExprI32StoreMem16 : kExprI32LoadMem16U;
- } else if (type == MachineType::Int32()) {
- return store ? kExprI32StoreMem : kExprI32LoadMem;
- } else if (type == MachineType::Uint32()) {
- return store ? kExprI32StoreMem : kExprI32LoadMem;
- } else if (type == MachineType::Int64()) {
- return store ? kExprI64StoreMem : kExprI64LoadMem;
- } else if (type == MachineType::Uint64()) {
- return store ? kExprI64StoreMem : kExprI64LoadMem;
- } else if (type == MachineType::Float32()) {
- return store ? kExprF32StoreMem : kExprF32LoadMem;
- } else if (type == MachineType::Float64()) {
- return store ? kExprF64StoreMem : kExprF64LoadMem;
- } else if (type == MachineType::Simd128()) {
- return store ? kExprS128StoreMem : kExprS128LoadMem;
- } else {
- UNREACHABLE();
- return kExprNop;
+ switch (type.representation()) {
+ case MachineRepresentation::kWord8:
+ case MachineRepresentation::kWord16:
+ case MachineRepresentation::kWord32:
+ return kWasmI32;
+ case MachineRepresentation::kWord64:
+ return kWasmI64;
+ case MachineRepresentation::kFloat32:
+ return kWasmF32;
+ case MachineRepresentation::kFloat64:
+ return kWasmF64;
+ case MachineRepresentation::kSimd128:
+ return kWasmS128;
+ case MachineRepresentation::kSimd1x4:
+ return kWasmS1x4;
+ case MachineRepresentation::kSimd1x8:
+ return kWasmS1x8;
+ case MachineRepresentation::kSimd1x16:
+ return kWasmS1x16;
+ default:
+ UNREACHABLE();
+ return kWasmI32;
}
}
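Because the rewritten switch keys on MachineRepresentation rather than the full MachineType, signedness and sub-word widths collapse for free; for example:

    // All of these hold (illustrative):
    //   ValueTypeFor(MachineType::Int32())  == kWasmI32
    //   ValueTypeFor(MachineType::Uint32()) == kWasmI32  // same kWord32 rep
    //   ValueTypeFor(MachineType::Int8())   == kWasmI32  // kWord8 widens too
    //   ValueTypeFor(MachineType::Uint64()) == kWasmI64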
diff --git a/deps/v8/src/wasm/wasm-result.cc b/deps/v8/src/wasm/wasm-result.cc
index 2f702551ee..b83d9dbbaa 100644
--- a/deps/v8/src/wasm/wasm-result.cc
+++ b/deps/v8/src/wasm/wasm-result.cc
@@ -15,80 +15,154 @@ namespace v8 {
namespace internal {
namespace wasm {
-void ErrorThrower::Format(i::Handle<i::JSFunction> constructor,
- const char* format, va_list args) {
+namespace {
+
+PRINTF_FORMAT(3, 0)
+void VPrintFToString(std::string& str, size_t str_offset, const char* format,
+ va_list args) {
+ DCHECK_LE(str_offset, str.size());
+ size_t len = str_offset + strlen(format);
+ // Allocate increasingly large buffers until the message fits.
+ for (;; len = base::bits::RoundUpToPowerOfTwo64(len + 1)) {
+ DCHECK_GE(kMaxInt, len);
+ str.resize(len);
+ va_list args_copy;
+ va_copy(args_copy, args);
+ int written = VSNPrintF(Vector<char>(&str.front() + str_offset,
+ static_cast<int>(len - str_offset)),
+ format, args_copy);
+ va_end(args_copy);
+ if (written < 0) continue; // not enough space.
+ str.resize(str_offset + written);
+ return;
+ }
+}
+
+PRINTF_FORMAT(3, 4)
+void PrintFToString(std::string& str, size_t str_offset, const char* format,
+ ...) {
+ va_list args;
+ va_start(args, format);
+ VPrintFToString(str, str_offset, format, args);
+ va_end(args);
+}
+
+} // namespace
+
+void ResultBase::error(uint32_t offset, std::string error_msg) {
+ // The error message must not be empty, otherwise Result::failed() will be
+ // false.
+ DCHECK(!error_msg.empty());
+ error_offset_ = offset;
+ error_msg_ = std::move(error_msg);
+}
+
+void ResultBase::verror(const char* format, va_list args) {
+ VPrintFToString(error_msg_, 0, format, args);
+ // Assign default message such that ok() and failed() work.
+ if (error_msg_.empty()) error_msg_.assign("Error");
+}
+
+void ErrorThrower::Format(ErrorType type, const char* format, va_list args) {
+ DCHECK_NE(kNone, type);
// Only report the first error.
if (error()) return;
- constexpr int kMaxErrorMessageLength = 256;
- EmbeddedVector<char, kMaxErrorMessageLength> buffer;
-
- int context_len = 0;
+ size_t context_len = 0;
if (context_) {
- context_len = SNPrintF(buffer, "%s: ", context_);
- CHECK_LE(0, context_len); // check for overflow.
+ PrintFToString(error_msg_, 0, "%s: ", context_);
+ context_len = error_msg_.size();
}
-
- int message_len =
- VSNPrintF(buffer.SubVector(context_len, buffer.length()), format, args);
- CHECK_LE(0, message_len); // check for overflow.
-
- Vector<char> whole_message = buffer.SubVector(0, context_len + message_len);
- i::Handle<i::String> message =
- isolate_->factory()
- ->NewStringFromOneByte(Vector<uint8_t>::cast(whole_message))
- .ToHandleChecked();
- exception_ = isolate_->factory()->NewError(constructor, message);
+ VPrintFToString(error_msg_, context_len, format, args);
+ error_type_ = type;
}
void ErrorThrower::TypeError(const char* format, ...) {
- if (error()) return;
va_list arguments;
va_start(arguments, format);
- Format(isolate_->type_error_function(), format, arguments);
+ Format(kTypeError, format, arguments);
va_end(arguments);
}
void ErrorThrower::RangeError(const char* format, ...) {
- if (error()) return;
va_list arguments;
va_start(arguments, format);
- Format(isolate_->range_error_function(), format, arguments);
+ Format(kRangeError, format, arguments);
va_end(arguments);
}
void ErrorThrower::CompileError(const char* format, ...) {
- if (error()) return;
- wasm_error_ = true;
va_list arguments;
va_start(arguments, format);
- Format(isolate_->wasm_compile_error_function(), format, arguments);
+ Format(kCompileError, format, arguments);
va_end(arguments);
}
void ErrorThrower::LinkError(const char* format, ...) {
- if (error()) return;
- wasm_error_ = true;
va_list arguments;
va_start(arguments, format);
- Format(isolate_->wasm_link_error_function(), format, arguments);
+ Format(kLinkError, format, arguments);
va_end(arguments);
}
void ErrorThrower::RuntimeError(const char* format, ...) {
- if (error()) return;
- wasm_error_ = true;
va_list arguments;
va_start(arguments, format);
- Format(isolate_->wasm_runtime_error_function(), format, arguments);
+ Format(kRuntimeError, format, arguments);
va_end(arguments);
}
+Handle<Object> ErrorThrower::Reify() {
+ Handle<JSFunction> constructor;
+ switch (error_type_) {
+ case kNone:
+ UNREACHABLE();
+ case kTypeError:
+ constructor = isolate_->type_error_function();
+ break;
+ case kRangeError:
+ constructor = isolate_->range_error_function();
+ break;
+ case kCompileError:
+ constructor = isolate_->wasm_compile_error_function();
+ break;
+ case kLinkError:
+ constructor = isolate_->wasm_link_error_function();
+ break;
+ case kRuntimeError:
+ constructor = isolate_->wasm_runtime_error_function();
+ break;
+ }
+ Vector<const uint8_t> msg_vec(
+ reinterpret_cast<const uint8_t*>(error_msg_.data()),
+ static_cast<int>(error_msg_.size()));
+ Handle<String> message =
+ isolate_->factory()->NewStringFromOneByte(msg_vec).ToHandleChecked();
+ error_type_ = kNone; // Reset.
+ Handle<Object> exception =
+ isolate_->factory()->NewError(constructor, message);
+ return exception;
+}
+
+void ErrorThrower::Reset() {
+ error_type_ = kNone;
+ error_msg_.clear();
+}
+
+ErrorThrower::ErrorThrower(ErrorThrower&& other)
+ : isolate_(other.isolate_),
+ context_(other.context_),
+ error_type_(other.error_type_),
+ error_msg_(std::move(other.error_msg_)) {
+ other.error_type_ = kNone;
+}
+
ErrorThrower::~ErrorThrower() {
if (error() && !isolate_->has_pending_exception()) {
- isolate_->ScheduleThrow(*exception_);
+ isolate_->ScheduleThrow(*Reify());
}
}
+
} // namespace wasm
} // namespace internal
} // namespace v8
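Typical use of the reworked thrower, as suggested by the API above (illustrative): an error is recorded as an enum tag plus a std::string, and the JS exception object is only materialized on demand by Reify(), or by the destructor if nobody consumed it.

    ErrorThrower thrower(isolate, "WasmExample");
    thrower.TypeError("expected %d arguments", 2);
    if (thrower.error()) {
      Handle<Object> exception = thrower.Reify();  // builds the JSError lazily
      // ... reject a promise or rethrow; Reify() also resets the thrower.
    }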
diff --git a/deps/v8/src/wasm/wasm-result.h b/deps/v8/src/wasm/wasm-result.h
index 79d06758b1..848170c80e 100644
--- a/deps/v8/src/wasm/wasm-result.h
+++ b/deps/v8/src/wasm/wasm-result.h
@@ -21,38 +21,18 @@ class Isolate;
namespace wasm {
-// The overall result of decoding a function or a module.
-template <typename T>
-class Result {
- public:
- Result() = default;
+// Base class for Result<T>.
+class V8_EXPORT_PRIVATE ResultBase {
+ protected:
+ ResultBase(ResultBase&& other)
+ : error_offset_(other.error_offset_),
+ error_msg_(std::move(other.error_msg_)) {}
+ ResultBase() = default;
- template <typename S>
- explicit Result(S&& value) : val(value) {}
+ ResultBase& operator=(ResultBase&& other) = default;
- template <typename S>
- Result(Result<S>&& other)
- : val(std::move(other.val)),
- error_offset(other.error_offset),
- error_msg(std::move(other.error_msg)) {}
-
- Result& operator=(Result&& other) = default;
-
- T val = T{};
- uint32_t error_offset = 0;
- std::string error_msg;
-
- bool ok() const { return error_msg.empty(); }
- bool failed() const { return !ok(); }
-
- template <typename V>
- void MoveErrorFrom(Result<V>& that) {
- error_offset = that.error_offset;
- // Use {swap()} + {clear()} instead of move assign, as {that} might still be
- // used afterwards.
- error_msg.swap(that.error_msg);
- that.error_msg.clear();
- }
+ public:
+ void error(uint32_t offset, std::string error_msg);
void PRINTF_FORMAT(2, 3) error(const char* format, ...) {
va_list args;
@@ -61,22 +41,42 @@ class Result {
va_end(args);
}
- void PRINTF_FORMAT(2, 0) verror(const char* format, va_list args) {
- size_t len = base::bits::RoundUpToPowerOfTwo32(
- static_cast<uint32_t>(strlen(format)));
- // Allocate increasingly large buffers until the message fits.
- for (;; len *= 2) {
- DCHECK_GE(kMaxInt, len);
- error_msg.resize(len);
- int written =
- VSNPrintF(Vector<char>(&error_msg.front(), static_cast<int>(len)),
- format, args);
- if (written < 0) continue; // not enough space.
- if (written == 0) error_msg = "Error"; // assign default message.
- return;
- }
+ void PRINTF_FORMAT(2, 0) verror(const char* format, va_list args);
+
+ void MoveErrorFrom(ResultBase& that) {
+ error_offset_ = that.error_offset_;
+ // Use {swap()} + {clear()} instead of move assign, as {that} might still
+ // be used afterwards.
+ error_msg_.swap(that.error_msg_);
+ that.error_msg_.clear();
}
+ bool ok() const { return error_msg_.empty(); }
+ bool failed() const { return !ok(); }
+
+ uint32_t error_offset() const { return error_offset_; }
+ const std::string& error_msg() const { return error_msg_; }
+
+ private:
+ uint32_t error_offset_ = 0;
+ std::string error_msg_;
+};
+
+// The overall result of decoding a function or a module.
+template <typename T>
+class Result : public ResultBase {
+ public:
+ Result() = default;
+
+ template <typename S>
+ explicit Result(S&& value) : val(std::forward<S>(value)) {}
+
+ template <typename S>
+ Result(Result<S>&& other)
+ : ResultBase(std::move(other)), val(std::move(other.val)) {}
+
+ Result& operator=(Result&& other) = default;
+
static Result<T> PRINTF_FORMAT(1, 2) Error(const char* format, ...) {
va_list args;
va_start(args, format);
@@ -86,6 +86,8 @@ class Result {
return result;
}
+ T val = T{};
+
private:
DISALLOW_COPY_AND_ASSIGN(Result);
};
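With error bookkeeping hoisted into the untemplated ResultBase, error state can now move between Result<T> instantiations of different T; a small sketch using only the API above:

    Result<int> inner;
    inner.error(12, "bad section");   // offset + message, via ResultBase
    Result<std::string> outer;
    outer.MoveErrorFrom(inner);       // no longer needs matching value types
    DCHECK(outer.failed());
    DCHECK_EQ(12u, outer.error_offset());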
@@ -93,8 +95,10 @@ class Result {
// A helper for generating error messages that bubble up to JS exceptions.
class V8_EXPORT_PRIVATE ErrorThrower {
public:
- ErrorThrower(i::Isolate* isolate, const char* context)
+ ErrorThrower(Isolate* isolate, const char* context)
: isolate_(isolate), context_(context) {}
+ // Explicitly allow move-construction. Disallow copy (below).
+ ErrorThrower(ErrorThrower&& other);
~ErrorThrower();
PRINTF_FORMAT(2, 3) void TypeError(const char* fmt, ...);
@@ -106,26 +110,42 @@ class V8_EXPORT_PRIVATE ErrorThrower {
template <typename T>
void CompileFailed(const char* error, Result<T>& result) {
DCHECK(result.failed());
- CompileError("%s: %s @+%u", error, result.error_msg.c_str(),
- result.error_offset);
+ CompileError("%s: %s @+%u", error, result.error_msg().c_str(),
+ result.error_offset());
}
- i::Handle<i::Object> Reify() {
- i::Handle<i::Object> result = exception_;
- exception_ = i::Handle<i::Object>::null();
- return result;
- }
+ // Create and return exception object.
+ MUST_USE_RESULT Handle<Object> Reify();
- bool error() const { return !exception_.is_null(); }
- bool wasm_error() { return wasm_error_; }
+ // Reset any error which was set on this thrower.
+ void Reset();
- private:
- void Format(i::Handle<i::JSFunction> constructor, const char* fmt, va_list);
+ bool error() const { return error_type_ != kNone; }
+ bool wasm_error() { return error_type_ >= kFirstWasmError; }
- i::Isolate* isolate_;
+ private:
+ enum ErrorType {
+ kNone,
+ // General errors.
+ kTypeError,
+ kRangeError,
+ // Wasm errors.
+ kCompileError,
+ kLinkError,
+ kRuntimeError,
+
+ // Marker.
+ kFirstWasmError = kCompileError
+ };
+
+ void Format(ErrorType type, const char* fmt, va_list);
+
+ Isolate* isolate_;
const char* context_;
- i::Handle<i::Object> exception_;
- bool wasm_error_ = false;
+ ErrorType error_type_ = kNone;
+ std::string error_msg_;
+
+ DISALLOW_COPY_AND_ASSIGN(ErrorThrower);
};
} // namespace wasm
diff --git a/deps/v8/src/wasm/wasm-text.cc b/deps/v8/src/wasm/wasm-text.cc
index 1656ffbd42..540bfb5ec0 100644
--- a/deps/v8/src/wasm/wasm-text.cc
+++ b/deps/v8/src/wasm/wasm-text.cc
@@ -194,6 +194,7 @@ void wasm::PrintWasmText(const WasmModule *module,
// they are publicly available.
FOREACH_SIMD_0_OPERAND_OPCODE(CASE_OPCODE)
FOREACH_SIMD_1_OPERAND_OPCODE(CASE_OPCODE)
+ FOREACH_SIMD_MASK_OPERAND_OPCODE(CASE_OPCODE)
FOREACH_ATOMIC_OPCODE(CASE_OPCODE)
os << WasmOpcodes::OpcodeName(opcode);
break;
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index 7deaf23635..4a2e9a17e8 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -521,23 +521,23 @@ template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitEmbeddedPointer(host(), this);
Assembler::FlushICache(isolate, pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
+ visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
+ visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
+ visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(this);
+ visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
+ visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(this);
+ visitor->VisitDebugTarget(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
+ visitor->VisitRuntimeEntry(host(), this);
}
}
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index b366e66c2a..b2330b3320 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -935,7 +935,6 @@ void Assembler::cld() {
emit(0xFC);
}
-
void Assembler::cdq() {
EnsureSpace ensure_space(this);
emit(0x99);
@@ -2892,11 +2891,11 @@ void Assembler::pextrw(Register dst, XMMRegister src, int8_t imm8) {
DCHECK(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
- emit_optional_rex_32(dst, src);
+ emit_optional_rex_32(src, dst);
emit(0x0F);
emit(0x3A);
emit(0x15);
- emit_sse_operand(dst, src);
+ emit_sse_operand(src, dst);
emit(imm8);
}
@@ -4636,6 +4635,26 @@ void Assembler::psrldq(XMMRegister dst, uint8_t shift) {
emit(shift);
}
+void Assembler::pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x70);
+ emit_sse_operand(dst, src);
+ emit(shuffle);
+}
+
+void Assembler::pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+ EnsureSpace ensure_space(this);
+ emit(0xF2);
+ emit_optional_rex_32(dst, src);
+ emit(0x0F);
+ emit(0x70);
+ emit_sse_operand(dst, src);
+ emit(shuffle);
+}
+
void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
EnsureSpace ensure_space(this);
emit(0x66);
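Encoding note for the two emitters added above: all three 128-bit shuffle forms share opcode 0F 70 and are distinguished only by the mandatory prefix, which is exactly the first byte each function emits.

    // pshufhw xmm, xmm/m128, imm8  ->  F3 0F 70 /r ib  (shuffles high 4 words)
    // pshuflw xmm, xmm/m128, imm8  ->  F2 0F 70 /r ib  (shuffles low 4 words)
    // pshufd  xmm, xmm/m128, imm8  ->  66 0F 70 /r ib  (shuffles all 4 dwords)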
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 6a28b51fc8..0f2f27247e 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -1305,6 +1305,8 @@ class Assembler : public AssemblerBase {
void psrldq(XMMRegister dst, uint8_t shift);
void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle);
+ void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
+ void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
void cvtdq2ps(XMMRegister dst, XMMRegister src);
void cvtdq2ps(XMMRegister dst, const Operand& src);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index fc080f4c4c..84630928d4 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -350,85 +350,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- // This case is handled prior to the RegExpExecStub call.
- __ Abort(kUnexpectedRegExpExecCall);
-#else // V8_INTERPRETED_REGEXP
- // Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 9;
- int argument_slots_on_stack =
- masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
- __ EnterApiExitFrame(argument_slots_on_stack);
-
- // Argument 9: Pass current isolate address.
- __ LoadAddress(kScratchRegister,
- ExternalReference::isolate_address(isolate()));
- __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kRegisterSize),
- kScratchRegister);
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kRegisterSize),
- Immediate(1));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ Move(kScratchRegister, address_of_regexp_stack_memory_address);
- __ movp(r12, Operand(kScratchRegister, 0));
- __ Move(kScratchRegister, address_of_regexp_stack_memory_size);
- __ addp(r12, Operand(kScratchRegister, 0));
- __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kRegisterSize), r12);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- // Argument 6 is passed in r9 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kRegisterSize),
- Immediate(0));
-#else
- __ Set(r9, 0);
-#endif
-
- // Argument 5: static offsets vector buffer.
- // Argument 5 passed in r8 on Linux and on the stack on Windows.
-#ifdef _WIN64
- __ LoadAddress(
- r12, ExternalReference::address_of_static_offsets_vector(isolate()));
- __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kRegisterSize), r12);
-#else // _WIN64
- __ LoadAddress(
- r8, ExternalReference::address_of_static_offsets_vector(isolate()));
-#endif
-
- // Argument 2: Previous index.
- // TODO(jgruber): Ideally, LastIndexRegister would already equal arg_reg_2,
- // but that makes register allocation fail.
- __ movp(arg_reg_2, RegExpExecDescriptor::LastIndexRegister());
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- CHECK(arg_reg_4.is(RegExpExecDescriptor::StringEndRegister()));
- CHECK(arg_reg_3.is(RegExpExecDescriptor::StringStartRegister()));
-
- // Argument 1: Original subject string.
- CHECK(arg_reg_1.is(RegExpExecDescriptor::StringRegister()));
-
- __ addp(RegExpExecDescriptor::CodeRegister(),
- Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(RegExpExecDescriptor::CodeRegister());
-
- __ LeaveApiExitFrame(true);
-
- // TODO(jgruber): Don't tag return value once this is supported by stubs.
- __ Integer32ToSmi(rax, rax);
- __ ret(0 * kPointerSize);
-#endif // V8_INTERPRETED_REGEXP
-}
-
-
static int NegativeComparisonResult(Condition cc) {
DCHECK(cc != equal);
DCHECK((cc == less) || (cc == less_equal)
@@ -2822,15 +2743,13 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// call data
__ Push(call_data);
- Register scratch = call_data;
- if (!this->call_data_undefined()) {
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- }
+
// return value
- __ Push(scratch);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// return value default
- __ Push(scratch);
+ __ PushRoot(Heap::kUndefinedValueRootIndex);
// isolate
+ Register scratch = call_data;
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
__ Push(scratch);
// holder
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 46176b13c6..611a3c6c21 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -30,29 +30,27 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+ Address instruction_start = code->instruction_start();
// Invalidate the relocation information, as it will become invalid by the
// code patching below, and is not needed any more.
code->InvalidateRelocation();
- if (FLAG_zap_code_space) {
- // Fail hard and early if we enter this code object again.
- byte* pointer = code->FindCodeAgeSequence();
- if (pointer != NULL) {
- pointer += kNoCodeAgeSequenceLength;
- } else {
- pointer = code->instruction_start();
- }
- CodePatcher patcher(isolate, pointer, 1);
- patcher.masm()->int3();
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int osr_offset = data->OsrPcOffset()->value();
- if (osr_offset > 0) {
- CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
- 1);
- osr_patcher.masm()->int3();
- }
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(isolate, pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(isolate, instruction_start + osr_offset, 1);
+ osr_patcher.masm()->int3();
}
// For each LLazyBailout instruction insert an absolute call to the
@@ -61,7 +59,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
// before the safepoint table (space was allocated there when the Code
// object was created, if necessary).
- Address instruction_start = code->instruction_start();
#ifdef DEBUG
Address prev_call_address = NULL;
#endif
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 11cc30a7b6..a7438ad275 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -1682,7 +1682,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("pextrw "); // reg/m32, xmm, imm8
current += PrintRightOperand(current);
- AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 3);
+ AppendToBuffer(",%s,%d", NameOfXMMRegister(regop), (*current) & 7);
current += 1;
} else if (third_byte == 0x16) {
get_modrm(*current, &mod, &regop, &rm);
@@ -1788,6 +1788,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
current += 1;
} else if (opcode == 0xB1) {
current += PrintOperands("cmpxchg", OPER_REG_OP_ORDER, current);
+ } else if (opcode == 0xC4) {
+ AppendToBuffer("pinsrw %s,", NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
+ AppendToBuffer(",0x%x", (*current) & 7);
+ current += 1;
} else {
const char* mnemonic = "?";
if (opcode == 0x54) {
@@ -1824,10 +1829,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
mnemonic = "punpckhdq";
} else if (opcode == 0x6B) {
mnemonic = "packssdw";
- } else if (opcode == 0xC4) {
- mnemonic = "pinsrw";
- } else if (opcode == 0xC5) {
- mnemonic = "pextrw";
} else if (opcode == 0xD1) {
mnemonic = "psrlw";
} else if (opcode == 0xD2) {
@@ -1941,6 +1942,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
AppendToBuffer("%s %s,", mnemonic, NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x70) {
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("pshuflw %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %d", (*current) & 7);
+ current += 1;
} else if (opcode == 0xC2) {
// Intel manual 2A, Table 3-18.
int mod, regop, rm;
@@ -1996,6 +2004,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("cvttss2si%c %s,",
operand_size_code(), NameOfCPURegister(regop));
current += PrintRightXMMOperand(current);
+ } else if (opcode == 0x70) {
+ int mod, regop, rm;
+ get_modrm(*current, &mod, &regop, &rm);
+ AppendToBuffer("pshufhw %s, ", NameOfXMMRegister(regop));
+ current += PrintRightXMMOperand(current);
+ AppendToBuffer(", %d", (*current) & 7);
+ current += 1;
} else if (opcode == 0x7E) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
diff --git a/deps/v8/src/x64/interface-descriptors-x64.cc b/deps/v8/src/x64/interface-descriptors-x64.cc
index dd03f19cbc..b6ab7ca1af 100644
--- a/deps/v8/src/x64/interface-descriptors-x64.cc
+++ b/deps/v8/src/x64/interface-descriptors-x64.cc
@@ -56,11 +56,6 @@ const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
-const Register RegExpExecDescriptor::StringRegister() { return arg_reg_1; }
-const Register RegExpExecDescriptor::LastIndexRegister() { return r11; }
-const Register RegExpExecDescriptor::StringStartRegister() { return arg_reg_3; }
-const Register RegExpExecDescriptor::StringEndRegister() { return arg_reg_4; }
-const Register RegExpExecDescriptor::CodeRegister() { return rax; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return rax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return rbx; }
@@ -160,9 +155,20 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
+ // rax : number of arguments
+ // rcx : start index (to support rest parameters)
+ // rdi : the target to call
+ Register registers[] = {rdi, rax, rcx};
+ data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
+void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
+ CallInterfaceDescriptorData* data) {
+ // rax : number of arguments
+ // rdx : the new target
// rcx : start index (to support rest parameters)
// rdi : the target to call
- Register registers[] = {rdi, rcx};
+ Register registers[] = {rdi, rdx, rax, rcx};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index c31b5ac379..7087c03973 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -292,7 +292,7 @@ void MacroAssembler::RecordWriteField(
leap(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
- testb(dst, Immediate((1 << kPointerSizeLog2) - 1));
+ testb(dst, Immediate(kPointerSize - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
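The mask rewrite above is behavior-preserving, since kPointerSize is defined as 1 << kPointerSizeLog2; the new spelling just avoids the shift.

    static_assert(kPointerSize - 1 == (1 << kPointerSizeLog2) - 1,
                  "alignment mask unchanged");  // on x64: 8 - 1 == 7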
@@ -4786,6 +4786,7 @@ void MacroAssembler::CallCFunction(ExternalReference function,
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+ DCHECK_LE(num_arguments, kMaxCParameters);
DCHECK(has_frame());
// Check stack alignment.
if (emit_debug_code()) {
diff --git a/deps/v8/src/x64/sse-instr.h b/deps/v8/src/x64/sse-instr.h
index 00957278a7..235aa75fcf 100644
--- a/deps/v8/src/x64/sse-instr.h
+++ b/deps/v8/src/x64/sse-instr.h
@@ -41,6 +41,8 @@
V(psubsw, 66, 0F, E9) \
V(psubusb, 66, 0F, D8) \
V(psubusw, 66, 0F, D9) \
+ V(pand, 66, 0F, DB) \
+ V(por, 66, 0F, EB) \
V(pxor, 66, 0F, EF) \
V(cvtps2dq, 66, 0F, 5B)
@@ -48,6 +50,8 @@
V(pabsb, 66, 0F, 38, 1C) \
V(pabsw, 66, 0F, 38, 1D) \
V(pabsd, 66, 0F, 38, 1E) \
+ V(phaddd, 66, 0F, 38, 02) \
+ V(phaddw, 66, 0F, 38, 01) \
V(pshufb, 66, 0F, 38, 00) \
V(psignb, 66, 0F, 38, 08) \
V(psignw, 66, 0F, 38, 09) \
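The new phaddw/phaddd entries are the SSSE3 horizontal adds, presumably used by the x64 instruction selector to implement the I16x8AddHoriz/I32x4AddHoriz opcodes added earlier in this commit: each pairwise-adds adjacent lanes across both operands.

    // phaddd semantics (illustrative),
    // with dst = [a0, a1, a2, a3] and src = [b0, b1, b2, b3]:
    //   phaddd dst, src  =>  dst = [a0+a1, a2+a3, b0+b1, b2+b3]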
diff --git a/deps/v8/src/x87/assembler-x87-inl.h b/deps/v8/src/x87/assembler-x87-inl.h
index c1af7d06d2..02ffffc292 100644
--- a/deps/v8/src/x87/assembler-x87-inl.h
+++ b/deps/v8/src/x87/assembler-x87-inl.h
@@ -248,23 +248,23 @@ template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
- visitor->VisitEmbeddedPointer(this);
+ visitor->VisitEmbeddedPointer(host(), this);
Assembler::FlushICache(isolate, pc_, sizeof(Address));
} else if (RelocInfo::IsCodeTarget(mode)) {
- visitor->VisitCodeTarget(this);
+ visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::CELL) {
- visitor->VisitCell(this);
+ visitor->VisitCellPointer(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
- visitor->VisitExternalReference(this);
+ visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE) {
- visitor->VisitInternalReference(this);
+ visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
+ visitor->VisitCodeAgeSequence(host(), this);
} else if (RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()) {
- visitor->VisitDebugTarget(this);
+ visitor->VisitDebugTarget(host(), this);
} else if (IsRuntimeEntry(mode)) {
- visitor->VisitRuntimeEntry(this);
+ visitor->VisitRuntimeEntry(host(), this);
}
}
diff --git a/deps/v8/src/x87/code-stubs-x87.cc b/deps/v8/src/x87/code-stubs-x87.cc
index 4ec7a45926..8b6fbadcb9 100644
--- a/deps/v8/src/x87/code-stubs-x87.cc
+++ b/deps/v8/src/x87/code-stubs-x87.cc
@@ -281,69 +281,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ ret(0);
}
-void RegExpExecStub::Generate(MacroAssembler* masm) {
-#ifdef V8_INTERPRETED_REGEXP
- // This case is handled prior to the RegExpExecStub call.
- __ Abort(kUnexpectedRegExpExecCall);
-#else // V8_INTERPRETED_REGEXP
- // Isolates: note we add an additional parameter here (isolate pointer).
- static const int kRegExpExecuteArguments = 9;
- __ EnterApiExitFrame(kRegExpExecuteArguments);
-
- // Argument 9: Pass current isolate address.
- __ mov(Operand(esp, 8 * kPointerSize),
- Immediate(ExternalReference::isolate_address(isolate())));
-
- // Argument 8: Indicate that this is a direct call from JavaScript.
- __ mov(Operand(esp, 7 * kPointerSize), Immediate(1));
-
- // Argument 7: Start (high end) of backtracking stack memory area.
- ExternalReference address_of_regexp_stack_memory_address =
- ExternalReference::address_of_regexp_stack_memory_address(isolate());
- ExternalReference address_of_regexp_stack_memory_size =
- ExternalReference::address_of_regexp_stack_memory_size(isolate());
- __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
- __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
- __ mov(Operand(esp, 6 * kPointerSize), esi);
-
- // Argument 6: Set the number of capture registers to zero to force global
- // regexps to behave as non-global. This does not affect non-global regexps.
- __ mov(Operand(esp, 5 * kPointerSize), Immediate(0));
-
- // Argument 5: static offsets vector buffer.
- __ mov(Operand(esp, 4 * kPointerSize),
- Immediate(ExternalReference::address_of_static_offsets_vector(
- isolate())));
-
- // Argument 4: End of string data
- // Argument 3: Start of string data
- __ mov(Operand(esp, 3 * kPointerSize),
- RegExpExecDescriptor::StringEndRegister());
- __ mov(Operand(esp, 2 * kPointerSize),
- RegExpExecDescriptor::StringStartRegister());
-
- // Argument 2: Previous index.
- __ mov(Operand(esp, 1 * kPointerSize),
- RegExpExecDescriptor::LastIndexRegister());
-
- // Argument 1: Original subject string.
- __ mov(Operand(esp, 0 * kPointerSize),
- RegExpExecDescriptor::StringRegister());
-
- // Locate the code entry and call it.
- __ add(RegExpExecDescriptor::CodeRegister(),
- Immediate(Code::kHeaderSize - kHeapObjectTag));
- __ call(RegExpExecDescriptor::CodeRegister());
-
- // Drop arguments and come back to JS mode.
- __ LeaveApiExitFrame(true);
-
- // TODO(jgruber): Don't tag return value once this is supported by stubs.
- __ SmiTag(eax);
- __ ret(0 * kPointerSize);
-#endif // V8_INTERPRETED_REGEXP
-}
-
static int NegativeComparisonResult(Condition cc) {
DCHECK(cc != equal);
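For context on the deletion above: on x87 the stub marshalled nine arguments onto the stack before jumping into generated irregexp code, and the matching RegExpExecDescriptor registers are dropped further down in interface-descriptors-x87.cc. Reconstructed from the stub's own comments, the frame looked roughly like the struct below; the names and types are illustrative, not V8 API:

    #include <cstdint>

    // Sketch of the esp slot layout the deleted RegExpExecStub built.
    struct RegExpExecArgs {
      void*   subject;              // slot 0: original subject string
      int32_t previous_index;       // slot 1: previous match index
      void*   string_start;         // slot 2: start of string data
      void*   string_end;           // slot 3: end of string data
      void*   static_offsets;       // slot 4: static offsets vector buffer
      int32_t num_capture_regs;     // slot 5: 0 forces global regexps to act non-global
      void*   backtrack_stack_top;  // slot 6: high end of backtracking stack memory
      int32_t direct_call;          // slot 7: 1 means direct call from JavaScript
      void*   isolate;              // slot 8: current isolate address
    };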
diff --git a/deps/v8/src/x87/deoptimizer-x87.cc b/deps/v8/src/x87/deoptimizer-x87.cc
index 521b69d7cf..a198284da5 100644
--- a/deps/v8/src/x87/deoptimizer-x87.cc
+++ b/deps/v8/src/x87/deoptimizer-x87.cc
@@ -94,25 +94,22 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
Address code_start_address = code->instruction_start();
- if (FLAG_zap_code_space) {
- // Fail hard and early if we enter this code object again.
- byte* pointer = code->FindCodeAgeSequence();
- if (pointer != NULL) {
- pointer += kNoCodeAgeSequenceLength;
- } else {
- pointer = code->instruction_start();
- }
- CodePatcher patcher(isolate, pointer, 1);
- patcher.masm()->int3();
-
- DeoptimizationInputData* data =
- DeoptimizationInputData::cast(code->deoptimization_data());
- int osr_offset = data->OsrPcOffset()->value();
- if (osr_offset > 0) {
- CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
- 1);
- osr_patcher.masm()->int3();
- }
+ // Fail hard and early if we enter this code object again.
+ byte* pointer = code->FindCodeAgeSequence();
+ if (pointer != NULL) {
+ pointer += kNoCodeAgeSequenceLength;
+ } else {
+ pointer = code->instruction_start();
+ }
+ CodePatcher patcher(isolate, pointer, 1);
+ patcher.masm()->int3();
+
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+ int osr_offset = data->OsrPcOffset()->value();
+ if (osr_offset > 0) {
+ CodePatcher osr_patcher(isolate, code_start_address + osr_offset, 1);
+ osr_patcher.masm()->int3();
}
// We will overwrite the code's relocation info in-place. Relocation info
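The hunk above unconditionally plants the fail-hard breakpoints that were previously gated on FLAG_zap_code_space: one int3 just past the code-age sequence (or at instruction start when there is none) and, when OsrPcOffset is positive, a second int3 at the OSR entry, now computed from the cached code_start_address. A standalone sketch of where the two patches land, with illustrative constants in place of the values V8 reads from the Code object:

    #include <cstdio>

    int main() {
      unsigned char code[128] = {};            // stand-in code object
      const int kNoCodeAgeSequenceLength = 5;  // assumed, illustrative
      const int osr_offset = 64;               // assumed, illustrative

      unsigned char* entry_patch = code + kNoCodeAgeSequenceLength;
      *entry_patch = 0xCC;  // int3: fail hard if the code is re-entered
      std::printf("entry int3 at offset %td\n", entry_patch - code);

      if (osr_offset > 0) {
        code[osr_offset] = 0xCC;  // int3 at the on-stack-replacement entry
        std::printf("OSR int3 at offset %d\n", osr_offset);
      }
      return 0;
    }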
diff --git a/deps/v8/src/x87/interface-descriptors-x87.cc b/deps/v8/src/x87/interface-descriptors-x87.cc
index 4601e98785..25707a34aa 100644
--- a/deps/v8/src/x87/interface-descriptors-x87.cc
+++ b/deps/v8/src/x87/interface-descriptors-x87.cc
@@ -56,11 +56,6 @@ const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
-const Register RegExpExecDescriptor::StringRegister() { return eax; }
-const Register RegExpExecDescriptor::LastIndexRegister() { return ecx; }
-const Register RegExpExecDescriptor::StringStartRegister() { return edx; }
-const Register RegExpExecDescriptor::StringEndRegister() { return ebx; }
-const Register RegExpExecDescriptor::CodeRegister() { return edi; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return eax; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return ebx; }
diff --git a/deps/v8/src/x87/macro-assembler-x87.cc b/deps/v8/src/x87/macro-assembler-x87.cc
index bd8ef3bb63..e7a512cd5b 100644
--- a/deps/v8/src/x87/macro-assembler-x87.cc
+++ b/deps/v8/src/x87/macro-assembler-x87.cc
@@ -340,7 +340,7 @@ void MacroAssembler::RecordWriteField(
lea(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
Label ok;
- test_b(dst, Immediate((1 << kPointerSizeLog2) - 1));
+ test_b(dst, Immediate(kPointerSize - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
@@ -370,7 +370,7 @@ void MacroAssembler::RecordWriteForMap(Register object, Handle<Map> map,
if (emit_debug_code()) {
Label ok;
lea(address, FieldOperand(object, HeapObject::kMapOffset));
- test_b(address, Immediate((1 << kPointerSizeLog2) - 1));
+ test_b(address, Immediate(kPointerSize - 1));
j(zero, &ok, Label::kNear);
int3();
bind(&ok);
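On the test_b rewrite above: kPointerSize is defined as 1 << kPointerSizeLog2, so (1 << kPointerSizeLog2) - 1 and kPointerSize - 1 produce the same alignment mask; only the spelling changes. A standalone check, assuming the ia32/x87 values:

    constexpr int kPointerSizeLog2 = 2;                  // ia32/x87
    constexpr int kPointerSize = 1 << kPointerSizeLog2;  // == 4
    static_assert((1 << kPointerSizeLog2) - 1 == kPointerSize - 1,
                  "alignment mask is unchanged by the rewrite");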
diff --git a/deps/v8/src/zone/accounting-allocator.h b/deps/v8/src/zone/accounting-allocator.h
index c6bf7a75e3..65128c6f70 100644
--- a/deps/v8/src/zone/accounting-allocator.h
+++ b/deps/v8/src/zone/accounting-allocator.h
@@ -13,7 +13,7 @@
#include "src/base/platform/semaphore.h"
#include "src/base/platform/time.h"
#include "src/zone/zone-segment.h"
-#include "testing/gtest/include/gtest/gtest_prod.h"
+#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
namespace internal {
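On the nogncheck marker added above: gn check verifies that the target providing each #include appears in the including target's deps, and the trailing marker suppresses that check for this one line. gtest_prod.h exists so production classes can befriend tests; a usage sketch with a hypothetical test name, include path as in the V8 tree:

    #include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck

    class AccountingAllocator {
     private:
      // FRIEND_TEST lets the named test reach private state.
      FRIEND_TEST(AccountingAllocatorTest, GrowsSegmentPool);  // hypothetical
      int private_state_ = 0;
    };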
diff --git a/deps/v8/test/BUILD.gn b/deps/v8/test/BUILD.gn
index 519c1d3a0e..936a6f6afc 100644
--- a/deps/v8/test/BUILD.gn
+++ b/deps/v8/test/BUILD.gn
@@ -11,6 +11,7 @@ group("gn_all") {
deps = [
":default_tests",
"inspector:inspector-test",
+ "mkgrokdump:mkgrokdump",
]
if (host_os != "mac" || !is_android) {
@@ -48,8 +49,10 @@ group("default_tests") {
":intl_run",
":message_run",
":mjsunit_run",
+ ":mkgrokdump_run",
":preparser_run",
":unittests_run",
+ ":wasm_spec_tests_run",
]
}
}
@@ -164,6 +167,14 @@ v8_isolate_run("mjsunit") {
isolate = "mjsunit/mjsunit.isolate"
}
+v8_isolate_run("mkgrokdump") {
+ deps = [
+ "mkgrokdump:mkgrokdump",
+ ]
+
+ isolate = "mkgrokdump/mkgrokdump.isolate"
+}
+
v8_isolate_run("mozilla") {
deps = [
"..:d8_run",
@@ -188,6 +199,14 @@ v8_isolate_run("unittests") {
isolate = "unittests/unittests.isolate"
}
+v8_isolate_run("wasm_spec_tests") {
+ deps = [
+ "..:d8_run",
+ ]
+
+ isolate = "wasm-spec-tests/wasm-spec-tests.isolate"
+}
+
v8_isolate_run("webkit") {
deps = [
"..:d8_run",
diff --git a/deps/v8/test/benchmarks/testcfg.py b/deps/v8/test/benchmarks/testcfg.py
index f96071b1b0..cdbb0adc8f 100644
--- a/deps/v8/test/benchmarks/testcfg.py
+++ b/deps/v8/test/benchmarks/testcfg.py
@@ -35,7 +35,7 @@ from testrunner.objects import testcase
class BenchmarksVariantGenerator(testsuite.VariantGenerator):
- # Both --nocrankshaft and --stressopt are very slow. Add TF but without
+ # Both --noopt and --stressopt are very slow. Add TF but without
# always opt to match the way the benchmarks are run for performance
# testing.
def FilterVariantsByTest(self, testcase):
diff --git a/deps/v8/test/bot_default.gyp b/deps/v8/test/bot_default.gyp
index 88538004d9..13c77e2d03 100644
--- a/deps/v8/test/bot_default.gyp
+++ b/deps/v8/test/bot_default.gyp
@@ -18,6 +18,7 @@
'mjsunit/mjsunit.gyp:mjsunit_run',
'preparser/preparser.gyp:preparser_run',
'unittests/unittests.gyp:unittests_run',
+ 'wasm-spec-tests/wasm-spec-tests.gyp:wasm_spec_tests_run',
'webkit/webkit.gyp:webkit_run',
],
'includes': [
diff --git a/deps/v8/test/bot_default.isolate b/deps/v8/test/bot_default.isolate
index 59420cb056..c4db291cc0 100644
--- a/deps/v8/test/bot_default.isolate
+++ b/deps/v8/test/bot_default.isolate
@@ -15,8 +15,10 @@
'intl/intl.isolate',
'message/message.isolate',
'mjsunit/mjsunit.isolate',
+ 'mkgrokdump/mkgrokdump.isolate',
'preparser/preparser.isolate',
'unittests/unittests.isolate',
+ 'wasm-spec-tests/wasm-spec-tests.isolate',
'webkit/webkit.isolate',
],
}
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index 4034f42949..d5365df606 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -12,7 +12,7 @@ v8_executable("cctest") {
### gcmole(all) ###
"../common/wasm/test-signatures.h",
- "asmjs/test-asm-typer.cc",
+ "../common/wasm/wasm-macro-gen.h",
"ast-types-fuzz.h",
"cctest.cc",
"cctest.h",
@@ -338,7 +338,7 @@ v8_executable("cctest") {
"../..:v8_libbase",
"../..:v8_libplatform",
"../..:wasm_module_runner",
- "//build/config/sanitizers:deps",
+ "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
@@ -437,7 +437,7 @@ v8_executable("generate-bytecode-expectations") {
"../..:v8",
"../..:v8_libbase",
"../..:v8_libplatform",
- "//build/config/sanitizers:deps",
+ "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
}
diff --git a/deps/v8/test/cctest/OWNERS b/deps/v8/test/cctest/OWNERS
index 06141ef626..92ab93045f 100644
--- a/deps/v8/test/cctest/OWNERS
+++ b/deps/v8/test/cctest/OWNERS
@@ -1,9 +1,6 @@
-per-file *-mips*=paul.lind@imgtec.com
-per-file *-mips*=gergely.kis@imgtec.com
-per-file *-mips*=akos.palfi@imgtec.com
-per-file *-mips*=balazs.kilvady@imgtec.com
-per-file *-mips*=dusan.milosavljevic@imgtec.com
per-file *-mips*=ivica.bogosavljevic@imgtec.com
+per-file *-mips*=Miran.Karic@imgtec.com
+per-file *-mips*=dusan.simicic@imgtec.com
per-file *-ppc*=dstence@us.ibm.com
per-file *-ppc*=joransiu@ca.ibm.com
per-file *-ppc*=jyan@ca.ibm.com
diff --git a/deps/v8/test/cctest/asmjs/OWNERS b/deps/v8/test/cctest/asmjs/OWNERS
deleted file mode 100644
index 509581c8db..0000000000
--- a/deps/v8/test/cctest/asmjs/OWNERS
+++ /dev/null
@@ -1,11 +0,0 @@
-# Keep in sync with src/asmjs/OWNERS.
-
-set noparent
-
-ahaas@chromium.org
-bradnelson@chromium.org
-clemensh@chromium.org
-jpp@chromium.org
-mtrofin@chromium.org
-rossberg@chromium.org
-titzer@chromium.org
diff --git a/deps/v8/test/cctest/asmjs/test-asm-typer.cc b/deps/v8/test/cctest/asmjs/test-asm-typer.cc
deleted file mode 100644
index a1737165b1..0000000000
--- a/deps/v8/test/cctest/asmjs/test-asm-typer.cc
+++ /dev/null
@@ -1,2089 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <cstring>
-#include <functional>
-#include <iostream>
-#include <memory>
-
-#include "src/asmjs/asm-typer.h"
-#include "src/asmjs/asm-types.h"
-#include "src/ast/ast-value-factory.h"
-#include "src/ast/ast.h"
-#include "src/ast/scopes.h"
-#include "src/base/platform/platform.h"
-#include "src/compiler.h"
-#include "src/objects-inl.h"
-#include "src/parsing/parse-info.h"
-#include "src/parsing/parser.h"
-#include "src/v8.h"
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-namespace iw = v8::internal::wasm;
-
-namespace v8 {
-namespace internal {
-namespace wasm {
-
-namespace {
-enum ValidationType {
- ValidateModule,
- ValidateGlobals,
- ValidateFunctionTables,
- ValidateExport,
- ValidateFunction,
- ValidateStatement,
- ValidateExpression,
-};
-} // namespace
-
-class AsmTyperHarnessBuilder {
- public:
- AsmTyperHarnessBuilder(const char* source, ValidationType type)
- : source_(source),
- validation_type_(type),
- handles_(),
- isolate_(CcTest::i_isolate()),
- factory_(isolate_->factory()),
- source_code_(
- factory_->NewStringFromUtf8(CStrVector(source)).ToHandleChecked()),
- script_(factory_->NewScript(source_code_)),
- info_(script_),
- ast_value_factory_(info_.zone(), isolate_->ast_string_constants(),
- isolate_->heap()->HashSeed()) {
- info_.set_allow_lazy_parsing(false);
- info_.set_toplevel(true);
- info_.set_ast_value_factory(&ast_value_factory_);
- info_.set_ast_value_factory_owned(false);
- Parser parser(&info_);
-
- if (!Compiler::ParseAndAnalyze(&info_, isolate_)) {
- std::cerr << "Failed to parse:\n" << source_ << "\n";
- CHECK(false);
- }
-
- outer_scope_ = info_.script_scope();
- module_ = info_.scope()
- ->declarations()
- ->AtForTest(0)
- ->AsFunctionDeclaration()
- ->fun();
- typer_.reset(new AsmTyper(isolate_, zone(), script_, module_));
-
- if (validation_type_ == ValidateStatement ||
- validation_type_ == ValidateExpression) {
- fun_scope_.reset(new AsmTyper::FunctionScope(typer_.get()));
-
- for (Declaration* decl : *module_->scope()->declarations()) {
- if (FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration()) {
- fun_decl_ = fun_decl;
- break;
- }
- }
- CHECK_NOT_NULL(fun_decl_);
- }
- }
-
- struct VariableName {
- VariableName(const char* name, VariableMode mode)
- : name_(name), mode_(mode) {}
- VariableName(const VariableName&) = default;
- VariableName& operator=(const VariableName&) = default;
-
- const char* name_;
- const VariableMode mode_;
- };
-
- AsmTyperHarnessBuilder* WithLocal(VariableName var_name, AsmType* type) {
- CHECK(validation_type_ == ValidateStatement ||
- validation_type_ == ValidateExpression);
- auto* var = DeclareVariable(var_name);
- if (var->IsUnallocated()) {
- var->AllocateTo(VariableLocation::LOCAL, -1);
- }
- auto* var_info = new (zone()) AsmTyper::VariableInfo(type);
- var_info->set_mutability(AsmTyper::VariableInfo::kLocal);
- CHECK(typer_->AddLocal(var, var_info));
- return this;
- }
-
- AsmTyperHarnessBuilder* WithGlobal(VariableName var_name, AsmType* type) {
- auto* var = DeclareVariable(var_name);
- if (var->IsUnallocated()) {
- var->AllocateTo(VariableLocation::MODULE, -1);
- }
- if (type != nullptr) {
- auto* var_info = new (zone()) AsmTyper::VariableInfo(type);
- var_info->set_mutability(AsmTyper::VariableInfo::kMutableGlobal);
- CHECK(typer_->AddGlobal(var, var_info));
- }
- return this;
- }
-
- AsmTyperHarnessBuilder* WithGlobal(
- VariableName var_name, std::function<AsmType*(Zone*)> type_creator) {
- return WithGlobal(var_name, type_creator(zone()));
- }
-
- AsmTyperHarnessBuilder* WithUndefinedGlobal(
- VariableName var_name, std::function<AsmType*(Zone*)> type_creator) {
- auto* type = type_creator(zone());
- CHECK(type->AsFunctionType() != nullptr ||
- type->AsFunctionTableType() != nullptr);
- WithGlobal(var_name, type);
- auto* var_info = typer_->Lookup(DeclareVariable(var_name));
- CHECK(var_info);
- MessageLocation location;
- var_info->SetFirstForwardUse(location);
- return this;
- }
-
- AsmTyperHarnessBuilder* WithImport(VariableName var_name,
- AsmTyper::StandardMember standard_member) {
- auto* var = DeclareVariable(var_name);
- if (var->IsUnallocated()) {
- var->AllocateTo(VariableLocation::LOCAL, -1);
- }
- AsmTyper::VariableInfo* var_info = nullptr;
- auto* stdlib_map = &typer_->stdlib_math_types_;
- switch (standard_member) {
- case AsmTyper::kHeap:
- case AsmTyper::kStdlib:
- case AsmTyper::kModule:
- case AsmTyper::kNone:
- CHECK(false);
- case AsmTyper::kFFI:
- stdlib_map = nullptr;
- var_info =
- new (zone()) AsmTyper::VariableInfo(AsmType::FFIType(zone()));
- var_info->set_mutability(AsmTyper::VariableInfo::kImmutableGlobal);
- break;
- case AsmTyper::kInfinity:
- case AsmTyper::kNaN:
- stdlib_map = &typer_->stdlib_types_;
- default:
- break;
- }
-
- if (var_info == nullptr) {
- for (auto iter : *stdlib_map) {
- if (iter.second->standard_member() == standard_member) {
- var_info = iter.second;
- break;
- }
- }
-
- CHECK(var_info != nullptr);
- var_info = var_info->Clone(zone());
- }
-
- CHECK(typer_->AddGlobal(var, var_info));
- return this;
- }
-
- AsmTyperHarnessBuilder* WithReturnType(AsmType* type) {
- CHECK(type->IsReturnType());
- CHECK(typer_->return_type_ == AsmType::None());
- typer_->return_type_ = type;
- return this;
- }
-
- AsmTyperHarnessBuilder* WithStdlib(VariableName var_name) {
- auto* var = DeclareVariable(var_name);
- auto* var_info =
- AsmTyper::VariableInfo::ForSpecialSymbol(zone(), AsmTyper::kStdlib);
- CHECK(typer_->AddGlobal(var, var_info));
- return this;
- }
-
- AsmTyperHarnessBuilder* WithHeap(VariableName var_name) {
- auto* var = DeclareVariable(var_name);
- auto* var_info =
- AsmTyper::VariableInfo::ForSpecialSymbol(zone(), AsmTyper::kHeap);
- CHECK(typer_->AddGlobal(var, var_info));
- return this;
- }
-
- AsmTyperHarnessBuilder* WithFFI(VariableName var_name) {
- auto* var = DeclareVariable(var_name);
- auto* var_info =
- AsmTyper::VariableInfo::ForSpecialSymbol(zone(), AsmTyper::kFFI);
- CHECK(typer_->AddGlobal(var, var_info));
- return this;
- }
-
- bool Succeeds() {
- CHECK(validation_type_ == ValidateModule ||
- validation_type_ == ValidateGlobals ||
- validation_type_ == ValidateFunctionTables ||
- validation_type_ == ValidateExport ||
- validation_type_ == ValidateFunction ||
- validation_type_ == ValidateStatement);
-
- if (validation_type_ == ValidateStatement) {
- CHECK(typer_->return_type_ != AsmType::None());
- if (ValidateAllStatements(fun_decl_)) {
- return true;
- }
- } else if (typer_->Validate()) {
- return true;
- }
-
- std::unique_ptr<char[]> msg = i::MessageHandler::GetLocalizedMessage(
- isolate_, typer_->error_message());
- std::cerr << "Asm validation failed: " << msg.get() << "\n";
- return false;
- }
-
- bool SucceedsWithExactType(AsmType* type) {
- CHECK(validation_type_ == ValidateExpression);
- auto* validated_as = ValidateExpressionStatment(fun_decl_);
- if (validated_as == AsmType::None()) {
- std::unique_ptr<char[]> msg = i::MessageHandler::GetLocalizedMessage(
- isolate_, typer_->error_message());
- std::cerr << "Validation failure: " << msg.get() << "\n";
- return false;
- } else if (validated_as != type) {
- std::cerr << "Validation succeeded with wrong type "
- << validated_as->Name() << " (vs. " << type->Name() << ").\n";
- return false;
- }
-
- return true;
- }
-
- bool FailsWithMessage(const char* error_message) {
- CHECK(validation_type_ == ValidateModule ||
- validation_type_ == ValidateGlobals ||
- validation_type_ == ValidateFunctionTables ||
- validation_type_ == ValidateExport ||
- validation_type_ == ValidateFunction ||
- validation_type_ == ValidateStatement ||
- validation_type_ == ValidateExpression);
-
- bool success;
- if (validation_type_ == ValidateStatement) {
- CHECK(typer_->return_type_ != AsmType::None());
- success = ValidateAllStatements(fun_decl_);
- } else if (validation_type_ == ValidateExpression) {
- success = ValidateExpressionStatment(fun_decl_) != AsmType::None();
- } else {
- success = typer_->Validate();
- }
-
- if (success) {
- std::cerr << "Asm validation succeeded\n";
- return false;
- }
-
- std::unique_ptr<char[]> msg = i::MessageHandler::GetLocalizedMessage(
- isolate_, typer_->error_message());
- if (std::strstr(msg.get(), error_message) == nullptr) {
- std::cerr << "Asm validation failed with the wrong error message:\n"
- "Expected to contain '"
- << error_message << "'\n"
- " Actually is '"
- << msg.get() << "'\n";
- return false;
- }
-
- return true;
- }
-
- private:
- Variable* DeclareVariable(VariableName var_name) {
- auto* name_ast_string = ast_value_factory_.GetOneByteString(var_name.name_);
- ast_value_factory_.Internalize(isolate_);
- return var_name.mode_ == DYNAMIC_GLOBAL
- ? outer_scope_->DeclareDynamicGlobal(name_ast_string,
- NORMAL_VARIABLE)
- : module_->scope()->DeclareLocal(name_ast_string, VAR,
- kCreatedInitialized,
- NORMAL_VARIABLE);
- }
-
- bool ValidateAllStatements(FunctionDeclaration* fun_decl) {
- AsmTyper::FlattenedStatements iter(zone(), fun_decl->fun()->body());
- while (auto* curr = iter.Next()) {
- if (typer_->ValidateStatement(curr) == AsmType::None()) {
- return false;
- }
- }
- return true;
- }
-
- AsmType* ValidateExpressionStatment(FunctionDeclaration* fun_decl) {
- AsmTyper::FlattenedStatements iter(zone(), fun_decl->fun()->body());
- AsmType* ret = AsmType::None();
- bool last_was_expression_statement = false;
- while (auto* curr = iter.Next()) {
- if (auto* expr_stmt = curr->AsExpressionStatement()) {
- last_was_expression_statement = true;
- if ((ret = typer_->ValidateExpression(expr_stmt->expression())) ==
- AsmType::None()) {
- break;
- }
- } else {
- ret = AsmType::None();
- last_was_expression_statement = true;
- if (typer_->ValidateStatement(curr) == AsmType::None()) {
- break;
- }
- }
- }
- CHECK(last_was_expression_statement || ret == AsmType::None());
- return ret;
- }
-
- Zone* zone() { return info_.zone(); }
-
- std::string source_;
- ValidationType validation_type_;
- HandleAndZoneScope handles_;
- Isolate* isolate_;
- Factory* factory_;
- Handle<String> source_code_;
- Handle<Script> script_;
- ParseInfo info_;
- AstValueFactory ast_value_factory_;
-
- DeclarationScope* outer_scope_;
- FunctionLiteral* module_;
- FunctionDeclaration* fun_decl_;
- std::unique_ptr<AsmTyper> typer_;
- std::unique_ptr<AsmTyper::FunctionScope> fun_scope_;
-};
-
-} // namespace wasm
-} // namespace internal
-} // namespace v8
-
-namespace {
-
-struct ValidationInput {
- ValidationInput(const std::string& source, iw::ValidationType type)
- : source_(source), type_(type) {}
-
- const std::string source_;
- const iw::ValidationType type_;
-};
-
-std::unique_ptr<iw::AsmTyperHarnessBuilder> ValidationOf(
- ValidationInput input) {
- return std::unique_ptr<iw::AsmTyperHarnessBuilder>(
- new iw::AsmTyperHarnessBuilder(input.source_.c_str(), input.type_));
-}
-
-ValidationInput Module(const char* source) {
- return ValidationInput(source, iw::ValidateModule);
-}
-
-std::string WrapInFunction(const char* source, bool needs_use_asm) {
- if (needs_use_asm) {
- return std::string(
- "function foo() {\n"
- " 'use asm';\n"
- " ") +
- source +
- "\n"
- "}";
- }
-
- return std::string(
- "function bar() {\n"
- " ") +
- source +
- "\n"
- "}\n"
- "return {b: bar};\n";
-}
-
-ValidationInput Globals(const char* source) {
- static const bool kNeedsUseAsm = true;
- return ValidationInput(WrapInFunction(source, kNeedsUseAsm),
- iw::ValidateGlobals);
-}
-
-ValidationInput FunctionTables(const char* source) {
- static const bool kNeedsUseAsm = true;
- return ValidationInput(WrapInFunction(source, kNeedsUseAsm),
- iw::ValidateFunctionTables);
-}
-
-ValidationInput Export(const char* source) {
- static const bool kNeedsUseAsm = true;
- return ValidationInput(WrapInFunction(source, kNeedsUseAsm),
- iw::ValidateExport);
-}
-
-ValidationInput Function(const char* source) {
- static const bool kNeedsUseAsm = true;
- return ValidationInput(WrapInFunction(source, kNeedsUseAsm),
- iw::ValidateFunction);
-}
-
-ValidationInput Statement(const char* source) {
- static const bool kDoesNotNeedUseAsm = false;
- static const bool kNeedsUseAsm = true;
- return ValidationInput(
- WrapInFunction(WrapInFunction(source, kDoesNotNeedUseAsm).c_str(),
- kNeedsUseAsm),
- iw::ValidateStatement);
-}
-
-ValidationInput Expression(const char* source) {
- static const bool kDoesNotNeedUseAsm = false;
- static const bool kNeedsUseAsm = true;
- return ValidationInput(
- WrapInFunction(WrapInFunction(source, kDoesNotNeedUseAsm).c_str(),
- kNeedsUseAsm),
- iw::ValidateExpression);
-}
-
-iw::AsmTyperHarnessBuilder::VariableName Var(const char* name) {
- return iw::AsmTyperHarnessBuilder::VariableName(name, VAR);
-}
-
-iw::AsmTyperHarnessBuilder::VariableName DynamicGlobal(const char* name) {
- return iw::AsmTyperHarnessBuilder::VariableName(name, DYNAMIC_GLOBAL);
-}
-
-TEST(MissingUseAsmDirective) {
- v8::V8::Initialize();
-
- // We can't test the empty input ("") because the AsmTyperHarnessBuilder will
- // CHECK if there's no function in the top-level scope.
- const char* kTests[] = {"function module(){}",
- "function module(){ use_asm; }",
- "function module(){ \"use asm \"; }",
- "function module(){ \" use asm \"; }",
- "function module(){ \"use Asm\"; }"};
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const char* module = kTests[ii];
- if (!ValidationOf(Module(module))
- ->FailsWithMessage("Missing \"use asm\"")) {
- std::cerr << "Test:\n" << module;
- CHECK(false);
- }
- }
-}
-
-TEST(InvalidModuleSignature) {
- v8::V8::Initialize();
-
- const struct {
- const char* module;
- const char* error_message;
- } kTests[] = {
- {"function eval(){ \"use asm\"; }",
- "Invalid asm.js identifier in module name"},
- {"function arguments(){ \"use asm\"; }",
- "Invalid asm.js identifier in module name"},
- {"function module(eval){ \"use asm\"; }",
- "Invalid asm.js identifier in module parameter"},
- {"function module(arguments){ \"use asm\"; }",
- "Invalid asm.js identifier in module parameter"},
- {"function module(stdlib, eval){ \"use asm\"; }",
- "Invalid asm.js identifier in module parameter"},
- {"function module(stdlib, arguments){ \"use asm\"; }",
- "Invalid asm.js identifier in module parameter"},
- {"function module(stdlib, foreign, eval){ \"use asm\"; }",
- "Invalid asm.js identifier in module parameter"},
- {"function module(stdlib, foreign, arguments){ \"use asm\"; }",
- "Invalid asm.js identifier in module parameter"},
- {"function module(stdlib, foreign, heap, eval){ \"use asm\"; }",
- "asm.js modules may not have more than three parameters"},
- {"function module(stdlib, foreign, heap, arguments){ \"use asm\"; }",
- "asm.js modules may not have more than three parameters"},
- {"function module(module){ \"use asm\"; }",
- "Redeclared identifier in module parameter"},
- {"function module(stdlib, module){ \"use asm\"; }",
- "Redeclared identifier in module parameter"},
- {"function module(stdlib, stdlib){ \"use asm\"; }",
- "Redeclared identifier in module parameter"},
- {"function module(stdlib, foreign, module){ \"use asm\"; }",
- "Redeclared identifier in module parameter"},
- {"function module(stdlib, foreign, stdlib){ \"use asm\"; }",
- "Redeclared identifier in module parameter"},
- {"function module(stdlib, foreign, foreign){ \"use asm\"; }",
- "Redeclared identifier in module parameter"},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Module(test->module))
- ->FailsWithMessage(test->error_message)) {
- std::cerr << "Test:\n" << test->module;
- CHECK(false);
- }
- }
-}
-
-TEST(ErrorsInGlobalVariableDefinition) {
- const struct {
- const char* decl;
- const char* error_message;
- } kTests[] = {
- {"var v;", "Global variable missing initializer"},
- {"var v = uninitialized;", "Undeclared identifier in global"},
- {"var v = 'use asm';", "type annotation - forbidden literal"},
- {"var v = 4294967296;", " - forbidden literal"},
- {"var v = not_fround;", "initialize a global must be a const"},
- {"var v = not_fround(1);", "expected call fround(literal)"},
- {"var v = __fround__(1.0);", "expected call fround(literal)"},
- {"var v = fround(1.0, 1.0);", "expected call fround(literal)"},
- {"var v = fround(not_fround);", "literal argument for call to fround"},
- {"var v = i?0:1;", "Invalid global variable initializer"},
- {"var v = stdlib.nan", "Invalid import"},
- {"var v = stdlib.Math.nan", "Invalid import"},
- {"var v = stdlib.Mathh.E", "Invalid import"},
- {"var v = stdlib.Math", "Invalid import"},
- {"var v = Stdlib.Math.E", "Invalid import"},
- {"var v = stdlib.Math.E[0]", "Invalid import"},
- {"var v = stdlibb.NaN", "Invalid import"},
- {"var v = ffi.NaN[0]", "Invalid import"},
- {"var v = heap.NaN[0]", "Invalid import"},
- {"var v = ffi.foo * 2.0;", "unrecognized annotation"},
- {"var v = ffi.foo|1;", "unrecognized annotation"},
- {"var v = ffi()|0;", "must import member"},
- {"var v = +ffi();", "must import member"},
- {"var v = ffi().a|0;", "object lookup failed"},
- {"var v = +ffi().a;", "object lookup failed"},
- {"var v = sstdlib.a|0;", "object lookup failed"},
- {"var v = +sstdlib.a;", "object lookup failed"},
- {"var v = stdlib.NaN|0;", "object is not the ffi"},
- {"var v = +stdlib.NaN;", "object is not the ffi"},
- {"var v = new f()", "Invalid type after new"},
- {"var v = new stdli.Uint8Array(heap)", "Unknown stdlib member in heap"},
- {"var v = new stdlib.dd(heap)", "Unknown stdlib member in heap"},
- {"var v = new stdlib.Math.fround(heap)", "Type is not a heap view type"},
- {"var v = new stdlib.Uint8Array(a, b)", "Invalid number of arguments"},
- {"var v = new stdlib.Uint8Array(heap())", "should be the module's heap"},
- {"var v = new stdlib.Uint8Array(heap_)", "instead of heap parameter"},
- {"var v = new stdlib.Uint8Array(ffi)", "should be the module's heap"},
- {"var eval = 0;", "in global variable"},
- {"var eval = 0.0;", "in global variable"},
- {"var eval = fround(0.0);", "in global variable"},
- {"var eval = +ffi.a;", "in global variable"},
- {"var eval = ffi.a|0;", "in global variable"},
- {"var eval = ffi.a;", "in global variable"},
- {"var eval = new stdlib.Uint8Array(heap);", "in global variable"},
- {"var arguments = 0;", "in global variable"},
- {"var arguments = 0.0;", "in global variable"},
- {"var arguments = fround(0.0);", "in global variable"},
- {"var arguments = +ffi.a;", "in global variable"},
- {"var arguments = ffi.a|0;", "in global variable"},
- {"var arguments = ffi.a;", "in global variable"},
- {"var arguments = new stdlib.Uint8Array(heap);", "in global variable"},
- {"var a = 0, a = 0.0;", "Redefined global variable"},
- {"var a = 0; var a = 0;", "Redefined global variable"},
- {"var a = 0, b = 0; var a = 0;", "Redefined global variable"},
- {"var a = 0, b = 0; var b = 0, a = 0.0;", "Redefined global variable"},
- {"var a = stdlib.Int8Array", "Heap view types can not be aliased"},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Globals(test->decl))
- ->WithStdlib(DynamicGlobal("stdlib"))
- ->WithFFI(DynamicGlobal("ffi"))
- ->WithHeap(DynamicGlobal("heap"))
- ->WithGlobal(DynamicGlobal("not_fround"), iw::AsmType::Int())
- ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
- ->FailsWithMessage(test->error_message)) {
- std::cerr << "Test:\n" << test->decl;
- CHECK(false);
- }
- }
-}
-
-TEST(ErrorsInFunctionTableDefinition) {
- const struct {
- const char* tables;
- const char* error_message;
- } kTests[] = {
- {"var a = [a, a, a];", "Invalid length for function pointer table"},
- {"var a = [d2s0()];", "must be a function name"},
- {"var a = [d2s44];", "Undefined identifier in function pointer"},
- {"var a = [fround];", "not be a member of the standard library"},
- {"var a = [imul];", "not be a member of the standard library"},
- {"var a = [ffi_import];", "must be an asm.js function"},
- {"var a = [dI];", "must be an asm.js function"},
- {"var a = [d2s0, d2s1, d2s0, f2s0];", "mismatch in function pointer"},
- {"var eval = [d2s0, d2s1];", "asm.js identifier in function table name"},
- {"var arguments = [d2s0, d2s1];", "asm.js identifier in function table"},
- {"var foo = [d2s0, d2s1];",
- "Identifier redefined as function pointer table"},
- {"var I = [d2s0, d2s1];",
- "Identifier redefined as function pointer table"},
- {"var d2s = [d2f0, d2f1];", "redefined as function pointer table"},
- {"var d2s_t = [d2s0];", "Function table size mismatch"},
- {"var d2s_t = [d2f0, d2f1];", "initializer does not match previous"},
- };
-
- auto d2s = [](Zone* zone) -> iw::AsmType* {
- auto* ret = iw::AsmType::Function(zone, iw::AsmType::Signed());
- ret->AsFunctionType()->AddArgument(iw::AsmType::Double());
- return ret;
- };
-
- auto d2s_tbl = [](Zone* zone) -> iw::AsmType* {
- auto* d2s = iw::AsmType::Function(zone, iw::AsmType::Signed());
- d2s->AsFunctionType()->AddArgument(iw::AsmType::Double());
-
- auto* ret = iw::AsmType::FunctionTableType(zone, 2, d2s);
- return ret;
- };
-
- auto f2s = [](Zone* zone) -> iw::AsmType* {
- auto* ret = iw::AsmType::Function(zone, iw::AsmType::Signed());
- ret->AsFunctionType()->AddArgument(iw::AsmType::Float());
- return ret;
- };
-
- auto d2f = [](Zone* zone) -> iw::AsmType* {
- auto* ret = iw::AsmType::Function(zone, iw::AsmType::Float());
- ret->AsFunctionType()->AddArgument(iw::AsmType::Double());
- return ret;
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(FunctionTables(test->tables))
- ->WithImport(DynamicGlobal("ffi_import"), iw::AsmTyper::kFFI)
- ->WithImport(DynamicGlobal("imul"), iw::AsmTyper::kMathImul)
- ->WithImport(DynamicGlobal("E"), iw::AsmTyper::kMathE)
- ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
- ->WithImport(DynamicGlobal("floor"), iw::AsmTyper::kMathFround)
- ->WithGlobal(DynamicGlobal("d2s0"), d2s)
- ->WithGlobal(DynamicGlobal("d2s1"), d2s)
- ->WithGlobal(DynamicGlobal("f2s0"), f2s)
- ->WithGlobal(DynamicGlobal("f2s1"), f2s)
- ->WithGlobal(DynamicGlobal("d2f0"), d2f)
- ->WithGlobal(DynamicGlobal("d2f1"), d2f)
- ->WithGlobal(DynamicGlobal("dI"), iw::AsmType::Int())
- ->WithGlobal(Var("I"), iw::AsmType::Int())
- ->WithUndefinedGlobal(Var("d2s"), d2s)
- ->WithUndefinedGlobal(Var("d2s_t"), d2s_tbl)
- ->FailsWithMessage(test->error_message)) {
- std::cerr << "Test:\n" << test->tables;
- CHECK(false);
- }
- }
-}
-
-TEST(ErrorsInModuleExport) {
- const struct {
- const char* module_export;
- const char* error_message;
- } kTests[] = {
- {"", "Missing asm.js module export"},
- {"return;", "Unrecognized expression in asm.js module export expression"},
- {"return f;", "Undefined identifier in asm.js module export"},
- {"return f();", "Unrecognized expression in asm.js module export"},
- {"return d2s_tbl;", "cannot export function tables"},
- {"return min;", "cannot export standard library functions"},
- {"return ffi;", "cannot export foreign functions"},
- {"return I;", "is not an asm.js function"},
- {"return {'a': d2s_tbl}", "cannot export function tables"},
- {"return {'a': min}", "cannot export standard library functions"},
- {"return {'a': ffi}", "cannot export foreign functions"},
- {"return {'a': f()}", "must be an asm.js function name"},
- {"return {'a': f}", "Undefined identifier in asm.js module export"},
- {"function v() { a(); } return {b: d2s}",
- "Invalid call of existing global function"},
- {"function v() {} return {b: v, 'a': d2s_tbl}",
- "cannot export function tables"},
- {"function v() {} return {b: v, 'a': min}",
- "cannot export standard library"},
- {"function v() {} return {b: v, 'a': ffi}",
- "cannot export foreign functions"},
- {"function v() {} return {b: v, 'a': f()}",
- "must be an asm.js function name"},
- {"function v() {} return {b: v, 'a': f}",
- "Undefined identifier in asm.js module"},
- };
-
- auto d2s_tbl = [](Zone* zone) -> iw::AsmType* {
- auto* d2s = iw::AsmType::Function(zone, iw::AsmType::Signed());
- d2s->AsFunctionType()->AddArgument(iw::AsmType::Double());
-
- auto* ret = iw::AsmType::FunctionTableType(zone, 2, d2s);
- return ret;
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Export(test->module_export))
- ->WithGlobal(DynamicGlobal("d2s_tbl"), d2s_tbl)
- ->WithImport(DynamicGlobal("min"), iw::AsmTyper::kMathMin)
- ->WithImport(DynamicGlobal("ffi"), iw::AsmTyper::kFFI)
- ->WithGlobal(DynamicGlobal("I"), iw::AsmType::Int())
- ->FailsWithMessage(test->error_message)) {
- std::cerr << "Test:\n" << test->module_export;
- CHECK(false);
- }
- }
-}
-
-TEST(ErrorsInFunction) {
- auto d2s = [](Zone* zone) -> iw::AsmType* {
- auto* ret = iw::AsmType::Function(zone, iw::AsmType::Signed());
- ret->AsFunctionType()->AddArgument(iw::AsmType::Double());
- return ret;
- };
-
- const struct {
- const char* function;
- const char* error_message;
- } kTests[] = {
- {"function f(eval) {"
- " eval = eval|0;"
- "}\n",
- "Invalid asm.js identifier in parameter name"},
- {"function f(arguments) {"
- " arguments = arguments|0;"
- "}\n",
- "Invalid asm.js identifier in parameter name"},
- // The following error should actually be a "redeclared local," but the
- // AST "hides" the first parameter from us, so the parameter type checking
- // will fail because the validator will think that the a = a|0 is
- // annotating the second parameter.
- {"function f(a, a) {\n"
- " a = a|0;\n"
- " a = +a;\n"
- "}\n",
- "Incorrect parameter type annotations"},
- {"function f(b, a) {\n"
- " if (0) return;\n"
- " b = +b;\n"
- " a = a|0;\n"
- "}\n",
- "Incorrect parameter type annotations"},
- {"function f(b, a) {\n"
- " f();\n"
- " b = +b;\n"
- " a = a|0;\n"
- "}\n",
- "Incorrect parameter type annotations"},
- {"function f(b, a) {\n"
- " f.a = 0;\n"
- " b = +b;\n"
- " a = a|0;\n"
- "}\n",
- "Incorrect parameter type annotations"},
- {"function f(b, a) {\n"
- " a = a|0;\n"
- " b = +b;\n"
- "}\n",
- "Incorrect parameter type annotations"},
- {"function f(b, a) {\n"
- " b = +b;\n"
- " a = a|0;\n"
- " var eval = 0;\n"
- "}\n",
- "Invalid asm.js identifier in local variable"},
- {"function f(b, a) {\n"
- " b = +b;\n"
- " a = a|0;\n"
- " var b = 0;\n"
- "}\n",
- "Redeclared local"},
- {"function f(b, a) {\n"
- " b = +b;\n"
- " a = a|0;\n"
- " var c = 0, c = 1.0;\n"
- "}\n",
- "Redeclared local"},
- {"function f(b, a) {\n"
- " b = +b;\n"
- " a = a|0;\n"
- " var c = 0; var c = 1.0;\n"
- "}\n",
- "Redeclared local"},
- {"function f(b, a) {\n"
- " b = +b;\n"
- " a = a|0;\n"
- " f();\n"
- " var c = 0;\n"
- "}\n",
- "Local variable missing initializer in asm.js module"},
- {"function f(a) {\n"
- " a = a|0;\n"
- " var x = a;\n"
- "}\n",
- "variable declaration initializer must be const"},
- {"function f() {\n"
- " var x = 1+i;\n"
- "}\n",
- "should be a literal, const, or fround(literal"},
- {"function f() {\n"
- " var x = a;\n"
- "}\n",
- "Undeclared identifier in variable declaration initializer"},
- {"function f() {\n"
- " function ff() {}\n"
- "}\n",
- "Functions may only define inner variables"},
- {"function f() {\n"
- " return a+1;\n"
- "}\n",
- "Invalid return type annotation"},
- {"function f() {\n"
- " return ~~x;\n"
- "}\n",
- "Invalid return type annotation"},
- {"function f() {\n"
- " return d();\n"
- "}\n",
- "Invalid function call in return statement"},
- {"function f() {\n"
- " return 'use asm';\n"
- "}\n",
- "Invalid literal in return statement"},
- {"function f() {\n"
- " return 2147483648;\n"
- "}\n",
- "Invalid literal in return statement"},
- {"function f(a) {\n"
- " a = a|0;\n"
- " return a;\n"
- "}\n",
- "in return statement is not const"},
- {"function f() {\n"
- " return a;\n"
- "}\n",
- "Undeclared identifier in return statement"},
- {"function f() {\n"
- " var i = 0;\n"
- " return i?0:1;\n"
- "}\n",
- "Type mismatch in return statement"},
- {"function f() {\n"
- " return stdlib.Math.E;"
- "}\n",
- "Invalid return type expression"},
- {"function f() {\n"
- " return E[0];"
- "}\n",
- "Invalid return type expression"},
- {"function I() {}\n", "Identifier redefined as function"},
- {"function foo() {}\n", "Identifier redefined as function"},
- {"function d2s() {}\n", "Identifier redefined (function name)"},
- {"function d2s(x) {\n"
- " x = x|0;\n"
- " return -1;\n"
- "}\n",
- "Identifier redefined (function name)"},
- {"function d2s(x) {\n"
- " x = +x;\n"
- " return -1.0;\n"
- "}\n",
- "Identifier redefined (function name)"},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Function(test->function))
- ->WithGlobal(Var("I"), iw::AsmType::Int())
- ->WithGlobal(Var("d2s"), d2s)
- ->FailsWithMessage(test->error_message)) {
- std::cerr << "Test:\n" << test->function;
- CHECK(false);
- }
- }
-}
-
-TEST(ErrorsInStatement) {
- const struct {
- const char* statement;
- const char* error_message;
- } kTests[] = {
- {"if (fround(1));", "If condition must be type int"},
- {"return;", "Type mismatch in return statement"},
- {"return +1.0;", "Type mismatch in return statement"},
- {"return +d()", "Type mismatch in return statement"},
- {"while (fround(1));", "While condition must be type int"},
- {"do {} while (fround(1));", "Do {} While condition must be type int"},
- {"for (;fround(1););", "For condition must be type int"},
- {"switch(flocal){ case 0: return 0; }", "Switch tag must be signed"},
- {"switch(slocal){ default: case 0: return 0; }",
- "Switch default must appear last"},
- {"switch(slocal){ case 1: case 1: return 0; }", "Duplicated case label"},
- {"switch(slocal){ case 1: case 0: break; case 1: return 0; }",
- "Duplicated case label"},
- {"switch(slocal){ case 1.0: return 0; }",
- "Case label must be a 32-bit signed integer"},
- {"switch(slocal){ case 1.0: return 0; }",
- "Case label must be a 32-bit signed integer"},
- {"switch(slocal){ case -100000: case 2147483647: return 0; }",
- "Out-of-bounds case"},
- {"switch(slocal){ case 2147483648: return 0; }",
- "Case label must be a 32-bit signed"},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Statement(test->statement))
- ->WithReturnType(iw::AsmType::Signed())
- ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
- ->WithLocal(DynamicGlobal("flocal"), iw::AsmType::Float())
- ->WithLocal(DynamicGlobal("slocal"), iw::AsmType::Signed())
- ->WithGlobal(DynamicGlobal("d"), nullptr)
- ->FailsWithMessage(test->error_message)) {
- std::cerr << "Test:\n" << test->statement;
- CHECK(false);
- }
- }
-}
-
-TEST(ErrorsInExpression) {
- auto d2d = [](Zone* zone) -> iw::AsmType* {
- auto* ret = iw::AsmType::Function(zone, iw::AsmType::Double());
- ret->AsFunctionType()->AddArgument(iw::AsmType::Double());
- return ret;
- };
-
- auto d2s_tbl = [](Zone* zone) -> iw::AsmType* {
- auto* d2s = iw::AsmType::Function(zone, iw::AsmType::Signed());
- d2s->AsFunctionType()->AddArgument(iw::AsmType::Double());
-
- auto* ret = iw::AsmType::FunctionTableType(zone, 2, d2s);
- return ret;
- };
-
- const struct {
- const char* expression;
- const char* error_message;
- } kTests[] = {
- {"noy_a_function();", "Unanotated call to a function must be a call to"},
- {"a = 0;", "Undeclared identifier"},
- // we can't verify the module's name being referenced here because
- // expression validation does not invoke ValidateModule, which sets up the
- // module information in the AsmTyper.
- {"stdlib", "accessed by ordinary expressions"},
- {"ffi", "accessed by ordinary expressions"},
- {"heap", "accessed by ordinary expressions"},
- {"d2d", "accessed by ordinary expression"},
- {"fround", "accessed by ordinary expression"},
- {"d2s_tbl", "accessed by ordinary expression"},
- {"ilocal = +1.0", "Type mismatch in assignment"},
- {"!dlocal", "Invalid type for !"},
- {"2 * dlocal", "Invalid types for intish *"},
- {"dlocal * 2", "Invalid types for intish *"},
- {"1048577 * ilocal", "Invalid operands for *"},
- {"1048577 / ilocal", "Invalid operands for /"},
- {"1048577 % dlocal", "Invalid operands for %"},
- {"1048577 * dlocal", "Invalid operands for *"},
- {"1048577 / dlocal", "Invalid operands for /"},
- {"1048577 % ilocal", "Invalid operands for %"},
- {"ilocal * dlocal", "Invalid operands for *"},
- {"ilocal / dlocal", "Invalid operands for /"},
- {"ilocal % dlocal", "Invalid operands for %"},
- {"1048577 + dlocal", "Invalid operands for additive expression"},
- {"1048577 - dlocal", "Invalid operands for additive expression"},
- {"ilocal + dlocal", "Invalid operands for additive expression"},
- {"ilocal - dlocal", "Invalid operands for additive expression"},
- {"1048577 << dlocal", "Invalid operands for <<"},
- {"1048577 >> dlocal", "Invalid operands for >>"},
- {"1048577 >>> dlocal", "Invalid operands for >>"},
- {"ilocal << dlocal", "Invalid operands for <<"},
- {"ilocal >> dlocal", "Invalid operands for >>"},
- {"ilocal >>> dlocal", "Invalid operands for >>>"},
- {"1048577 < dlocal", "Invalid operands for <"},
- {"ilocal < dlocal", "Invalid operands for <"},
- {"1048577 > dlocal", "Invalid operands for >"},
- {"ilocal > dlocal", "Invalid operands for >"},
- {"1048577 <= dlocal", "Invalid operands for <="},
- {"ilocal <= dlocal", "Invalid operands for <="},
- {"1048577 >= dlocal", "Invalid operands for >="},
- {"ilocal >= dlocal", "Invalid operands for >="},
- {"1048577 == dlocal", "Invalid operands for =="},
- {"ilocal == dlocal", "Invalid operands for =="},
- /* NOTE: the parser converts a != b to !(a == b). */
- {"1048577 != dlocal", "Invalid operands for =="},
- {"ilocal != dlocal", "Invalid operands for =="},
- {"dlocal & dlocal", "Invalid operands for &"},
- {"1048577 & dlocal", "Invalid operands for &"},
- {"ilocal & dlocal", "Invalid operands for &"},
- {"dlocal | dlocal2", "Invalid operands for |"},
- {"1048577 | dlocal", "Invalid operands for |"},
- {"ilocal | dlocal", "Invalid operands for |"},
- {"dlocal ^ dlocal2", "Invalid operands for ^"},
- {"1048577 ^ dlocal", "Invalid operands for ^"},
- {"ilocal ^ dlocal", "Invalid operands for ^"},
- {"dlocal ? 0 : 1", "Ternary operation condition should be int"},
- {"ilocal ? dlocal : 1", "Type mismatch for ternary operation result"},
- {"ilocal ? 1 : dlocal", "Type mismatch for ternary operation result"},
- {"eval(10)|0", "Invalid asm.js identifier in (forward) function"},
- {"arguments(10)|0", "Invalid asm.js identifier in (forward) function"},
- {"not_a_function(10)|0", "Calling something that's not a function"},
- {"fround(FFI())", "Foreign functions can't return float"},
- {"FFI(fround(0))|0", "Function invocation does not match function type"},
- {"FFI(2147483648)|0", "Function invocation does not match function type"},
- {"d2d(2.0)|0", "Function invocation does not match function type"},
- {"+d2d(2)", "Function invocation does not match function type"},
- {"eval[ilocal & 3]()|0", "Invalid asm.js identifier in (forward)"},
- {"arguments[ilocal & 3]()|0", "Invalid asm.js identifier in (forward)"},
- {"not_a_function[ilocal & 3]()|0", "Identifier does not name a function"},
- {"d2s_tbl[ilocal & 3](0.0)|0", "Function table size does not match"},
- {"+d2s_tbl[ilocal & 1](0.0)", "does not match previous signature"},
- {"d2s_tbl[ilocal & 1](0)|0", "does not match previous signature"},
- {"a.b()|0", "Indirect call index must be in the expr & mask form"},
- {"HEAP32[0][0] = 0", "Invalid heap access"},
- {"heap32[0] = 0", "Undeclared identifier in heap access"},
- {"not_a_function[0] = 0", "Identifier does not represent a heap view"},
- {"HEAP32[0.0] = 0", "Heap access index must be int"},
- {"HEAP32[-1] = 0", "Heap access index must be a 32-bit unsigned integer"},
- {"HEAP32[ilocal >> 1] = 0", "Invalid heap access index"},
- // *VIOLATION* the following is invalid, but because of desugaring it is
- // accepted.
- // {"HEAP32[0 >> 1] = 0", "Invalid heap access index"},
- {"HEAP8[fround(0.0)] = 0", "Invalid heap access index for byte array"},
- {"HEAP8[iish] = 0", "Invalid heap access index for byte array"},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithStdlib(DynamicGlobal("stdlib"))
- ->WithFFI(DynamicGlobal("ffi"))
- ->WithHeap(DynamicGlobal("heap"))
- ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
- ->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
- ->WithLocal(DynamicGlobal("dlocal"), iw::AsmType::Double())
- ->WithLocal(DynamicGlobal("dlocal2"), iw::AsmType::Double())
- ->WithLocal(DynamicGlobal("not_a_function"), iw::AsmType::Int())
- ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
- ->WithImport(DynamicGlobal("FFI"), iw::AsmTyper::kFFI)
- ->WithGlobal(DynamicGlobal("d2d"), d2d)
- ->WithGlobal(DynamicGlobal("d2s_tbl"), d2s_tbl)
- ->WithGlobal(DynamicGlobal("HEAP32"), iw::AsmType::Int32Array())
- ->WithGlobal(DynamicGlobal("HEAP8"), iw::AsmType::Int8Array())
- ->WithGlobal(DynamicGlobal("a"), nullptr)
- ->FailsWithMessage(test->error_message)) {
- std::cerr << "Test:\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateNumericLiteral) {
- const struct {
- const char* expression;
- iw::AsmType* expected_type;
- } kTests[] = {
- {"0", iw::AsmType::FixNum()},
- {"-1", iw::AsmType::Signed()},
- {"2147483648", iw::AsmType::Unsigned()},
- {"0.0", iw::AsmType::Double()},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->SucceedsWithExactType(test->expected_type)) {
- std::cerr << "Test:\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateIdentifier) {
- const struct {
- const char* expression;
- iw::AsmType* expected_type;
- } kTests[] = {{"afixnum", iw::AsmType::FixNum()},
- {"adouble", iw::AsmType::Double()},
- {"afloat", iw::AsmType::Float()},
- {"anextern", iw::AsmType::Extern()},
- {"avoid", iw::AsmType::Void()}};
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithLocal(DynamicGlobal(test->expression), test->expected_type)
- ->WithGlobal(DynamicGlobal(test->expression),
- iw::AsmType::Floatish())
- ->SucceedsWithExactType(test->expected_type)) {
- std::cerr << "Test (local identifiers):\n" << test->expression;
- CHECK(false);
- }
- }
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithGlobal(DynamicGlobal(test->expression), test->expected_type)
- ->SucceedsWithExactType(test->expected_type)) {
- std::cerr << "Test (global identifiers):\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateCallExpression) {
- auto v2f = [](Zone* zone) -> iw::AsmType* {
- auto* ret = iw::AsmType::Function(zone, iw::AsmType::Float());
- return ret;
- };
-
- const struct {
- const char* expression;
- } kTests[] = {
- {"a_float_function()"},
- {"fround(0)"},
- {"slocal"},
- {"ulocal"},
- {"dqlocal"},
- {"fishlocal"},
- };
-
- char full_test[200];
- static const size_t kFullTestSize = arraysize(full_test);
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- CHECK(v8::base::OS::SNPrintF(full_test, kFullTestSize, "fround(%s)",
- test->expression) <
- static_cast<int>(kFullTestSize));
- if (!ValidationOf(Expression(full_test))
- ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
- ->WithGlobal(DynamicGlobal("a_float_function"), v2f)
- ->WithLocal(DynamicGlobal("slocal"), iw::AsmType::Signed())
- ->WithLocal(DynamicGlobal("ulocal"), iw::AsmType::Unsigned())
- ->WithLocal(DynamicGlobal("dqlocal"), iw::AsmType::DoubleQ())
- ->WithLocal(DynamicGlobal("fishlocal"), iw::AsmType::Floatish())
- ->SucceedsWithExactType(iw::AsmType::Float())) {
- std::cerr << "Test:\n" << full_test;
- CHECK(false);
- }
- }
-
- const struct {
- const char* expression;
- const char* error_message;
- } kFailureTests[] = {
- {"vlocal", "Invalid argument type to fround"},
- {"ilocal", "Invalid argument type to fround"},
- {"a_double_function()", "Function invocation does not match"},
- };
-
- auto v2d = [](Zone* zone) -> iw::AsmType* {
- auto* ret = iw::AsmType::Function(zone, iw::AsmType::Double());
- return ret;
- };
-
- for (size_t ii = 0; ii < arraysize(kFailureTests); ++ii) {
- const auto* test = kFailureTests + ii;
- CHECK(v8::base::OS::SNPrintF(full_test, kFullTestSize, "fround(%s)",
- test->expression) <
- static_cast<int>(kFullTestSize));
- if (!ValidationOf(Expression(full_test))
- ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
- ->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
- ->WithLocal(DynamicGlobal("vlocal"), iw::AsmType::Void())
- ->WithGlobal(DynamicGlobal("a_double_function"), v2d)
- ->FailsWithMessage(test->error_message)) {
- std::cerr << "Test:\n" << full_test;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateMemberExpression) {
- const struct {
- const char* expression;
- iw::AsmType* load_type;
- } kTests[] = {
- {"I8[i]", iw::AsmType::Intish()}, // Legacy: no shift for 8-bit view.
- {"I8[iish >> 0]", iw::AsmType::Intish()},
- {"I8[0]", iw::AsmType::Intish()},
- {"I8[2147483648]", iw::AsmType::Intish()},
- {"U8[iish >> 0]", iw::AsmType::Intish()},
- {"U8[i]", iw::AsmType::Intish()}, // Legacy: no shift for 8-bit view.
- {"U8[0]", iw::AsmType::Intish()},
- {"U8[2147483648]", iw::AsmType::Intish()},
- {"I16[iish >> 1]", iw::AsmType::Intish()},
- {"I16[0]", iw::AsmType::Intish()},
- {"I16[1073741824]", iw::AsmType::Intish()},
- {"U16[iish >> 1]", iw::AsmType::Intish()},
- {"U16[0]", iw::AsmType::Intish()},
- {"U16[1073741824]", iw::AsmType::Intish()},
- {"I32[iish >> 2]", iw::AsmType::Intish()},
- {"I32[0]", iw::AsmType::Intish()},
- {"I32[536870912]", iw::AsmType::Intish()},
- {"U32[iish >> 2]", iw::AsmType::Intish()},
- {"U32[0]", iw::AsmType::Intish()},
- {"U32[536870912]", iw::AsmType::Intish()},
- {"F32[iish >> 2]", iw::AsmType::FloatQ()},
- {"F32[0]", iw::AsmType::FloatQ()},
- {"F32[536870912]", iw::AsmType::FloatQ()},
- {"F64[iish >> 3]", iw::AsmType::DoubleQ()},
- {"F64[0]", iw::AsmType::DoubleQ()},
- {"F64[268435456]", iw::AsmType::DoubleQ()},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithGlobal(DynamicGlobal("I8"), iw::AsmType::Int8Array())
- ->WithGlobal(DynamicGlobal("U8"), iw::AsmType::Uint8Array())
- ->WithGlobal(DynamicGlobal("I16"), iw::AsmType::Int16Array())
- ->WithGlobal(DynamicGlobal("U16"), iw::AsmType::Uint16Array())
- ->WithGlobal(DynamicGlobal("I32"), iw::AsmType::Int32Array())
- ->WithGlobal(DynamicGlobal("U32"), iw::AsmType::Uint32Array())
- ->WithGlobal(DynamicGlobal("F32"), iw::AsmType::Float32Array())
- ->WithGlobal(DynamicGlobal("F64"), iw::AsmType::Float64Array())
- ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
- ->WithLocal(DynamicGlobal("i"), iw::AsmType::Int())
- ->SucceedsWithExactType(test->load_type)) {
- std::cerr << "Test:\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateAssignmentExpression) {
- const struct {
- const char* expression;
- iw::AsmType* load_type;
- } kTests[] = {
- // -----------------------------------------------------------------------
- // Array assignments.
- // Storing signed to int heap view.
- {"I8[1024] = -1024", iw::AsmType::Signed()},
- {"I8[1024 >> 0] = -1024", iw::AsmType::Signed()},
- {"I8[0] = -1024", iw::AsmType::Signed()},
- {"I8[2147483648] = -1024", iw::AsmType::Signed()},
- {"U8[1024 >> 0] = -1024", iw::AsmType::Signed()},
- {"U8[0] = -1024", iw::AsmType::Signed()},
- {"U8[2147483648] = -1024", iw::AsmType::Signed()},
- {"I16[1024 >> 1] = -1024", iw::AsmType::Signed()},
- {"I16[0] = -1024", iw::AsmType::Signed()},
- {"I16[1073741824] = -1024", iw::AsmType::Signed()}, // not pre-shifted.
- {"U16[1024 >> 1] = -1024", iw::AsmType::Signed()},
- {"U16[0] = -1024", iw::AsmType::Signed()},
- {"U16[1073741824] = -1024", iw::AsmType::Signed()}, // not pre-shifted.
- {"I32[1024 >> 2] = -1024", iw::AsmType::Signed()},
- {"I32[0] = -1024", iw::AsmType::Signed()},
- {"I32[536870912] = -1024", iw::AsmType::Signed()}, // not pre-shifted.
- {"U32[1024 >> 2] = -1024", iw::AsmType::Signed()},
- {"U32[0] = -1024", iw::AsmType::Signed()},
- {"U32[536870912] = -1024", iw::AsmType::Signed()}, // not pre-shifted.
- // Storing fixnum to int heap view.
- {"I8[1024] = 1024", iw::AsmType::FixNum()},
- {"I8[1024 >> 0] = 1024", iw::AsmType::FixNum()},
- {"I8[0] = 1024", iw::AsmType::FixNum()},
- {"I8[2147483648] = 1024", iw::AsmType::FixNum()},
- {"U8[1024 >> 0] = 1024", iw::AsmType::FixNum()},
- {"U8[0] = 1024", iw::AsmType::FixNum()},
- {"U8[2147483648] = 1024", iw::AsmType::FixNum()},
- {"I16[1024 >> 1] = 1024", iw::AsmType::FixNum()},
- {"I16[0] = 1024", iw::AsmType::FixNum()},
- {"I16[1073741824] = 1024", iw::AsmType::FixNum()}, // not pre-shifted.
- {"U16[1024 >> 1] = 1024", iw::AsmType::FixNum()},
- {"U16[0] = 1024", iw::AsmType::FixNum()},
- {"U16[1073741824] = 1024", iw::AsmType::FixNum()}, // not pre-shifted.
- {"I32[1024 >> 2] = 1024", iw::AsmType::FixNum()},
- {"I32[0] = 1024", iw::AsmType::FixNum()},
- {"I32[536870912] = 1024", iw::AsmType::FixNum()}, // not pre-shifted.
- {"U32[1024 >> 2] = 1024", iw::AsmType::FixNum()},
- {"U32[0] = 1024", iw::AsmType::FixNum()},
- {"U32[536870912] = 1024", iw::AsmType::FixNum()}, // not pre-shifted.
- // Storing int to int heap view.
- {"I8[ilocal] = ilocal", iw::AsmType::Int()},
- {"I8[ilocal >> 0] = ilocal", iw::AsmType::Int()},
- {"I8[0] = ilocal", iw::AsmType::Int()},
- {"I8[2147483648] = ilocal", iw::AsmType::Int()},
- {"U8[ilocal >> 0] = ilocal", iw::AsmType::Int()},
- {"U8[0] = ilocal", iw::AsmType::Int()},
- {"U8[2147483648] = ilocal", iw::AsmType::Int()},
- {"I16[ilocal >> 1] = ilocal", iw::AsmType::Int()},
- {"I16[0] = ilocal", iw::AsmType::Int()},
- {"I16[1073741824] = ilocal", iw::AsmType::Int()}, // not pre-shifted.
- {"U16[ilocal >> 1] = ilocal", iw::AsmType::Int()},
- {"U16[0] = ilocal", iw::AsmType::Int()},
- {"U16[1073741824] = ilocal", iw::AsmType::Int()}, // not pre-shifted.
- {"I32[ilocal >> 2] = ilocal", iw::AsmType::Int()},
- {"I32[0] = ilocal", iw::AsmType::Int()},
- {"I32[536870912] = ilocal", iw::AsmType::Int()}, // not pre-shifted.
- {"U32[ilocal >> 2] = ilocal", iw::AsmType::Int()},
- {"U32[0] = ilocal", iw::AsmType::Int()},
- {"U32[536870912] = ilocal", iw::AsmType::Int()}, // not pre-shifted.
- // Storing intish to int heap view.
- {"I8[ilocal] = iish", iw::AsmType::Intish()},
- {"I8[iish >> 0] = iish", iw::AsmType::Intish()},
- {"I8[0] = iish", iw::AsmType::Intish()},
- {"I8[2147483648] = iish", iw::AsmType::Intish()},
- {"U8[iish >> 0] = iish", iw::AsmType::Intish()},
- {"U8[0] = iish", iw::AsmType::Intish()},
- {"U8[2147483648] = iish", iw::AsmType::Intish()},
- {"I16[iish >> 1] = iish", iw::AsmType::Intish()},
- {"I16[0] = iish", iw::AsmType::Intish()},
- {"I16[1073741824] = iish", iw::AsmType::Intish()}, // not pre-shifted.
- {"U16[iish >> 1] = iish", iw::AsmType::Intish()},
- {"U16[0] = iish", iw::AsmType::Intish()},
- {"U16[1073741824] = iish", iw::AsmType::Intish()}, // not pre-shifted.
- {"I32[iish >> 2] = iish", iw::AsmType::Intish()},
- {"I32[0] = iish", iw::AsmType::Intish()},
- {"I32[536870912] = iish", iw::AsmType::Intish()}, // not pre-shifted.
- {"U32[iish >> 2] = iish", iw::AsmType::Intish()},
- {"U32[0] = iish", iw::AsmType::Intish()},
- {"U32[536870912] = iish", iw::AsmType::Intish()}, // not pre-shifted.
- // Storing floatish to f32 heap view.
- {"F32[iish >> 2] = fish", iw::AsmType::Floatish()},
- {"F32[0] = fish", iw::AsmType::Floatish()},
- {"F32[536870912] = fish ", iw::AsmType::Floatish()}, // not pre-shifted.
- // Storing double? to f32 heap view.
- {"F32[iish >> 2] = dq", iw::AsmType::DoubleQ()},
- {"F32[0] = dq", iw::AsmType::DoubleQ()},
- {"F32[536870912] = dq", iw::AsmType::DoubleQ()}, // not pre-shifted.
- // Storing float? to f64 heap view.
- {"F64[iish >> 3] = fq", iw::AsmType::FloatQ()},
- {"F64[0] = fq", iw::AsmType::FloatQ()},
- {"F64[268435456] = fq", iw::AsmType::FloatQ()}, // not pre-shifted.
- // Storing double? to f64 heap view.
- {"F64[iish >> 3] = dq", iw::AsmType::DoubleQ()},
- {"F64[0] = dq", iw::AsmType::DoubleQ()},
- {"F64[268435456] = dq", iw::AsmType::DoubleQ()}, // not pre-shifted.
- // -----------------------------------------------------------------------
- // Scalar assignments.
- {"ilocal = 1024", iw::AsmType::FixNum()},
- {"ilocal = -1024", iw::AsmType::Signed()},
- {"ilocal = 2147483648", iw::AsmType::Unsigned()},
- {"ilocal = iglobal", iw::AsmType::Int()},
- {"iglobal = 1024", iw::AsmType::FixNum()},
- {"iglobal = -1024", iw::AsmType::Signed()},
- {"iglobal = 2147483648", iw::AsmType::Unsigned()},
- {"iglobal = ilocal", iw::AsmType::Int()},
- {"dlocal = 0.0", iw::AsmType::Double()},
- {"dlocal = +make_double()", iw::AsmType::Double()},
- {"dglobal = 0.0", iw::AsmType::Double()},
- {"dglobal = +make_double()", iw::AsmType::Double()},
- {"flocal = fround(0)", iw::AsmType::Float()},
- {"flocal = fround(make_float())", iw::AsmType::Float()},
- {"fglobal = fround(0)", iw::AsmType::Float()},
- {"fglobal = fround(make_float())", iw::AsmType::Float()},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
- ->WithLocal(DynamicGlobal("fq"), iw::AsmType::FloatQ())
- ->WithLocal(DynamicGlobal("dq"), iw::AsmType::DoubleQ())
- ->WithLocal(DynamicGlobal("fish"), iw::AsmType::Floatish())
- ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
- ->WithGlobal(DynamicGlobal("iglobal"), iw::AsmType::Int())
- ->WithGlobal(DynamicGlobal("dglobal"), iw::AsmType::Double())
- ->WithGlobal(DynamicGlobal("fglobal"), iw::AsmType::Float())
- ->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
- ->WithLocal(DynamicGlobal("dlocal"), iw::AsmType::Double())
- ->WithLocal(DynamicGlobal("flocal"), iw::AsmType::Float())
- ->WithGlobal(DynamicGlobal("I8"), iw::AsmType::Int8Array())
- ->WithGlobal(DynamicGlobal("U8"), iw::AsmType::Uint8Array())
- ->WithGlobal(DynamicGlobal("I16"), iw::AsmType::Int16Array())
- ->WithGlobal(DynamicGlobal("U16"), iw::AsmType::Uint16Array())
- ->WithGlobal(DynamicGlobal("I32"), iw::AsmType::Int32Array())
- ->WithGlobal(DynamicGlobal("U32"), iw::AsmType::Uint32Array())
- ->WithGlobal(DynamicGlobal("F32"), iw::AsmType::Float32Array())
- ->WithGlobal(DynamicGlobal("F64"), iw::AsmType::Float64Array())
- ->WithGlobal(DynamicGlobal("make_float"), nullptr)
- ->WithGlobal(DynamicGlobal("make_double"), nullptr)
- ->SucceedsWithExactType(test->load_type)) {
- std::cerr << "Test:\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateUnaryExpression) {
- auto v2d = [](Zone* zone) -> iw::AsmType* {
- auto* ret = iw::AsmType::Function(zone, iw::AsmType::Double());
- return ret;
- };
-
- const struct {
- const char* expression;
- iw::AsmType* load_type;
- } kTests[] = {
- {"-2147483648", iw::AsmType::Signed()},
- {"-1024", iw::AsmType::Signed()},
- {"-1", iw::AsmType::Signed()},
- {"-2147483648.0", iw::AsmType::Double()},
- {"+make_double()", iw::AsmType::Double()},
- {"+dbl()", iw::AsmType::Double()},
- {"make_double() * 1.0", iw::AsmType::Double()}, // Violation.
- {"~~fq", iw::AsmType::Signed()},
- {"~~dglobal", iw::AsmType::Signed()},
- {"+slocal", iw::AsmType::Double()},
- {"slocal * 1.0", iw::AsmType::Double()}, // Violation.
- {"+ulocal", iw::AsmType::Double()},
- {"ulocal * 1.0", iw::AsmType::Double()}, // Violation.
- {"+dq", iw::AsmType::Double()},
- {"dq * 1.0", iw::AsmType::Double()}, // Violation.
- {"+fq", iw::AsmType::Double()},
- {"fq * 1.0", iw::AsmType::Double()}, // Violation.
- {"-ilocal", iw::AsmType::Intish()},
- {"ilocal * -1", iw::AsmType::Intish()}, // Violation.
- {"-dq", iw::AsmType::Double()},
- {"dq * -1", iw::AsmType::Double()}, // Violation.
- {"-fq", iw::AsmType::Floatish()},
- {"fq * -1", iw::AsmType::Floatish()}, // Violation.
- {"~iish", iw::AsmType::Signed()},
- {"iish ^ -1", iw::AsmType::Signed()}, // Violation, but OK.
- {"!ilocal", iw::AsmType::Int()},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithLocal(DynamicGlobal("fq"), iw::AsmType::FloatQ())
- ->WithLocal(DynamicGlobal("dq"), iw::AsmType::DoubleQ())
- ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
- ->WithLocal(DynamicGlobal("slocal"), iw::AsmType::Signed())
- ->WithLocal(DynamicGlobal("ulocal"), iw::AsmType::Unsigned())
- ->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
- ->WithGlobal(DynamicGlobal("dglobal"), iw::AsmType::Double())
- ->WithGlobal(DynamicGlobal("make_double"), nullptr)
- ->WithGlobal(DynamicGlobal("dbl"), v2d)
- ->SucceedsWithExactType(test->load_type)) {
- std::cerr << "Test:\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateMultiplicativeExpression) {
- const struct {
- const char* expression;
- iw::AsmType* load_type;
- } kTests[] = {
- {"dq * dq", iw::AsmType::Double()},
- {"fq * fq", iw::AsmType::Floatish()},
- {"slocal / slocal", iw::AsmType::Intish()},
- {"ulocal / ulocal", iw::AsmType::Intish()},
- {"dq / dq", iw::AsmType::Double()},
- {"fq / fq", iw::AsmType::Floatish()},
- {"slocal % slocal", iw::AsmType::Intish()},
- {"ulocal % ulocal", iw::AsmType::Intish()},
- {"dq % dq", iw::AsmType::Double()},
- {"-1048575 * ilocal", iw::AsmType::Intish()},
- {"ilocal * -1048575", iw::AsmType::Intish()},
- {"1048575 * ilocal", iw::AsmType::Intish()},
- {"ilocal * 1048575", iw::AsmType::Intish()},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithLocal(DynamicGlobal("fq"), iw::AsmType::FloatQ())
- ->WithLocal(DynamicGlobal("dq"), iw::AsmType::DoubleQ())
- ->WithLocal(DynamicGlobal("slocal"), iw::AsmType::Signed())
- ->WithLocal(DynamicGlobal("ulocal"), iw::AsmType::Unsigned())
- ->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
- ->WithGlobal(DynamicGlobal("dglobal"), iw::AsmType::Double())
- ->SucceedsWithExactType(test->load_type)) {
- std::cerr << "Test:\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateAdditiveExpression) {
- const struct {
- const char* expression;
- iw::AsmType* load_type;
- } kTests[] = {
- {"dlocal + dlocal", iw::AsmType::Double()},
- {"fq + fq", iw::AsmType::Floatish()},
- {"dq - dq", iw::AsmType::Double()},
- {"fq - fq", iw::AsmType::Floatish()},
- {"ilocal + 1", iw::AsmType::Intish()},
- {"ilocal - 1", iw::AsmType::Intish()},
- {"slocal + ilocal + 1", iw::AsmType::Intish()},
- {"slocal - ilocal + 1", iw::AsmType::Intish()},
- {"ulocal + ilocal + 1", iw::AsmType::Intish()},
- {"ulocal - ilocal + 1", iw::AsmType::Intish()},
- {"ulocal + slocal + ilocal + 1", iw::AsmType::Intish()},
- {"ulocal + slocal - ilocal + 1", iw::AsmType::Intish()},
- {"ulocal - slocal + ilocal + 1", iw::AsmType::Intish()},
- {"ulocal - slocal - ilocal + 1", iw::AsmType::Intish()},
- {"1 + 1", iw::AsmType::FixNum()}, // Violation: intish.
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithLocal(DynamicGlobal("fq"), iw::AsmType::FloatQ())
- ->WithLocal(DynamicGlobal("dq"), iw::AsmType::DoubleQ())
- ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
- ->WithLocal(DynamicGlobal("dlocal"), iw::AsmType::Double())
- ->WithLocal(DynamicGlobal("slocal"), iw::AsmType::Signed())
- ->WithLocal(DynamicGlobal("ulocal"), iw::AsmType::Unsigned())
- ->WithLocal(DynamicGlobal("ilocal"), iw::AsmType::Int())
- ->SucceedsWithExactType(test->load_type)) {
- std::cerr << "Test:\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateShiftExpression) {
- const struct {
- const char* expression;
- iw::AsmType* load_type;
- } kTests[] = {
- {"iish << iish", iw::AsmType::Signed()},
- {"iish >> iish", iw::AsmType::Signed()},
- {"iish >>> iish", iw::AsmType::Unsigned()},
- {"1 << 0", iw::AsmType::FixNum()}, // Violation: signed.
- {"1 >> 0", iw::AsmType::FixNum()}, // Violation: signed.
- {"4294967295 >>> 0", iw::AsmType::Unsigned()},
- {"-1 >>> 0", iw::AsmType::Unsigned()},
- {"2147483647 >>> 0", iw::AsmType::FixNum()}, // Violation: unsigned.
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
- ->SucceedsWithExactType(test->load_type)) {
- std::cerr << "Test:\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateComparisonExpression) {
- const struct {
- const char* expression;
- iw::AsmType* load_type;
- } kTests[] = {
- // -----------------------------------------------------------------------
- // Non const <op> Non const
- {"s0 == s1", iw::AsmType::Int()},
- {"u0 == u1", iw::AsmType::Int()},
- {"f0 == f1", iw::AsmType::Int()},
- {"d0 == d1", iw::AsmType::Int()},
- {"s0 != s1", iw::AsmType::Int()},
- {"u0 != u1", iw::AsmType::Int()},
- {"f0 != f1", iw::AsmType::Int()},
- {"d0 != d1", iw::AsmType::Int()},
- {"s0 < s1", iw::AsmType::Int()},
- {"u0 < u1", iw::AsmType::Int()},
- {"f0 < f1", iw::AsmType::Int()},
- {"d0 < d1", iw::AsmType::Int()},
- {"s0 <= s1", iw::AsmType::Int()},
- {"u0 <= u1", iw::AsmType::Int()},
- {"f0 <= f1", iw::AsmType::Int()},
- {"d0 <= d1", iw::AsmType::Int()},
- {"s0 > s1", iw::AsmType::Int()},
- {"u0 > u1", iw::AsmType::Int()},
- {"f0 > f1", iw::AsmType::Int()},
- {"d0 > d1", iw::AsmType::Int()},
- {"s0 >= s1", iw::AsmType::Int()},
- {"u0 >= u1", iw::AsmType::Int()},
- {"f0 >= f1", iw::AsmType::Int()},
- {"d0 >= d1", iw::AsmType::Int()},
- // -----------------------------------------------------------------------
- // Non const <op> Const
- {"s0 == -1025", iw::AsmType::Int()},
- {"u0 == 123456789", iw::AsmType::Int()},
- {"f0 == fround(123456.78)", iw::AsmType::Int()},
- {"d0 == 9876543.201", iw::AsmType::Int()},
- {"s0 != -1025", iw::AsmType::Int()},
- {"u0 != 123456789", iw::AsmType::Int()},
- {"f0 != fround(123456.78)", iw::AsmType::Int()},
- {"d0 != 9876543.201", iw::AsmType::Int()},
- {"s0 < -1025", iw::AsmType::Int()},
- {"u0 < 123456789", iw::AsmType::Int()},
- {"f0 < fround(123456.78)", iw::AsmType::Int()},
- {"d0 < 9876543.201", iw::AsmType::Int()},
- {"s0 <= -1025", iw::AsmType::Int()},
- {"u0 <= 123456789", iw::AsmType::Int()},
- {"f0 <= fround(123456.78)", iw::AsmType::Int()},
- {"d0 <= 9876543.201", iw::AsmType::Int()},
- {"s0 > -1025", iw::AsmType::Int()},
- {"u0 > 123456789", iw::AsmType::Int()},
- {"f0 > fround(123456.78)", iw::AsmType::Int()},
- {"d0 > 9876543.201", iw::AsmType::Int()},
- {"s0 >= -1025", iw::AsmType::Int()},
- {"u0 >= 123456789", iw::AsmType::Int()},
- {"f0 >= fround(123456.78)", iw::AsmType::Int()},
- {"d0 >= 9876543.201", iw::AsmType::Int()},
- // -----------------------------------------------------------------------
- // Const <op> Non const
- {"-1025 == s0", iw::AsmType::Int()},
- {"123456789 == u0", iw::AsmType::Int()},
- {"fround(123456.78) == f0", iw::AsmType::Int()},
- {"9876543.201 == d0", iw::AsmType::Int()},
- {"-1025 != s0", iw::AsmType::Int()},
- {"123456789 != u0", iw::AsmType::Int()},
- {"fround(123456.78) != f0", iw::AsmType::Int()},
- {"9876543.201 != d0", iw::AsmType::Int()},
- {"-1025 < s0", iw::AsmType::Int()},
- {"123456789 < u0", iw::AsmType::Int()},
- {"fround(123456.78) < f0", iw::AsmType::Int()},
- {"9876543.201 < d0", iw::AsmType::Int()},
- {"-1025 <= s0", iw::AsmType::Int()},
- {"123456789 <= u0", iw::AsmType::Int()},
- {"fround(123456.78) <= f0", iw::AsmType::Int()},
- {"9876543.201 <= d0", iw::AsmType::Int()},
- {"-1025 > s0", iw::AsmType::Int()},
- {"123456789 > u0", iw::AsmType::Int()},
- {"fround(123456.78) > f0", iw::AsmType::Int()},
- {"9876543.201 > d0", iw::AsmType::Int()},
- {"-1025 >= s0", iw::AsmType::Int()},
- {"123456789 >= u0", iw::AsmType::Int()},
- {"fround(123456.78) >= f0", iw::AsmType::Int()},
- {"9876543.201 >= d0", iw::AsmType::Int()},
- // TODO(jpp): maybe add Const <op> Const.
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
- ->WithLocal(DynamicGlobal("u0"), iw::AsmType::Unsigned())
- ->WithLocal(DynamicGlobal("u1"), iw::AsmType::Unsigned())
- ->WithLocal(DynamicGlobal("s0"), iw::AsmType::Signed())
- ->WithLocal(DynamicGlobal("s1"), iw::AsmType::Signed())
- ->WithLocal(DynamicGlobal("f0"), iw::AsmType::Float())
- ->WithLocal(DynamicGlobal("f1"), iw::AsmType::Float())
- ->WithLocal(DynamicGlobal("d0"), iw::AsmType::Double())
- ->WithLocal(DynamicGlobal("d1"), iw::AsmType::Double())
- ->SucceedsWithExactType(test->load_type)) {
- std::cerr << "Test:\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateBitwiseExpression) {
- auto v2s = [](Zone* zone) -> iw::AsmType* {
- auto* ret = iw::AsmType::Function(zone, iw::AsmType::Signed());
- return ret;
- };
-
- const struct {
- const char* expression;
- iw::AsmType* load_type;
- } kTests[] = {
- {"iish0 & iish1", iw::AsmType::Signed()},
- {"iish0 | iish1", iw::AsmType::Signed()},
- {"iish0 ^ iish1", iw::AsmType::Signed()},
- {"iish0 & -1", iw::AsmType::Signed()},
- {"iish0 | -1", iw::AsmType::Signed()},
- {"iish0 ^ -1", iw::AsmType::Signed()},
- {"2147483648 & iish1", iw::AsmType::Signed()},
- {"2147483648 | iish1", iw::AsmType::Signed()},
- {"2147483648 ^ iish1", iw::AsmType::Signed()},
- {"2147483648 & 0", iw::AsmType::FixNum()}, // Violation: signed.
- {"2147483648 | 0", iw::AsmType::Signed()},
- {"2147483648 ^ 0", iw::AsmType::Signed()},
- {"2134651 & 123", iw::AsmType::FixNum()}, // Violation: signed.
- {"2134651 | 123", iw::AsmType::FixNum()}, // Violation: signed.
- {"2134651 ^ 123", iw::AsmType::FixNum()}, // Violation: signed.
- {"make_signed()|0", iw::AsmType::Signed()},
- {"signed()|0", iw::AsmType::Signed()},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithLocal(DynamicGlobal("iish1"), iw::AsmType::Intish())
- ->WithLocal(DynamicGlobal("iish0"), iw::AsmType::Intish())
- ->WithGlobal(DynamicGlobal("signed"), v2s)
- ->WithGlobal(DynamicGlobal("make_signed"), nullptr)
- ->SucceedsWithExactType(test->load_type)) {
- std::cerr << "Test:\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateConditionalExpression) {
- const struct {
- const char* expression;
- iw::AsmType* load_type;
- } kTests[] = {
- {"i0 ? i0 : i1", iw::AsmType::Int()},
- {"i0 ? f0 : f1", iw::AsmType::Float()},
- {"i0 ? d0 : d1", iw::AsmType::Double()},
- {"0 ? -1 : 2147483648", iw::AsmType::Int()},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithLocal(DynamicGlobal("i0"), iw::AsmType::Int())
- ->WithLocal(DynamicGlobal("i1"), iw::AsmType::Int())
- ->WithLocal(DynamicGlobal("f0"), iw::AsmType::Float())
- ->WithLocal(DynamicGlobal("f1"), iw::AsmType::Float())
- ->WithLocal(DynamicGlobal("d0"), iw::AsmType::Double())
- ->WithLocal(DynamicGlobal("d1"), iw::AsmType::Double())
- ->SucceedsWithExactType(test->load_type)) {
- std::cerr << "Test:\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(ValidateCall) {
- auto v2f = [](Zone* zone) -> iw::AsmType* {
- auto* ret = iw::AsmType::Function(zone, iw::AsmType::Float());
- return ret;
- };
-
- // ifd2_ is a helper function that returns a lambda for creating a function
- // type that accepts an int, a float, and a double. ret_type_factory is a
- // pointer to an AsmType*() function, and (*ret_type_factory)() returns the
- // desired return type. For example,
- //
- // ifd2_(&iw::AsmType::Float)
- //
- // returns an AsmType representing an asm.js function with the following
- // signature:
- //
- // float(int, float, double)
- auto ifd2_ = [](iw::AsmType* (
- *ret_type_factory)()) -> std::function<iw::AsmType*(Zone*)> {
- return [ret_type_factory](Zone* zone) -> iw::AsmType* {
- auto* ret = iw::AsmType::Function(zone, (*ret_type_factory)());
- ret->AsFunctionType()->AddArgument(iw::AsmType::Int());
- ret->AsFunctionType()->AddArgument(iw::AsmType::Float());
- ret->AsFunctionType()->AddArgument(iw::AsmType::Double());
- return ret;
- };
- };
- auto ifd2f = ifd2_(&iw::AsmType::Float);
- auto ifd2d = ifd2_(&iw::AsmType::Double);
- auto ifd2i = ifd2_(&iw::AsmType::Signed);
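- // For reference, the three factories above produce these signatures (as
- // implied by ifd2_'s argument list):
- //   ifd2f : float(int, float, double)
- //   ifd2d : double(int, float, double)
- //   ifd2i : signed(int, float, double)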
-
- // Just like ifd2_, but this one returns a type representing a function table.
- auto tbl_ifd2_ = [](size_t tbl_size, iw::AsmType* (*ret_type_factory)())
- -> std::function<iw::AsmType*(Zone*)> {
- return [tbl_size, ret_type_factory](Zone* zone) -> iw::AsmType* {
- auto* signature = iw::AsmType::Function(zone, (*ret_type_factory)());
- signature->AsFunctionType()->AddArgument(iw::AsmType::Int());
- signature->AsFunctionType()->AddArgument(iw::AsmType::Float());
- signature->AsFunctionType()->AddArgument(iw::AsmType::Double());
-
- auto* ret = iw::AsmType::FunctionTableType(zone, tbl_size, signature);
- return ret;
- };
- };
- auto ifd2f_tbl = tbl_ifd2_(32, &iw::AsmType::Float);
- auto ifd2d_tbl = tbl_ifd2_(64, &iw::AsmType::Double);
- auto ifd2i_tbl = tbl_ifd2_(4096, &iw::AsmType::Signed);
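- // The table factories wrap that same signature in fixed-size tables:
- //   ifd2f_tbl : float(int, float, double)[32]
- //   ifd2d_tbl : double(int, float, double)[64]
- //   ifd2i_tbl : signed(int, float, double)[4096]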
-
- const struct {
- const char* expression;
- iw::AsmType* load_type;
- } kTests[] = {
- // -----------------------------------------------------------------------
- // Functions.
- {"fround(v2f())", iw::AsmType::Float()},
- {"fround(fish)", iw::AsmType::Float()},
- {"fround(dq)", iw::AsmType::Float()},
- {"fround(s)", iw::AsmType::Float()},
- {"fround(u)", iw::AsmType::Float()},
- {"ffi()|0", iw::AsmType::Signed()},
- {"ffi(1.0)|0", iw::AsmType::Signed()},
- {"ffi(1.0, 2.0)|0", iw::AsmType::Signed()},
- {"ffi(1.0, 2.0, 3)|0", iw::AsmType::Signed()},
- {"ffi(1.0, 2.0, 3, 4)|0", iw::AsmType::Signed()},
- {"+ffi()", iw::AsmType::Double()},
- {"+ffi(1.0)", iw::AsmType::Double()},
- {"+ffi(1.0, 2.0)", iw::AsmType::Double()},
- {"+ffi(1.0, 2.0, 3)", iw::AsmType::Double()},
- {"+ffi(1.0, 2.0, 3, 4)", iw::AsmType::Double()},
- {"fround(ifd2f(1, fround(1), 1.0))", iw::AsmType::Float()},
- {"+ifd2d(1, fround(1), 1.0)", iw::AsmType::Double()},
- {"ifd2i(1, fround(1), 1.0)|0", iw::AsmType::Signed()},
- // -----------------------------------------------------------------------
- // Function tables.
- {"fround(ifd2f_tbl[iish & 31](1, fround(1), 1.0))", iw::AsmType::Float()},
- {"+ifd2d_tbl[iish & 63](1, fround(1), 1.0)", iw::AsmType::Double()},
- {"ifd2i_tbl[iish & 4095](1, fround(1), 1.0)|0", iw::AsmType::Signed()},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Expression(test->expression))
- ->WithImport(DynamicGlobal("fround"), iw::AsmTyper::kMathFround)
- ->WithImport(DynamicGlobal("ffi"), iw::AsmTyper::kFFI)
- ->WithLocal(DynamicGlobal("fish"), iw::AsmType::Floatish())
- ->WithLocal(DynamicGlobal("dq"), iw::AsmType::DoubleQ())
- ->WithLocal(DynamicGlobal("s"), iw::AsmType::Signed())
- ->WithLocal(DynamicGlobal("u"), iw::AsmType::Unsigned())
- ->WithLocal(DynamicGlobal("iish"), iw::AsmType::Intish())
- ->WithGlobal(DynamicGlobal("v2f"), v2f)
- ->WithGlobal(DynamicGlobal("ifd2f"), nullptr)
- ->WithGlobal(DynamicGlobal("ifd2d"), nullptr)
- ->WithGlobal(DynamicGlobal("ifd2i"), nullptr)
- ->WithGlobal(DynamicGlobal("ifd2f_tbl"), ifd2f_tbl)
- ->WithGlobal(DynamicGlobal("ifd2d_tbl"), ifd2d_tbl)
- ->WithGlobal(DynamicGlobal("ifd2i_tbl"), ifd2i_tbl)
- ->SucceedsWithExactType(test->load_type)) {
- std::cerr << "Test:\n" << test->expression;
- CHECK(false);
- }
- }
-}
-
-TEST(CannotReferenceModuleName) {
- v8::V8::Initialize();
-
- const struct {
- const char* module;
- const char* error_message;
- } kTests[] = {
- {"function asm() {\n"
- " 'use asm';\n"
- " function f() { asm; }\n"
- "}",
- "accessed by ordinary expressions"},
- {"function asm() { 'use asm'; return asm; }", "Module cannot export"},
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- const auto* test = kTests + ii;
- if (!ValidationOf(Module(test->module))
- ->FailsWithMessage(test->error_message)) {
- std::cerr << "Test:\n" << test->module;
- CHECK(false);
- }
- }
-}
-
-TEST(InvalidSourceLayout) {
- const char* kTests[] = {
- "function asm() {\n"
- " 'use asm';\n"
- " function f() {}\n"
- " var v = 0;\n"
- " var v_v = [f];\n"
- " return f;\n"
- "}",
- "function asm() {\n"
- " 'use asm';\n"
- " function f() {}\n"
- " var v_v = [f];\n"
- " var v = 0;\n"
- " return f;\n"
- "}",
- "function asm() {\n"
- " 'use asm';\n"
- " function f() {}\n"
- " var v_v = [f];\n"
- " return f;\n"
- " var v = 0;\n"
- "}",
- "function asm() {\n"
- " 'use asm';\n"
- " var v = 0;\n"
- " var v_v = [f];\n"
- " function f() {}\n"
- " return f;\n"
- "}",
- "function asm() {\n"
- " 'use asm';\n"
- " var v = 0;\n"
- " var v_v = [f];\n"
- " return f;\n"
- " function f() {}\n"
- "}",
- "function asm() {\n"
- " 'use asm';\n"
- " var v = 0;\n"
- " function f() {}\n"
- " return f;\n"
- " var v_v = [f];\n"
- "}",
- "function asm() {\n"
- " 'use asm';\n"
- " var v = 0;\n"
- " function f() {}\n"
- " var v1 = 0;\n"
- " var v_v = [f];\n"
- " return f;\n"
- "}",
- "function asm() {\n"
- " 'use asm';\n"
- " var v = 0;\n"
- " function f() {}\n"
- " var v_v = [f];\n"
- " var v1 = 0;\n"
- " return f;\n"
- "}",
- "function asm() {\n"
- " 'use asm';\n"
- " var v = 0;\n"
- " function f() {}\n"
- " var v_v = [f];\n"
- " return f;\n"
- " var v1 = 0;\n"
- "}",
- "function asm() {\n"
- " function f() {}\n"
- " 'use asm';\n"
- " var v_v = [f];\n"
- " return f;\n"
- "}",
- "function asm() {\n"
- " 'use asm';\n"
- " return f;\n"
- " var v = 0;\n"
- " function f() {}\n"
- " var v_v = [f];\n"
- "}",
- "function asm() {\n"
- " 'use asm';\n"
- " return f;\n"
- " function f() {}\n"
- "}",
- "function __f_59() {\n"
- " 'use asm';\n"
- " function __f_110() {\n"
- " return 71;\n"
- " }\n"
- " function __f_21() {\n"
- " var __v_38 = 0;\n"
- " return __v_23[__v_38&0]() | 0;\n"
- " }\n"
- " return {__f_21:__f_21};\n"
- " var __v_23 = [__f_110];\n"
- "}",
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- if (!ValidationOf(Module(kTests[ii]))
- ->FailsWithMessage("Invalid asm.js source code layout")) {
- std::cerr << "Test:\n" << kTests[ii];
- CHECK(false);
- }
- }
-}
-
-// This issue was triggered because of the "lenient" 8-bit heap access code
-// path. The canonical heap access index validation fails because __v_34 is not
-// an intish. Then, during the "lenient" code path for accessing elements in
-// 8-bit heap views, the __v_34 node in the indexing expression would be
-// re-tagged, thus causing the assertion failure.
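-// Both modules below are therefore expected to fail cleanly with "Invalid
-// heap access index" instead of tripping that assertion.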
-TEST(B63099) {
- const char* kTests[] = {
- "function __f_109(stdlib, __v_36, buffer) {\n"
- " 'use asm';\n"
- " var __v_34 = new stdlib.Uint8Array(buffer);\n"
- " function __f_22() {__v_34[__v_34>>0]|0 + 1 | 0;\n"
- " }\n"
- "}",
- "function __f_109(stdlib, __v_36, buffer) {\n"
- " 'use asm';\n"
- " var __v_34 = new stdlib.Int8Array(buffer);\n"
- " function __f_22() {__v_34[__v_34>>0]|0 + 1 | 0;\n"
- " }\n"
- "}",
- };
-
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- if (!ValidationOf(Module(kTests[ii]))
- ->FailsWithMessage("Invalid heap access index")) {
- std::cerr << "Test:\n" << kTests[ii];
- CHECK(false);
- }
- }
-}
-
-// This issue was triggered because assignments to immutable symbols (e.g., the
-// module's name, or any of the asm.js module's parameters) were not being
-// handled.
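-// Each module below assigns to one such immutable symbol (the module name,
-// stdlib, foreign, heap, or a stdlib-derived binding) and must fail with
-// "Can't assign to immutable symbol".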
-TEST(B640194) {
- const char* kTests[] = {
- "function asm() {\n"
- " 'use asm';\n"
- " function f() {\n"
- " asm = 0;\n"
- " }\n"
- " return f;\n"
- "}",
- "function asm(stdlib) {\n"
- " 'use asm';\n"
- " function f() {\n"
- " stdlib = 0;\n"
- " }\n"
- " return f;\n"
- "}",
- "function asm(stdlib, foreign) {\n"
- " 'use asm';\n"
- " function f() {\n"
- " foreign = 0;\n"
- " }\n"
- " return f;\n"
- "}",
- "function asm(stdlib, foreign, heap) {\n"
- " 'use asm';\n"
- " function f() {\n"
- " heap = 0;\n"
- " }\n"
- " return f;\n"
- "}",
- "function asm(stdlib, foreign, heap) {\n"
- " 'use asm';\n"
- " var f = stdlib.Math.fround;\n"
- " function f() {\n"
- " f = 0;\n"
- " }\n"
- " return f;\n"
- "}",
- "function asm(stdlib, foreign, heap) {\n"
- " 'use asm';\n"
- " var E = stdlib.Math.E;\n"
- " function f() {\n"
- " E = 0;\n"
- " }\n"
- " return f;\n"
- "}",
- };
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- if (!ValidationOf(Module(kTests[ii]))
- ->FailsWithMessage("Can't assign to immutable symbol")) {
- std::cerr << "Test:\n" << kTests[ii];
- CHECK(false);
- }
- }
-}
-
-TEST(B660813) {
- const char* kTests[] = {
- "function asm() {\n"
- " 'use asm';\n"
- " const i = 0xffffffff;\n"
- " function f() {\n"
- " return i;\n"
- " }\n"
- "}",
- "function asm() {\n"
- " 'use asm';\n"
- " const i = -(-2147483648);\n"
- " function f() {\n"
- " return i;\n"
- " }\n"
- "}",
- };
- for (size_t ii = 0; ii < arraysize(kTests); ++ii) {
- if (!ValidationOf(Module(kTests[ii]))
- ->FailsWithMessage(
- "Constant in return must be signed, float, or double.")) {
- std::cerr << "Test:\n" << kTests[ii];
- CHECK(false);
- }
- }
-}
-
-} // namespace
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index f94d9bc858..68bcd653bd 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -43,8 +43,8 @@
#endif
#endif
-enum InitializationState {kUnset, kUnintialized, kInitialized};
-static InitializationState initialization_state_ = kUnset;
+enum InitializationState { kUnset, kUninitialized, kInitialized };
+static InitializationState initialization_state_ = kUnset;
static bool disable_automatic_dispose_ = false;
CcTest* CcTest::last_ = NULL;
@@ -83,10 +83,10 @@ CcTest::CcTest(TestFunction* callback, const char* file, const char* name,
void CcTest::Run() {
if (!initialize_) {
CHECK(initialization_state_ != kInitialized);
- initialization_state_ = kUnintialized;
+ initialization_state_ = kUninitialized;
CHECK(CcTest::isolate_ == NULL);
} else {
- CHECK(initialization_state_ != kUnintialized);
+ CHECK(initialization_state_ != kUninitialized);
initialization_state_ = kInitialized;
if (isolate_ == NULL) {
v8::Isolate::CreateParams create_params;
@@ -113,6 +113,10 @@ void CcTest::CollectGarbage(i::AllocationSpace space) {
heap()->CollectGarbage(space, i::GarbageCollectionReason::kTesting);
}
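+// Convenience overload: collects all garbage with
+// kFinalizeIncrementalMarkingMask, letting call sites drop the explicit flag
+// (see the heap test changes later in this diff).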
+void CcTest::CollectAllGarbage() {
+ CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+}
+
void CcTest::CollectAllGarbage(int flags) {
heap()->CollectAllGarbage(flags, i::GarbageCollectionReason::kTesting);
}
@@ -157,7 +161,7 @@ v8::Local<v8::Context> CcTest::NewContext(CcTestExtensionFlags extensions,
void CcTest::DisableAutomaticDispose() {
- CHECK_EQ(kUnintialized, initialization_state_);
+ CHECK_EQ(kUninitialized, initialization_state_);
disable_automatic_dispose_ = true;
}
@@ -210,20 +214,6 @@ static void PrintTestList(CcTest* current) {
}
-class CcTestArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
- virtual void* Allocate(size_t length) {
- void* data = AllocateUninitialized(length == 0 ? 1 : length);
- return data == NULL ? data : memset(data, 0, length);
- }
- virtual void* AllocateUninitialized(size_t length) {
- return malloc(length == 0 ? 1 : length);
- }
- virtual void Free(void* data, size_t length) { free(data); }
- // TODO(dslomov): Remove when v8:2823 is fixed.
- virtual void Free(void* data) { UNREACHABLE(); }
-};
-
-
static void SuggestTestHarness(int tests) {
if (tests == 0) return;
printf("Running multiple tests in sequence is deprecated and may cause "
@@ -273,8 +263,8 @@ int main(int argc, char* argv[]) {
v8::V8::RegisterDefaultSignalHandler();
}
- CcTestArrayBufferAllocator array_buffer_allocator;
- CcTest::set_array_buffer_allocator(&array_buffer_allocator);
+ CcTest::set_array_buffer_allocator(
+ v8::ArrayBuffer::Allocator::NewDefaultAllocator());
i::PrintExtension print_extension;
v8::RegisterExtension(&print_extension);
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 55581ea404..cf30741769 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -32,7 +32,6 @@
'v8_code': 1,
'generated_file': '<(SHARED_INTERMEDIATE_DIR)/resources.cc',
'cctest_sources': [ ### gcmole(all) ###
- 'asmjs/test-asm-typer.cc',
'ast-types-fuzz.h',
'compiler/c-signature.h',
'compiler/call-tester.h',
@@ -337,6 +336,7 @@
],
'sources': [
'../common/wasm/test-signatures.h',
+ '../common/wasm/wasm-macro-gen.h',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'<@(cctest_sources)',
@@ -444,6 +444,9 @@
}, {
'dependencies': ['../../src/v8.gyp:v8'],
}],
+ ['v8_use_snapshot=="true"', {
+ 'dependencies': ['../../src/v8.gyp:v8_builtins_generators'],
+ }],
],
},
{
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 7ff55446b1..8ef5d23418 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -126,6 +126,7 @@ class CcTest {
static i::Heap* heap();
static void CollectGarbage(i::AllocationSpace space);
+ static void CollectAllGarbage();
static void CollectAllGarbage(int flags);
static void CollectAllAvailableGarbage();
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 60cc1a444c..b2a21a33a3 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -81,9 +81,13 @@
'test-cpu-profiler/SampleWhenFrameIsNotSetup': [SKIP],
# BUG(v8:5193): Flaky.
+ 'test-cpu-profiler/CollectDeoptEvents': [SKIP],
'test-cpu-profiler/FunctionApplySample': [SKIP],
'test-cpu-profiler/JsNative1JsNative2JsSample': [SKIP],
'test-cpu-profiler/JsNativeJsRuntimeJsSample': [SKIP],
+ 'test-cpu-profiler/JsNativeJsRuntimeJsSampleMultiple': [SKIP],
+ 'test-cpu-profiler/JsNativeJsSample': [SKIP],
+ 'test-sampler/LibSamplerCollectSample': [SKIP],
# BUG(2340). Preprocessing stack traces is disabled at the moment.
'test-heap/PreprocessStackTrace': [FAIL],
@@ -140,14 +144,14 @@
'test-api/LoadICFastApi_DirectCall_GCMoveStubWithProfiler': [SKIP],
}], # 'arch == arm64'
-['arch == arm64 and simulator_run == True', {
+['arch == arm64 and simulator_run', {
# Pass but take too long with the simulator.
'test-api/ExternalArrays': [PASS, TIMEOUT],
'test-api/Threading*': [PASS, SKIP],
-}], # 'arch == arm64 and simulator_run == True'
+}], # 'arch == arm64 and simulator_run'
-['arch == arm64 and mode == debug and simulator_run == True', {
+['arch == arm64 and mode == debug and simulator_run', {
# Pass but take too long with the simulator in debug mode.
'test-api/ExternalDoubleArray': [SKIP],
@@ -159,7 +163,7 @@
# BUG(v8:5193): Flaky crash.
'test-sampler/LibSamplerCollectSample': [SKIP],
-}], # 'arch == arm64 and mode == debug and simulator_run == True'
+}], # 'arch == arm64 and mode == debug and simulator_run'
##############################################################################
['asan == True', {
@@ -243,6 +247,17 @@
}], # 'system == windows'
##############################################################################
+['system == windows and arch == x64 and mode == debug', {
+ # BUG(v8:6328).
+ 'test-serialize/PartialSerializerCustomContext': [SKIP],
+ 'test-serialize/PartialSerializerObject': [SKIP],
+ 'test-serialize/StartupSerializerOnce': [SKIP],
+ 'test-serialize/StartupSerializerOnceRunScript': [SKIP],
+ 'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
+ 'test-serialize/StartupSerializerTwice': [SKIP],
+}], # 'system == windows and arch == x64 and mode == debug'
+
+##############################################################################
['system == macos', {
# BUG(v8:5193) Flaky failures.
'test-cpu-profiler/JsNativeJsSample': [SKIP],
@@ -250,6 +265,13 @@
}], # 'system == macos'
##############################################################################
+['arch == arm and simulator_run', {
+
+ # Pass but take too long with the simulator.
+ 'test-api/Threading*': [PASS, SKIP],
+}], # 'arch == arm and simulator_run'
+
+##############################################################################
['arch == arm', {
'test-cpu-profiler/CollectDeoptEvents': [PASS, FAIL],
@@ -291,6 +313,9 @@
'test-parsing/TooManyArguments': [SKIP],
'test-api/Threading5': [SKIP],
'test-api/Threading6': [SKIP],
+
+ # BUG(v8:5193): Flaky timeout.
+ 'test-cpu-profiler/JsNativeJsRuntimeJsSampleMultiple': [SKIP],
}], # 'arch == mips'
##############################################################################
@@ -311,6 +336,16 @@
}], # 'arch == mips64el or arch == mips64'
##############################################################################
+['arch == mips or arch == mipsel or arch == mips64 or arch == mips64el', {
+ # For now, skip WASM SIMD tests that fail when the MSA instruction
+ # extension is not available (currently the simd-scalar-lowering mechanism
+ # doesn't work properly for all SIMD operations).
+ 'test-run-wasm-simd/RunWasmCompiled_S32x4Select': [SKIP],
+ 'test-run-wasm-simd/RunWasmCompiled_F32x4RecipSqrtApprox': [SKIP],
+ 'test-run-wasm-simd/RunWasmCompiled_F32x4RecipApprox': [SKIP],
+}], # 'arch == mips or arch == mipsel or arch == mips64 or arch == mips64el'
+
+##############################################################################
['arch == x87', {
'test-run-machops/RunFloat64InsertLowWord32': [SKIP],
'test-run-native-calls/MixedParams_0': [SKIP],
@@ -360,17 +395,19 @@
'test-run-wasm-module/Run_WasmModule_Return114' : [SKIP],
'test-run-wasm-module/Run_WasmModule_CallAdd' : [SKIP],
'test-run-wasm-module/Run_WasmModule_CallMain_recursive' : [SKIP],
+ # TODO(ppc): Implement load/store reverse byte instructions
+ 'test-run-wasm-simd/RunWasmCompiled_SimdLoadStoreLoad': [SKIP],
}], # 'system == aix or (arch == ppc64 and byteorder == big)'
##############################################################################
-['arch == ppc and simulator_run == True or arch == ppc64 and simulator_run == True', {
+['arch == ppc and simulator_run or arch == ppc64 and simulator_run', {
# Pass but take too long with the simulator.
'test-api/Threading*': [PASS, SLOW],
'test-api/ExternalArrays': [PASS, SLOW],
-}], # 'arch == ppc64 and simulator_run == True'
+}], # 'arch == ppc64 and simulator_run'
##############################################################################
['variant == turbofan_opt', {
@@ -385,6 +422,13 @@
}], # variant == turbofan
##############################################################################
+['variant == noturbofan and no_snap', {
+ # Too slow for old pipeline and nosnap.
+ 'test-lockers/SeparateIsolatesLocksNonexclusive': [SKIP],
+ 'test-lockers/ExtensionsRegistration': [SKIP],
+}], # variant == noturbofan and no_snap
+
+##############################################################################
['variant == wasm_traps', {
'test-accessors/*': [SKIP],
'test-api-interceptors/*': [SKIP],
diff --git a/deps/v8/test/cctest/compiler/call-tester.h b/deps/v8/test/cctest/compiler/call-tester.h
index 77d2ce1e95..06fb1b5202 100644
--- a/deps/v8/test/cctest/compiler/call-tester.h
+++ b/deps/v8/test/cctest/compiler/call-tester.h
@@ -5,6 +5,7 @@
#ifndef V8_CCTEST_COMPILER_CALL_TESTER_H_
#define V8_CCTEST_COMPILER_CALL_TESTER_H_
+#include "src/handles.h"
#include "src/simulator.h"
#include "test/cctest/compiler/c-signature.h"
@@ -127,45 +128,11 @@ class CallHelper {
}
virtual ~CallHelper() {}
- R Call() {
- typedef R V8_CDECL FType();
- CSignature::VerifyParams(csig_);
- return DoCall(FUNCTION_CAST<FType*>(Generate()));
- }
-
- template <typename P1>
- R Call(P1 p1) {
- typedef R V8_CDECL FType(P1);
- CSignature::VerifyParams<P1>(csig_);
- return DoCall(FUNCTION_CAST<FType*>(Generate()), p1);
- }
-
- template <typename P1, typename P2>
- R Call(P1 p1, P2 p2) {
- typedef R V8_CDECL FType(P1, P2);
- CSignature::VerifyParams<P1, P2>(csig_);
- return DoCall(FUNCTION_CAST<FType*>(Generate()), p1, p2);
- }
-
- template <typename P1, typename P2, typename P3>
- R Call(P1 p1, P2 p2, P3 p3) {
- typedef R V8_CDECL FType(P1, P2, P3);
- CSignature::VerifyParams<P1, P2, P3>(csig_);
- return DoCall(FUNCTION_CAST<FType*>(Generate()), p1, p2, p3);
- }
-
- template <typename P1, typename P2, typename P3, typename P4>
- R Call(P1 p1, P2 p2, P3 p3, P4 p4) {
- typedef R V8_CDECL FType(P1, P2, P3, P4);
- CSignature::VerifyParams<P1, P2, P3, P4>(csig_);
- return DoCall(FUNCTION_CAST<FType*>(Generate()), p1, p2, p3, p4);
- }
-
- template <typename P1, typename P2, typename P3, typename P4, typename P5>
- R Call(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- typedef R V8_CDECL FType(P1, P2, P3, P4, P5);
- CSignature::VerifyParams<P1, P2, P3, P4, P5>(csig_);
- return DoCall(FUNCTION_CAST<FType*>(Generate()), p1, p2, p3, p4, p5);
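+ // A single variadic overload replaces the fixed-arity Call helpers removed
+ // above; the parameter pack expands into both the function type and the
+ // argument list forwarded to DoCall.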
+ template <typename... Params>
+ R Call(Params... args) {
+ using FType = R(V8_CDECL*)(Params...);
+ CSignature::VerifyParams<Params...>(csig_);
+ return DoCall(FUNCTION_CAST<FType>(Generate()), args...);
}
protected:
@@ -180,47 +147,11 @@ class CallHelper {
return static_cast<uintptr_t>(simulator->CallInt64(f, args));
}
- template <typename F>
- R DoCall(F* f) {
- Simulator::CallArgument args[] = {Simulator::CallArgument::End()};
- return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f), args));
- }
- template <typename F, typename P1>
- R DoCall(F* f, P1 p1) {
- Simulator::CallArgument args[] = {Simulator::CallArgument(p1),
- Simulator::CallArgument::End()};
- return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f), args));
- }
- template <typename F, typename P1, typename P2>
- R DoCall(F* f, P1 p1, P2 p2) {
- Simulator::CallArgument args[] = {Simulator::CallArgument(p1),
- Simulator::CallArgument(p2),
- Simulator::CallArgument::End()};
- return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f), args));
- }
- template <typename F, typename P1, typename P2, typename P3>
- R DoCall(F* f, P1 p1, P2 p2, P3 p3) {
- Simulator::CallArgument args[] = {
- Simulator::CallArgument(p1), Simulator::CallArgument(p2),
- Simulator::CallArgument(p3), Simulator::CallArgument::End()};
- return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f), args));
- }
- template <typename F, typename P1, typename P2, typename P3, typename P4>
- R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4) {
- Simulator::CallArgument args[] = {
- Simulator::CallArgument(p1), Simulator::CallArgument(p2),
- Simulator::CallArgument(p3), Simulator::CallArgument(p4),
- Simulator::CallArgument::End()};
- return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f), args));
- }
- template <typename F, typename P1, typename P2, typename P3, typename P4,
- typename P5>
- R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- Simulator::CallArgument args[] = {
- Simulator::CallArgument(p1), Simulator::CallArgument(p2),
- Simulator::CallArgument(p3), Simulator::CallArgument(p4),
- Simulator::CallArgument(p5), Simulator::CallArgument::End()};
- return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f), args));
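+ // The pack expansion below builds the simulator argument array in place,
+ // appending the End() sentinel after the expanded arguments.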
+ template <typename F, typename... Params>
+ R DoCall(F* f, Params... args) {
+ Simulator::CallArgument args_arr[] = {Simulator::CallArgument(args)...,
+ Simulator::CallArgument::End()};
+ return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f), args_arr));
}
#elif USE_SIMULATOR && \
(V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390X)
@@ -230,42 +161,10 @@ class CallHelper {
return static_cast<uintptr_t>(simulator->Call(f, 5, p1, p2, p3, p4, p5));
}
-
- template <typename F>
- R DoCall(F* f) {
- return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f)));
- }
- template <typename F, typename P1>
- R DoCall(F* f, P1 p1) {
- return CastReturnValue<R>(
- CallSimulator(FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1)));
- }
- template <typename F, typename P1, typename P2>
- R DoCall(F* f, P1 p1, P2 p2) {
- return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f),
- ParameterTraits<P1>::Cast(p1),
- ParameterTraits<P2>::Cast(p2)));
- }
- template <typename F, typename P1, typename P2, typename P3>
- R DoCall(F* f, P1 p1, P2 p2, P3 p3) {
- return CastReturnValue<R>(CallSimulator(
- FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
- ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3)));
- }
- template <typename F, typename P1, typename P2, typename P3, typename P4>
- R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4) {
- return CastReturnValue<R>(CallSimulator(
- FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
- ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3),
- ParameterTraits<P4>::Cast(p4)));
- }
- template <typename F, typename P1, typename P2, typename P3, typename P4,
- typename P5>
- R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+ template <typename F, typename... Params>
+ R DoCall(F* f, Params... args) {
return CastReturnValue<R>(CallSimulator(
- FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
- ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3),
- ParameterTraits<P4>::Cast(p4), ParameterTraits<P5>::Cast(p5)));
+ FUNCTION_ADDR(f), ParameterTraits<Params>::Cast(args)...));
}
#elif USE_SIMULATOR && (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390)
@@ -274,67 +173,16 @@ class CallHelper {
Simulator* simulator = Simulator::current(isolate_);
return static_cast<uintptr_t>(simulator->Call(f, 5, p1, p2, p3, p4, p5));
}
- template <typename F>
- R DoCall(F* f) {
- return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f)));
- }
- template <typename F, typename P1>
- R DoCall(F* f, P1 p1) {
- return CastReturnValue<R>(
- CallSimulator(FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1)));
- }
- template <typename F, typename P1, typename P2>
- R DoCall(F* f, P1 p1, P2 p2) {
- return CastReturnValue<R>(CallSimulator(FUNCTION_ADDR(f),
- ParameterTraits<P1>::Cast(p1),
- ParameterTraits<P2>::Cast(p2)));
- }
- template <typename F, typename P1, typename P2, typename P3>
- R DoCall(F* f, P1 p1, P2 p2, P3 p3) {
- return CastReturnValue<R>(CallSimulator(
- FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
- ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3)));
- }
- template <typename F, typename P1, typename P2, typename P3, typename P4>
- R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4) {
- return CastReturnValue<R>(CallSimulator(
- FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
- ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3),
- ParameterTraits<P4>::Cast(p4)));
- }
- template <typename F, typename P1, typename P2, typename P3, typename P4,
- typename P5>
- R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+
+ template <typename F, typename... Params>
+ R DoCall(F* f, Params... args) {
return CastReturnValue<R>(CallSimulator(
- FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
- ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3),
- ParameterTraits<P4>::Cast(p4), ParameterTraits<P5>::Cast(p5)));
+ FUNCTION_ADDR(f), ParameterTraits<Params>::Cast(args)...));
}
#else
- template <typename F>
- R DoCall(F* f) {
- return f();
- }
- template <typename F, typename P1>
- R DoCall(F* f, P1 p1) {
- return f(p1);
- }
- template <typename F, typename P1, typename P2>
- R DoCall(F* f, P1 p1, P2 p2) {
- return f(p1, p2);
- }
- template <typename F, typename P1, typename P2, typename P3>
- R DoCall(F* f, P1 p1, P2 p2, P3 p3) {
- return f(p1, p2, p3);
- }
- template <typename F, typename P1, typename P2, typename P3, typename P4>
- R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4) {
- return f(p1, p2, p3, p4);
- }
- template <typename F, typename P1, typename P2, typename P3, typename P4,
- typename P5>
- R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
- return f(p1, p2, p3, p4, p5);
+ template <typename F, typename... Params>
+ R DoCall(F* f, Params... args) {
+ return f(args...);
}
#endif
diff --git a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
index 01cd82c9df..ba4f7638f2 100644
--- a/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
+++ b/deps/v8/test/cctest/compiler/test-js-context-specialization.cc
@@ -21,7 +21,7 @@ namespace compiler {
class ContextSpecializationTester : public HandleAndZoneScope {
public:
- explicit ContextSpecializationTester(MaybeHandle<Context> context)
+ explicit ContextSpecializationTester(Maybe<OuterContext> context)
: graph_(new (main_zone()) Graph(main_zone())),
common_(main_zone()),
javascript_(main_zone()),
@@ -103,7 +103,7 @@ void ContextSpecializationTester::CheckContextInputAndDepthChanges(
static const int slot_index = Context::NATIVE_CONTEXT_INDEX;
TEST(ReduceJSLoadContext0) {
- ContextSpecializationTester t((MaybeHandle<Context>()));
+ ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
@@ -174,7 +174,7 @@ TEST(ReduceJSLoadContext1) {
//
// context2 <-- context1 <-- context0 (= Parameter(0))
- ContextSpecializationTester t((MaybeHandle<Context>()));
+ ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
@@ -239,12 +239,12 @@ TEST(ReduceJSLoadContext1) {
TEST(ReduceJSLoadContext2) {
// The graph's context chain ends in a constant context (context_object1),
- // which has has another outer context (context_object0).
+ // which has another outer context (context_object0).
//
// context2 <-- context1 <-- context0 (= HeapConstant(context_object1))
// context_object1 <~~ context_object0
- ContextSpecializationTester t((MaybeHandle<Context>()));
+ ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
@@ -335,7 +335,7 @@ TEST(ReduceJSLoadContext3) {
context_object0->set(slot_index, *slot_value0);
context_object1->set(slot_index, *slot_value1);
- ContextSpecializationTester t(context_object1);
+ ContextSpecializationTester t(Just(OuterContext(context_object1, 0)));
Node* start = t.graph()->NewNode(t.common()->Start(2));
t.graph()->SetStart(start);
@@ -399,7 +399,7 @@ TEST(ReduceJSLoadContext3) {
}
TEST(ReduceJSStoreContext0) {
- ContextSpecializationTester t((MaybeHandle<Context>()));
+ ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
@@ -461,7 +461,7 @@ TEST(ReduceJSStoreContext0) {
}
TEST(ReduceJSStoreContext1) {
- ContextSpecializationTester t((MaybeHandle<Context>()));
+ ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
@@ -505,7 +505,7 @@ TEST(ReduceJSStoreContext1) {
}
TEST(ReduceJSStoreContext2) {
- ContextSpecializationTester t((MaybeHandle<Context>()));
+ ContextSpecializationTester t(Nothing<OuterContext>());
Node* start = t.graph()->NewNode(t.common()->Start(0));
t.graph()->SetStart(start);
@@ -570,7 +570,7 @@ TEST(ReduceJSStoreContext3) {
context_object0->set(slot_index, *slot_value0);
context_object1->set(slot_index, *slot_value1);
- ContextSpecializationTester t(context_object1);
+ ContextSpecializationTester t(Just(OuterContext(context_object1, 0)));
Node* start = t.graph()->NewNode(t.common()->Start(2));
t.graph()->SetStart(start);
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index b5eeb8717e..c694d8aa1d 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -6163,6 +6163,11 @@ int32_t foo8(int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f,
return a + b + c + d + e + f + g + h;
}
+int32_t foo9(int32_t a, int32_t b, int32_t c, int32_t d, int32_t e, int32_t f,
+ int32_t g, int32_t h, int32_t i) {
+ return a + b + c + d + e + f + g + h + i;
+}
+
} // namespace
@@ -6221,6 +6226,30 @@ TEST(RunCallCFunction8) {
CHECK_EQ(x * 8, m.Call(x));
}
}
+
+TEST(RunCallCFunction9) {
+ auto* foo9_ptr = &foo9;
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ Node* function = m.LoadFromPointer(&foo9_ptr, MachineType::Pointer());
+ Node* param = m.Parameter(0);
+ m.Return(m.CallCFunction9(
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), function, param,
+ m.Int32Add(param, m.Int32Constant(1)),
+ m.Int32Add(param, m.Int32Constant(2)),
+ m.Int32Add(param, m.Int32Constant(3)),
+ m.Int32Add(param, m.Int32Constant(4)),
+ m.Int32Add(param, m.Int32Constant(5)),
+ m.Int32Add(param, m.Int32Constant(6)),
+ m.Int32Add(param, m.Int32Constant(7)),
+ m.Int32Add(param, m.Int32Constant(8))));
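+ // foo9 sums its nine arguments: x + (x + 1) + ... + (x + 8) = 9 * x + 36,
+ // which the loop below verifies over a range of int32 inputs.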
+ FOR_INT32_INPUTS(i) {
+ int32_t const x = *i;
+ CHECK_EQ(x * 9 + 36, m.Call(x));
+ }
+}
#endif // USE_SIMULATOR
#if V8_TARGET_ARCH_64_BIT
@@ -6710,6 +6739,34 @@ TEST(ParentFramePointer) {
CHECK_EQ(1, r.Call(1));
}
+#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+
+TEST(StackSlotAlignment) {
+ RawMachineAssemblerTester<int32_t> r;
+ RawMachineLabel tlabel;
+ RawMachineLabel flabel;
+ RawMachineLabel merge;
+
+ int alignments[] = {4, 8, 16};
+ int alignment_count = arraysize(alignments);
+
+ Node* alignment_counter = r.Int32Constant(0);
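+ // For an aligned slot, address & (alignment - 1) is 0, so the accumulated
+ // counter stays 0 only if every slot honors its requested alignment.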
+ for (int i = 0; i < alignment_count; i++) {
+ for (int j = 0; j < 5; j++) {
+ Node* stack_slot =
+ r.StackSlot(MachineRepresentation::kWord32, alignments[i]);
+ alignment_counter = r.Int32Add(
+ alignment_counter,
+ r.Word32And(stack_slot, r.Int32Constant(alignments[i] - 1)));
+ }
+ }
+
+ r.Return(alignment_counter);
+ CHECK_EQ(0, r.Call());
+}
+
+#endif // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+
#if V8_TARGET_ARCH_64_BIT
TEST(Regression5923) {
@@ -6778,6 +6835,27 @@ TEST(Regression6028) {
CHECK_EQ(1, m.Call());
}
+TEST(Regression5951_32bit) {
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(0)),
+ m.Int32Constant(0xffffffff)));
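+ // Shifting right by 0 and masking with 0xffffffff are both identities on a
+ // 32-bit value, so the function must return its input unchanged.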
+ int32_t input = 1234;
+ CHECK_EQ(input, m.Call(input));
+}
+
+TEST(Regression738952) {
+ RawMachineAssemblerTester<int32_t> m;
+
+ int32_t sentinel = 1234;
+ // The index can be any value whose lower 32 bits are 0 and whose upper
+ // bits are not 0.
+ int64_t index = 3224;
+ index <<= 32;
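+ // Truncating the resulting double to a 32-bit word reduces it modulo 2^32,
+ // yielding 0, so the load below reads `sentinel` at offset 0.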
+ double d = static_cast<double>(index);
+ m.Return(m.Load(MachineType::Int32(), m.PointerConstant(&sentinel),
+ m.TruncateFloat64ToWord32(m.Float64Constant(d))));
+ CHECK_EQ(sentinel, m.Call());
+}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
index 11bc4c0727..04cc881b25 100644
--- a/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
+++ b/deps/v8/test/cctest/heap/test-array-buffer-tracker.cc
@@ -132,7 +132,7 @@ TEST(ArrayBuffer_Compaction) {
heap::ForceEvacuationCandidate(page_before_gc);
CHECK(IsTracked(*buf1));
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
Page* page_after_gc = Page::FromAddress(buf1->address());
CHECK(IsTracked(*buf1));
@@ -310,6 +310,12 @@ UNINITIALIZED_TEST(ArrayBuffer_SemiSpaceCopyMultipleTasks) {
v8::Context::New(isolate)->Enter();
Heap* heap = i_isolate->heap();
+ // Ensure heap is in a clean state.
+ heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
+ GarbageCollectionReason::kTesting);
+ heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
+ GarbageCollectionReason::kTesting);
+
Local<v8::ArrayBuffer> ab1 = v8::ArrayBuffer::New(isolate, 100);
Handle<JSArrayBuffer> buf1 = v8::Utils::OpenHandle(*ab1);
heap::FillCurrentPage(heap->new_space());
diff --git a/deps/v8/test/cctest/heap/test-compaction.cc b/deps/v8/test/cctest/heap/test-compaction.cc
index b7780fa409..64e7a09255 100644
--- a/deps/v8/test/cctest/heap/test-compaction.cc
+++ b/deps/v8/test/cctest/heap/test-compaction.cc
@@ -68,7 +68,7 @@ HEAP_TEST(CompactionFullAbortedPage) {
CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page);
heap->set_force_oom(true);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
heap->mark_compact_collector()->EnsureSweepingCompleted();
// Check that all handles still point to the same page, i.e., compaction
@@ -128,7 +128,7 @@ HEAP_TEST(CompactionPartiallyAbortedPage) {
Page::FromAddress(page_to_fill_handles.front()->address());
heap->set_force_oom(true);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
heap->mark_compact_collector()->EnsureSweepingCompleted();
bool migration_aborted = false;
@@ -210,7 +210,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
Page::FromAddress(page_to_fill_handles.front()->address());
heap->set_force_oom(true);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
heap->mark_compact_collector()->EnsureSweepingCompleted();
// The following check makes sure that we compacted "some" objects, while
@@ -303,7 +303,7 @@ HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
Page::FromAddress(page_to_fill_handles.front()->address());
heap->set_force_oom(true);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
heap->mark_compact_collector()->EnsureSweepingCompleted();
// The following check makes sure that we compacted "some" objects, while
diff --git a/deps/v8/test/cctest/heap/test-concurrent-marking.cc b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
index ce52018e29..03ee6c4b8c 100644
--- a/deps/v8/test/cctest/heap/test-concurrent-marking.cc
+++ b/deps/v8/test/cctest/heap/test-concurrent-marking.cc
@@ -18,8 +18,9 @@ TEST(ConcurrentMarking) {
if (!i::FLAG_concurrent_marking) return;
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
- ConcurrentMarking* concurrent_marking = new ConcurrentMarking(heap);
- concurrent_marking->AddRoot(heap->undefined_value());
+ ConcurrentMarkingDeque deque(heap);
+ deque.Push(heap->undefined_value());
+ ConcurrentMarking* concurrent_marking = new ConcurrentMarking(heap, &deque);
concurrent_marking->StartTask();
concurrent_marking->WaitForTaskToComplete();
delete concurrent_marking;
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 6446adba6a..fc2b68e809 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -529,6 +529,129 @@ TEST(WeakGlobalHandlesScavenge) {
GlobalHandles::Destroy(h2.location());
}
+TEST(WeakGlobalUnmodifiedApiHandlesScavenge) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ LocalContext context;
+ Factory* factory = isolate->factory();
+ GlobalHandles* global_handles = isolate->global_handles();
+
+ WeakPointerCleared = false;
+
+ Handle<Object> h1;
+ Handle<Object> h2;
+
+ {
+ HandleScope scope(isolate);
+
+ // Create an API object that is unmodified.
+ auto function = FunctionTemplate::New(context->GetIsolate())
+ ->GetFunction(context.local())
+ .ToLocalChecked();
+ auto i = function->NewInstance(context.local()).ToLocalChecked();
+ Handle<Object> u = factory->NewNumber(1.12344);
+
+ h1 = global_handles->Create(*u);
+ h2 = global_handles->Create(*(reinterpret_cast<internal::Object**>(*i)));
+ }
+
+ std::pair<Handle<Object>*, int> handle_and_id(&h2, 1234);
+ GlobalHandles::MakeWeak(
+ h2.location(), reinterpret_cast<void*>(&handle_and_id),
+ &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
+
+ CcTest::CollectGarbage(NEW_SPACE);
+
+ CHECK((*h1)->IsHeapNumber());
+ CHECK(WeakPointerCleared);
+ CHECK(!global_handles->IsNearDeath(h1.location()));
+
+ GlobalHandles::Destroy(h1.location());
+}
+
+TEST(WeakGlobalApiHandleModifiedMapScavenge) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ LocalContext context;
+ GlobalHandles* global_handles = isolate->global_handles();
+
+ WeakPointerCleared = false;
+
+ Handle<Object> h1;
+
+ {
+ HandleScope scope(isolate);
+
+ // Create an API object which does not have the same map as its constructor.
+ auto function_template = FunctionTemplate::New(context->GetIsolate());
+ auto instance_t = function_template->InstanceTemplate();
+ instance_t->Set(v8::String::NewFromUtf8(context->GetIsolate(), "a",
+ NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(context->GetIsolate(), 10));
+ auto function =
+ function_template->GetFunction(context.local()).ToLocalChecked();
+ auto i = function->NewInstance(context.local()).ToLocalChecked();
+
+ h1 = global_handles->Create(*(reinterpret_cast<internal::Object**>(*i)));
+ }
+
+ std::pair<Handle<Object>*, int> handle_and_id(&h1, 1234);
+ GlobalHandles::MakeWeak(
+ h1.location(), reinterpret_cast<void*>(&handle_and_id),
+ &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
+
+ CcTest::CollectGarbage(NEW_SPACE);
+
+ CHECK(!WeakPointerCleared);
+ CHECK(!global_handles->IsNearDeath(h1.location()));
+
+ GlobalHandles::Destroy(h1.location());
+}
+
+TEST(WeakGlobalApiHandleWithElementsScavenge) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ LocalContext context;
+ GlobalHandles* global_handles = isolate->global_handles();
+
+ WeakPointerCleared = false;
+
+ Handle<Object> h1;
+
+ {
+ HandleScope scope(isolate);
+
+ // Create an API object which has elements.
+ auto function_template = FunctionTemplate::New(context->GetIsolate());
+ auto instance_t = function_template->InstanceTemplate();
+ instance_t->Set(v8::String::NewFromUtf8(context->GetIsolate(), "1",
+ NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(context->GetIsolate(), 10));
+ instance_t->Set(v8::String::NewFromUtf8(context->GetIsolate(), "2",
+ NewStringType::kNormal)
+ .ToLocalChecked(),
+ v8::Number::New(context->GetIsolate(), 10));
+ auto function =
+ function_template->GetFunction(context.local()).ToLocalChecked();
+ auto i = function->NewInstance(context.local()).ToLocalChecked();
+
+ h1 = global_handles->Create(*(reinterpret_cast<internal::Object**>(*i)));
+ }
+
+ std::pair<Handle<Object>*, int> handle_and_id(&h1, 1234);
+ GlobalHandles::MakeWeak(
+ h1.location(), reinterpret_cast<void*>(&handle_and_id),
+ &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
+
+ CcTest::CollectGarbage(NEW_SPACE);
+
+ CHECK(!WeakPointerCleared);
+ CHECK(!global_handles->IsNearDeath(h1.location()));
+
+ GlobalHandles::Destroy(h1.location());
+}
TEST(WeakGlobalHandlesMark) {
CcTest::InitializeVM();
@@ -565,7 +688,7 @@ TEST(WeakGlobalHandlesMark) {
CHECK(!GlobalHandles::IsNearDeath(h2.location()));
// Incremental marking potentially marked handles before they turned weak.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK((*h1)->IsString());
@@ -653,7 +776,7 @@ TEST(BytecodeArray) {
// evacuation candidate.
Page* evac_page = Page::FromAddress(constant_pool->address());
heap::ForceEvacuationCandidate(evac_page);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// BytecodeArray should survive.
CHECK_EQ(array->length(), kRawBytesSize);
@@ -1184,12 +1307,12 @@ TEST(TestCodeFlushingPreAged) {
CHECK(function->shared()->is_compiled());
// The code has been run so will survive at least one GC.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled());
// The code was only run once, so it should be pre-aged and collected on the
// next GC.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(!function->shared()->is_compiled() || function->IsOptimized() ||
function->IsInterpreted());
@@ -1200,14 +1323,14 @@ TEST(TestCodeFlushingPreAged) {
}
// The code will survive at least two GC now that it is young again.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use full marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
// foo should no longer be in the compilation cache
@@ -1253,15 +1376,15 @@ TEST(TestCodeFlushingIncremental) {
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled());
// Simulate several GCs that use incremental marking.
const int kAgingThreshold = 6;
for (int i = 0; i < kAgingThreshold; i++) {
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
CHECK(!function->shared()->is_compiled() || function->IsOptimized() ||
function->IsInterpreted());
@@ -1279,7 +1402,7 @@ TEST(TestCodeFlushingIncremental) {
heap::SimulateIncrementalMarking(CcTest::heap());
if (!function->next_function_link()->IsUndefined(CcTest::i_isolate()))
break;
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
// Force optimization while incremental marking is active and while
@@ -1289,7 +1412,7 @@ TEST(TestCodeFlushingIncremental) {
}
// Simulate one final GC to make sure the candidate queue is sane.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled() || !function->IsOptimized());
CHECK(function->is_compiled() || !function->IsOptimized());
}
@@ -1319,7 +1442,7 @@ TEST(TestCodeFlushingIncrementalScavenge) {
Handle<String> bar_name = factory->InternalizeUtf8String("bar");
// Perform one initial GC to enable code flushing.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// This compile will add the code to the compilation cache.
{ v8::HandleScope scope(CcTest::isolate());
@@ -1359,7 +1482,7 @@ TEST(TestCodeFlushingIncrementalScavenge) {
CcTest::CollectGarbage(NEW_SPACE);
// Simulate one final GC to make sure the candidate queue is sane.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(!function->shared()->is_compiled() || function->IsOptimized() ||
function->IsInterpreted());
CHECK(!function->is_compiled() || function->IsOptimized() ||
@@ -1399,8 +1522,8 @@ TEST(TestCodeFlushingIncrementalAbort) {
CHECK(function->shared()->is_compiled());
// The code will survive at least two GCs.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled());
// Bump the code age so that flushing is triggered.
@@ -1429,7 +1552,7 @@ TEST(TestCodeFlushingIncrementalAbort) {
}
// Simulate one final GC to make sure the candidate queue is sane.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(function->shared()->is_compiled() || !function->IsOptimized());
CHECK(function->is_compiled() || !function->IsOptimized());
}
@@ -1515,7 +1638,7 @@ TEST(CompilationCacheCachingBehavior) {
// (Unless --optimize-for-size, in which case it might get collected
// immediately.)
if (!FLAG_optimize_for_size) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
pair = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
v8::ScriptOriginOptions(true, false),
native_context, language_mode);
@@ -1531,7 +1654,7 @@ TEST(CompilationCacheCachingBehavior) {
}
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Ensure code aging cleared the entry from the cache.
pair = compilation_cache->LookupScript(source, Handle<Object>(), 0, 0,
v8::ScriptOriginOptions(true, false),
@@ -1595,7 +1718,7 @@ TEST(TestInternalWeakLists) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::Local<v8::Context> ctx[kNumTestContexts];
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
CHECK_EQ(0, CountNativeContexts());
@@ -1606,7 +1729,7 @@ TEST(TestInternalWeakLists) {
// Collect garbage that might have been created by one of the
// installed extensions.
isolate->compilation_cache()->Clear();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(i + 1, CountNativeContexts());
@@ -1638,7 +1761,7 @@ TEST(TestInternalWeakLists) {
// Mark compact handles the weak references.
isolate->compilation_cache()->Clear();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i]));
// Get rid of f3 and f5 in the same way.
@@ -1647,14 +1770,14 @@ TEST(TestInternalWeakLists) {
CcTest::CollectGarbage(NEW_SPACE);
CHECK_EQ(4, CountOptimizedUserFunctions(ctx[i]));
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i]));
CompileRun("f5=null");
for (int j = 0; j < 10; j++) {
CcTest::CollectGarbage(NEW_SPACE);
CHECK_EQ(3, CountOptimizedUserFunctions(ctx[i]));
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(2, CountOptimizedUserFunctions(ctx[i]));
ctx[i]->Exit();
@@ -1662,7 +1785,7 @@ TEST(TestInternalWeakLists) {
// Force compilation cache cleanup.
CcTest::heap()->NotifyContextDisposed(true);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Dispose the native contexts one by one.
for (int i = 0; i < kNumTestContexts; i++) {
@@ -1678,7 +1801,7 @@ TEST(TestInternalWeakLists) {
}
// Mark compact handles the weak references.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(kNumTestContexts - i - 1, CountNativeContexts());
}
@@ -1694,8 +1817,7 @@ static int CountNativeContextsWithGC(Isolate* isolate, int n) {
Handle<Object> object(heap->native_contexts_list(), isolate);
while (!object->IsUndefined(isolate)) {
count++;
- if (count == n)
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ if (count == n) CcTest::CollectAllGarbage();
object =
Handle<Object>(Context::cast(*object)->next_context_link(), isolate);
}
@@ -1738,7 +1860,7 @@ TEST(TestInternalWeakListsTraverseWithGC) {
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
v8::Local<v8::Context> ctx[kNumTestContexts];
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
CHECK_EQ(0, CountNativeContexts());
@@ -1810,11 +1932,11 @@ TEST(TestSizeOfRegExpCode) {
int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
CompileRun("'foo'.match(reg_exp_source);");
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
int size_with_regexp = static_cast<int>(CcTest::heap()->SizeOfObjects());
CompileRun("'foo'.match(half_size_reg_exp);");
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
int size_with_optimized_regexp =
static_cast<int>(CcTest::heap()->SizeOfObjects());
@@ -1861,7 +1983,7 @@ HEAP_TEST(TestSizeOfObjects) {
// The heap size should go back to initial size after a full GC, even
// though sweeping didn't finish yet.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Normally sweeping would not be complete here, but there are no guarantees.
CHECK_EQ(initial_size, static_cast<int>(heap->SizeOfObjects()));
// Waiting for sweeper threads should not change heap size.
@@ -2107,8 +2229,7 @@ TEST(GrowAndShrinkNewSpace) {
}
// Make sure we're in a consistent state to start out.
- CcTest::CollectGarbage(NEW_SPACE);
- CcTest::CollectGarbage(NEW_SPACE);
+ CcTest::CollectAllGarbage();
// Explicitly growing should double the space capacity.
size_t old_capacity, new_capacity;
@@ -2387,7 +2508,7 @@ TEST(InstanceOfStubWriteBarrier) {
#endif
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft()) return;
+ if (!CcTest::i_isolate()->use_optimizer()) return;
if (i::FLAG_force_marking_deque_overflows) return;
v8::HandleScope outer_scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2456,7 +2577,7 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
#endif
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft()) return;
+ if (!CcTest::i_isolate()->use_optimizer()) return;
v8::HandleScope outer_scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2485,7 +2606,7 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
// The following calls will increment CcTest::heap()->global_ic_age().
CcTest::isolate()->ContextDisposedNotification();
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
CHECK_EQ(0, f->shared()->opt_count());
@@ -2501,7 +2622,7 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
#endif
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft()) return;
+ if (!CcTest::i_isolate()->use_optimizer()) return;
v8::HandleScope outer_scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2527,7 +2648,7 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
// The following two calls will increment CcTest::heap()->global_ic_age().
CcTest::isolate()->ContextDisposedNotification();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
CHECK_EQ(0, f->shared()->opt_count());
@@ -2619,7 +2740,7 @@ TEST(IdleNotificationFinishMarking) {
TEST(OptimizedAllocationAlwaysInNewSpace) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2654,7 +2775,7 @@ TEST(OptimizedPretenuringAllocationFolding) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2705,7 +2826,7 @@ TEST(OptimizedPretenuringObjectArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
@@ -2745,7 +2866,7 @@ TEST(OptimizedPretenuringMixedInObjectProperties) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
@@ -2803,7 +2924,7 @@ TEST(OptimizedPretenuringDoubleArrayProperties) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
@@ -2843,7 +2964,7 @@ TEST(OptimizedPretenuringdoubleArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
@@ -2883,7 +3004,7 @@ TEST(OptimizedPretenuringNestedMixedArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2933,7 +3054,7 @@ TEST(OptimizedPretenuringNestedObjectLiterals) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -2984,7 +3105,7 @@ TEST(OptimizedPretenuringNestedDoubleLiterals) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_expose_gc = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -3035,7 +3156,7 @@ TEST(OptimizedPretenuringNestedDoubleLiterals) {
TEST(OptimizedAllocationArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> ctx = CcTest::isolate()->GetCurrentContext();
@@ -3100,7 +3221,7 @@ TEST(Regress1465) {
CHECK_EQ(transitions_count, transitions_before);
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Count number of live transitions after marking. Note that one transition
// is left, because 'o' still holds an instance of one transition target.
@@ -3284,7 +3405,7 @@ TEST(Regress2143a) {
CcTest::heap()->AgeInlineCaches();
// Explicitly request GC to perform final marking step and sweeping.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
Handle<JSReceiver> root = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
CcTest::global()
@@ -3327,7 +3448,7 @@ TEST(Regress2143b) {
CcTest::heap()->AgeInlineCaches();
// Explicitly request GC to perform final marking step and sweeping.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
Handle<JSReceiver> root = v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(
CcTest::global()
@@ -3344,7 +3465,7 @@ TEST(ReleaseOverReservedPages) {
if (FLAG_never_compact) return;
i::FLAG_trace_gc = true;
// The optimizer can allocate stuff, messing up the test.
- i::FLAG_crankshaft = false;
+ i::FLAG_opt = false;
i::FLAG_always_opt = false;
// Parallel compaction increases fragmentation, depending on how existing
// memory is distributed. Since this is non-deterministic because of
@@ -3378,14 +3499,14 @@ TEST(ReleaseOverReservedPages) {
// Triggering one GC will cause a lot of garbage to be discovered but
// evenly spread across all allocated pages.
- CcTest::CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_GE(overall_page_count, old_space->CountTotalPages());
// Triggering subsequent GCs should cause at least half of the pages
// to be released to the OS after at most two cycles.
- CcTest::CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_GE(overall_page_count, old_space->CountTotalPages());
- CcTest::CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2);
// Triggering a last-resort GC should cause all pages to be released to the
@@ -3481,7 +3602,7 @@ TEST(IncrementalMarkingPreservesMonomorphicCallIC) {
CHECK(feedback_vector->Get(feedback_helper.slot(slot2))->IsWeakCell());
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(!WeakCell::cast(feedback_vector->Get(feedback_helper.slot(slot1)))
->cleared());
@@ -3538,7 +3659,7 @@ TEST(IncrementalMarkingPreservesMonomorphicConstructor) {
CHECK(vector->Get(FeedbackSlot(0))->IsWeakCell());
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(vector->Get(FeedbackSlot(0))->IsWeakCell());
}
@@ -3560,7 +3681,7 @@ TEST(IncrementalMarkingPreservesMonomorphicIC) {
CheckVectorIC(f, 0, MONOMORPHIC);
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CheckVectorIC(f, 0, MONOMORPHIC);
}
@@ -3598,7 +3719,7 @@ TEST(IncrementalMarkingPreservesPolymorphicIC) {
// Fire context dispose notification.
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CheckVectorIC(f, 0, POLYMORPHIC);
}
@@ -3637,7 +3758,7 @@ TEST(ContextDisposeDoesntClearPolymorphicIC) {
// Fire context dispose notification.
CcTest::isolate()->ContextDisposedNotification();
heap::SimulateIncrementalMarking(CcTest::heap());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CheckVectorIC(f, 0, POLYMORPHIC);
}
@@ -3769,7 +3890,7 @@ TEST(Regress159140) {
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Prepare several closures that are all eligible for code flushing
// because all reachable ones are not optimized. Make sure that the
@@ -3813,7 +3934,7 @@ TEST(Regress159140) {
// finish the GC to complete code flushing.
heap::SimulateIncrementalMarking(heap);
CompileRun("%OptimizeFunctionOnNextCall(g); g(3);");
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Unoptimized code is missing and the deoptimizer will go ballistic.
CompileRun("g('bozo');");
@@ -3829,7 +3950,7 @@ TEST(Regress165495) {
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Prepare an optimized closure so that the optimized code map gets
// populated. Then age the unoptimized code to trigger code flushing
@@ -3859,7 +3980,7 @@ TEST(Regress165495) {
// Simulate incremental marking so that unoptimized code is flushed
// even though it still is cached in the optimized code map.
heap::SimulateIncrementalMarking(heap);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Make a new closure that will get code installed from the code map.
// Unoptimized code is missing and the deoptimizer will go ballistic.
@@ -3881,7 +4002,7 @@ TEST(Regress169209) {
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Prepare a shared function info eligible for code flushing for which
// the unoptimized code will be replaced during optimization.
@@ -3939,14 +4060,14 @@ TEST(Regress169209) {
"g(false);");
// Finish garbage collection cycle.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(shared1->code()->gc_metadata() == NULL);
}
TEST(Regress169928) {
i::FLAG_allow_natives_syntax = true;
- i::FLAG_crankshaft = false;
+ i::FLAG_opt = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
LocalContext env;
@@ -4068,8 +4189,8 @@ TEST(Regress513507) {
heap->set_allocation_timeout(5);
FLAG_gc_interval = 1000;
for (int i = 0; i < 10; ++i) {
- BailoutId id = BailoutId(i);
- SharedFunctionInfo::AddToOptimizedCodeMap(shared, context, code, id);
+ BailoutId id = BailoutId(i + 1);
+ Context::AddToOSROptimizedCodeCache(context, shared, code, id);
}
}
#endif // DEBUG
@@ -4081,12 +4202,12 @@ TEST(Regress513496) {
HandleScope scope(isolate);
// Perform one initial GC to enable code flushing.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Prepare an optimized closure containing an inlined function. Then age
// the inlined unoptimized code to trigger code flushing but make sure the
// outer optimized code is kept in the optimized code map.
- Handle<SharedFunctionInfo> shared;
+ Handle<SharedFunctionInfo> optimized_code;
{
LocalContext context;
HandleScope inner_scope(isolate);
@@ -4114,18 +4235,19 @@ TEST(Regress513496) {
->Get(context.local(), v8_str("f"))
.ToLocalChecked())));
CHECK(f->is_compiled());
- shared = inner_scope.CloseAndEscape(handle(f->shared(), isolate));
+
+ // Look up the optimized code and keep it alive.
+ Code* result = f->feedback_vector()->optimized_code();
+ Handle<Code> optimized_code(result, isolate);
+ optimized_code = inner_scope.CloseAndEscape(handle(result, isolate));
+
CompileRun("f = null");
}
- // Lookup the optimized code and keep it alive.
- Code* result = shared->SearchOptimizedCodeMap(
- isolate->context()->native_context(), BailoutId::None());
- Handle<Code> optimized_code(result, isolate);
// Finish a full GC cycle so that the unoptimized code of 'g' is flushed even
// though the optimized code for 'f' is reachable via the optimized code map.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Make a new closure that will get code installed from the code map.
// Unoptimized code is missing and the deoptimizer will go ballistic.
@@ -4169,7 +4291,7 @@ TEST(LargeObjectSlotRecording) {
}
// Move the evacuation candidate object.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Verify that the pointers in the large object got updated.
for (int i = 0; i < size; i += kStep) {
@@ -4178,10 +4300,9 @@ TEST(LargeObjectSlotRecording) {
}
}
-
-class DummyVisitor : public ObjectVisitor {
+class DummyVisitor : public RootVisitor {
public:
- void VisitPointers(Object** start, Object** end) override {}
+ void VisitRootPointers(Root root, Object** start, Object** end) override {}
};
@@ -4269,14 +4390,14 @@ static int AllocationSitesCount(Heap* heap) {
TEST(EnsureAllocationSiteDependentCodesProcessed) {
- if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
GlobalHandles* global_handles = isolate->global_handles();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
// The allocation site at the head of the list is ours.
Handle<AllocationSite> site;
@@ -4329,7 +4450,7 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
// Now make sure that a GC gets rid of the function, even though we
// still have the allocation site alive.
for (int i = 0; i < 4; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
// The site still exists because of our global handle, but the code is no
@@ -4340,13 +4461,13 @@ TEST(EnsureAllocationSiteDependentCodesProcessed) {
TEST(CellsInOptimizedCodeAreWeak) {
- if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
Handle<Code> code;
{
@@ -4376,7 +4497,7 @@ TEST(CellsInOptimizedCodeAreWeak) {
// Now make sure that a GC gets rid of the function
for (int i = 0; i < 4; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
CHECK(code->marked_for_deoptimization());
@@ -4384,13 +4505,13 @@ TEST(CellsInOptimizedCodeAreWeak) {
TEST(ObjectsInOptimizedCodeAreWeak) {
- if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
Handle<Code> code;
{
@@ -4418,20 +4539,20 @@ TEST(ObjectsInOptimizedCodeAreWeak) {
// Now make sure that a GC gets rid of the function
for (int i = 0; i < 4; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
CHECK(code->marked_for_deoptimization());
}
TEST(NewSpaceObjectsInOptimizedCode) {
- if (i::FLAG_always_opt || !i::FLAG_crankshaft || i::FLAG_turbo) return;
+ if (i::FLAG_always_opt || !i::FLAG_opt || i::FLAG_turbo) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
Handle<Code> code;
{
@@ -4479,14 +4600,14 @@ TEST(NewSpaceObjectsInOptimizedCode) {
// Now make sure that a GC gets rid of the function
for (int i = 0; i < 4; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
CHECK(code->marked_for_deoptimization());
}
TEST(NoWeakHashTableLeakWithIncrementalMarking) {
- if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
if (!i::FLAG_incremental_marking) return;
i::FLAG_allow_natives_syntax = true;
i::FLAG_compilation_cache = false;
@@ -4501,9 +4622,9 @@ TEST(NoWeakHashTableLeakWithIncrementalMarking) {
// Get a clean slate regarding optimized functions on the heap.
i::Deoptimizer::DeoptimizeAll(isolate);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
for (int i = 0; i < 3; i++) {
heap::SimulateIncrementalMarking(heap);
@@ -4571,7 +4692,7 @@ TEST(NextCodeLinkIsWeak) {
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
Handle<Code> code;
CcTest::CollectAllAvailableGarbage();
@@ -4617,7 +4738,7 @@ TEST(NextCodeLinkIsWeak2) {
Isolate* isolate = CcTest::i_isolate();
v8::internal::Heap* heap = CcTest::heap();
- if (!isolate->use_crankshaft()) return;
+ if (!isolate->use_optimizer()) return;
HandleScope outer_scope(heap->isolate());
CcTest::CollectAllAvailableGarbage();
Handle<Context> context(Context::cast(heap->native_contexts_list()), isolate);
@@ -4682,7 +4803,7 @@ TEST(WeakFunctionInConstructor) {
}
weak_ic_cleared = false;
garbage.SetWeak(&garbage, &ClearWeakIC, v8::WeakCallbackType::kParameter);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(weak_ic_cleared);
// We've determined the constructor in createObj has had its weak cell
@@ -4694,7 +4815,7 @@ TEST(WeakFunctionInConstructor) {
Object* slot_value = feedback_vector->Get(FeedbackSlot(0));
CHECK(slot_value->IsWeakCell());
if (WeakCell::cast(slot_value)->cleared()) break;
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
Object* slot_value = feedback_vector->Get(FeedbackSlot(0));
@@ -4724,7 +4845,7 @@ void CheckWeakness(const char* source) {
}
weak_ic_cleared = false;
garbage.SetWeak(&garbage, &ClearWeakIC, v8::WeakCallbackType::kParameter);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(weak_ic_cleared);
}
@@ -4931,7 +5052,7 @@ TEST(MonomorphicStaysMonomorphicAfterGC) {
v8::HandleScope scope(CcTest::isolate());
CompileRun("(testIC())");
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CheckIC(loadIC, Code::LOAD_IC, 0, MONOMORPHIC);
{
v8::HandleScope scope(CcTest::isolate());
@@ -4966,7 +5087,7 @@ TEST(PolymorphicStaysPolymorphicAfterGC) {
v8::HandleScope scope(CcTest::isolate());
CompileRun("(testIC())");
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CheckIC(loadIC, Code::LOAD_IC, 0, POLYMORPHIC);
{
v8::HandleScope scope(CcTest::isolate());
@@ -5040,8 +5161,8 @@ TEST(WeakCellsWithIncrementalMarking) {
}
// Collect all garbage twice to make sure that we also cleared
// weak cells that were allocated on black pages.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
CHECK_EQ(*survivor, weak_cells[0]->value());
for (int i = 1; i < N; i++) {
CHECK(weak_cells[i]->cleared());
@@ -5088,7 +5209,7 @@ TEST(AddInstructionChangesNewSpacePromotion) {
heap->DisableInlineAllocation();
heap->set_allocation_timeout(1);
g->Call(env.local(), global, 1, args1).ToLocalChecked();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
@@ -5421,14 +5542,14 @@ TEST(Regress3877) {
"a.x = new cls();"
"cls.prototype = null;");
for (int i = 0; i < 4; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
// The map of a.x keeps the prototype alive
CHECK(!weak_prototype->cleared());
// Change the map of a.x and make the previous map garbage collectable.
CompileRun("a.x.__proto__ = {};");
for (int i = 0; i < 4; i++) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
CHECK(weak_prototype->cleared());
}
@@ -5873,7 +5994,7 @@ TEST(ScriptIterator) {
Heap* heap = CcTest::heap();
LocalContext context;
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
int script_count = 0;
{
@@ -5899,8 +6020,8 @@ TEST(SharedFunctionInfoIterator) {
Heap* heap = CcTest::heap();
LocalContext context;
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
int sfi_count = 0;
{
@@ -6157,7 +6278,7 @@ TEST(Regress615489) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6185,7 +6306,7 @@ TEST(Regress615489) {
}
CHECK(marking->IsComplete());
intptr_t size_before = heap->SizeOfObjects();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
intptr_t size_after = heap->SizeOfObjects();
// Live size does not increase after garbage collection.
CHECK_LE(size_after, size_before);
@@ -6258,7 +6379,7 @@ TEST(LeftTrimFixedArrayInBlackArea) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6297,7 +6418,7 @@ TEST(ContinuousLeftTrimFixedArrayInBlackArea) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6363,7 +6484,7 @@ TEST(ContinuousRightTrimFixedArrayInBlackArea) {
v8::HandleScope scope(CcTest::isolate());
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
i::IncrementalMarking* marking = heap->incremental_marking();
@@ -6451,7 +6572,7 @@ TEST(UncommitUnusedLargeObjectMemory) {
array->Shrink(1);
CHECK(array->Size() < size_before);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(chunk->CommittedPhysicalMemory() < committed_memory_before);
size_t shrinked_size =
RoundUp((array->address() - chunk->address()) + array->Size(),
@@ -6532,7 +6653,7 @@ HEAP_TEST(Regress670675) {
Heap* heap = CcTest::heap();
Isolate* isolate = heap->isolate();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();
@@ -6609,26 +6730,5 @@ HEAP_TEST(Regress5831) {
CHECK(chunk->NeverEvacuate());
}
-UNINITIALIZED_TEST(ReinitializeStringHashSeed) {
- // Enable rehashing and create an isolate and context.
- i::FLAG_rehash_snapshot = true;
- for (int i = 1; i < 3; i++) {
- i::FLAG_hash_seed = 1337 * i;
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
- {
- v8::Isolate::Scope isolate_scope(isolate);
- CHECK_EQ(1337 * i,
- reinterpret_cast<i::Isolate*>(isolate)->heap()->HashSeed());
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- CHECK(!context.IsEmpty());
- v8::Context::Scope context_scope(context);
- }
- isolate->Dispose();
- }
-}
-
} // namespace internal
} // namespace v8
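
Most of the churn in test-heap.cc is mechanical: the explicit CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask) becomes an argument-free CcTest::CollectAllGarbage(). A plausible shape for the helper, assuming it simply bakes in the old flag plus a testing reason (the cctest header is not part of this diff; the Heap signature is taken from the InlineAllocationObserverCadence hunk below):

    // Hypothetical sketch of the zero-argument helper; the assumption is that
    // it forwards the flag every call site used to pass explicitly.
    static void CollectAllGarbage() {
      CcTest::heap()->CollectAllGarbage(
          i::Heap::kFinalizeIncrementalMarkingMask,
          i::GarbageCollectionReason::kTesting);
    }
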
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index f77586e878..36c5ce9e11 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -43,6 +43,7 @@
#include "src/global-handles.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/heap/sequential-marking-deque.h"
#include "src/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
@@ -51,10 +52,9 @@
using namespace v8::internal;
using v8::Just;
-
-TEST(MarkingDeque) {
+TEST(SequentialMarkingDeque) {
CcTest::InitializeVM();
- MarkingDeque s(CcTest::i_isolate()->heap());
+ SequentialMarkingDeque s(CcTest::i_isolate()->heap());
s.SetUp();
s.StartUsing();
Address original_address = reinterpret_cast<Address>(&s);
@@ -90,8 +90,8 @@ TEST(Promotion) {
// Array should be in the new space.
CHECK(heap->InSpace(*array, NEW_SPACE));
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
CHECK(heap->InSpace(*array, OLD_SPACE));
}
}
@@ -115,8 +115,8 @@ HEAP_TEST(NoPromotion) {
heap->set_force_oom(true);
// Array should be in the new space.
CHECK(heap->InSpace(*array, NEW_SPACE));
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
CHECK(heap->InSpace(*array, NEW_SPACE));
}
}
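
Both the Promotion and NoPromotion hunks lean on the same generational rule: a new-space object that survives two full collections is promoted to old space, while NoPromotion forces allocation failure so the array stays put. A compressed sketch of the positive case, assuming the array is large enough to survive but small enough for new space:

    Handle<FixedArray> array = isolate->factory()->NewFixedArray(1000);
    CHECK(heap->InSpace(*array, NEW_SPACE));   // allocated young
    CcTest::CollectAllGarbage();               // first full GC: survives
    CcTest::CollectAllGarbage();               // second full GC: promoted
    CHECK(heap->InSpace(*array, OLD_SPACE));
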
diff --git a/deps/v8/test/cctest/heap/test-page-promotion.cc b/deps/v8/test/cctest/heap/test-page-promotion.cc
index 17096bc7ca..beab159b8c 100644
--- a/deps/v8/test/cctest/heap/test-page-promotion.cc
+++ b/deps/v8/test/cctest/heap/test-page-promotion.cc
@@ -21,6 +21,10 @@ namespace {
v8::Isolate* NewIsolateForPagePromotion(int min_semi_space_size = 8,
int max_semi_space_size = 8) {
+ // Parallel evacuation messes with fragmentation in a way that causes
+ // objects which should be copied within the semi spaces to be promoted
+ // to old space instead.
+ i::FLAG_parallel_compaction = false;
i::FLAG_page_promotion = true;
i::FLAG_page_promotion_threshold = 0; // %
i::FLAG_min_semi_space_size = min_semi_space_size;
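
A short usage sketch for NewIsolateForPagePromotion above; the body is hypothetical (only the helper appears in this hunk), following the isolate/scope/dispose pattern used elsewhere in this patch:

    v8::Isolate* isolate = NewIsolateForPagePromotion();
    {
      v8::Isolate::Scope isolate_scope(isolate);
      v8::HandleScope handle_scope(isolate);
      // ... fill a new-space page past the (zeroed) promotion threshold,
      // then trigger a scavenge and assert the whole page was moved ...
    }
    isolate->Dispose();
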
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 63f9627048..0d625ca408 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -156,8 +156,7 @@ static void VerifyMemoryChunk(Isolate* isolate,
size_t second_commit_area_size,
Executability executable) {
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
- CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
- 0));
+ CHECK(memory_allocator->SetUp(heap->MaxReserved(), 0));
{
TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
TestCodeRangeScope test_code_range_scope(isolate, code_range);
@@ -208,8 +207,7 @@ TEST(Regress3540) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
- CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
- 0));
+ CHECK(memory_allocator->SetUp(heap->MaxReserved(), 0));
TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
CodeRange* code_range = new CodeRange(isolate);
size_t code_range_size =
@@ -309,8 +307,7 @@ TEST(MemoryAllocator) {
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
CHECK(memory_allocator != nullptr);
- CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
- 0));
+ CHECK(memory_allocator->SetUp(heap->MaxReserved(), 0));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
{
@@ -357,8 +354,7 @@ TEST(NewSpace) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
- CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
- 0));
+ CHECK(memory_allocator->SetUp(heap->MaxReserved(), 0));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
NewSpace new_space(heap);
@@ -383,8 +379,7 @@ TEST(OldSpace) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
- CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
- 0));
+ CHECK(memory_allocator->SetUp(heap->MaxReserved(), 0));
TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
OldSpace* s = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
@@ -476,7 +471,7 @@ TEST(SizeOfInitialHeap) {
page_count[i] = heap->paged_space(i)->CountTotalPages();
// Check that the initial heap is also within the limit.
- CHECK_LT(heap->paged_space(i)->CommittedMemory(), kMaxInitialSizePerSpace);
+ CHECK_LE(heap->paged_space(i)->CommittedMemory(), kMaxInitialSizePerSpace);
}
// Executing the empty script gets by with the same number of pages, i.e.,
@@ -637,6 +632,12 @@ UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ // Clear out any pre-existing garbage to make the test consistent
+ // across snapshot/no-snapshot builds.
+ i_isolate->heap()->CollectAllGarbage(
+ i::Heap::kFinalizeIncrementalMarkingMask,
+ i::GarbageCollectionReason::kTesting);
+
NewSpace* new_space = i_isolate->heap()->new_space();
Observer observer1(512);
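
Throughout test-spaces.cc, MemoryAllocator::SetUp drops its separate executable-memory budget (heap->MaxExecutableSize()) and keeps only the overall capacity and the code-range size. The post-patch call pattern, with teardown shown as an assumption (the TearDown/delete lines fall outside these hunks):

    MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
    // Two-argument form: total capacity plus code-range size; the explicit
    // executable-memory limit is gone.
    CHECK(memory_allocator->SetUp(heap->MaxReserved(), 0));
    // ... exercise the allocator ...
    memory_allocator->TearDown();   // assumed cleanup, mirroring V8 conventions
    delete memory_allocator;
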
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index 2a1731f75a..318372bb5b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -308,7 +308,7 @@ void BytecodeExpectationsPrinter::PrintBytecodeSequence(
<< "\nbytecodes: [\n";
SourcePositionTableIterator source_iterator(
- bytecode_array->source_position_table());
+ bytecode_array->SourcePositionTable());
BytecodeArrayIterator bytecode_iterator(bytecode_array);
for (; !bytecode_iterator.done(); bytecode_iterator.Advance()) {
stream << kIndent;
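
The printer hunk swaps the raw source_position_table() field for the SourcePositionTable() accessor. A minimal iteration sketch over that table, using only the iterator calls visible in this hunk; the per-entry accessors are left as comments because their names are assumptions, not taken from this diff:

    SourcePositionTableIterator it(bytecode_array->SourcePositionTable());
    for (; !it.done(); it.Advance()) {
      // Each entry maps a bytecode offset to a script source position;
      // assumed accessors: it.code_offset(), it.source_position().
    }
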
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
index 5bb33d4709..7ffbb970d5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiterals.golden
@@ -14,11 +14,11 @@ parameter count: 1
bytecode array length: 6
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(9),
+ /* 34 S> */ B(CreateArrayLiteral), U8(0), U8(3), U8(17),
/* 51 S> */ B(Return),
]
constant pool: [
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
@@ -34,22 +34,22 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(3), U8(9),
+ /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(4), U8(17),
B(Star), R(2),
B(LdaZero),
B(Star), R(1),
B(Ldar), R(0),
- /* 54 E> */ B(StaKeyedPropertySloppy), R(2), R(1), U8(4),
+ /* 54 E> */ B(StaKeyedPropertySloppy), R(2), R(1), U8(5),
B(LdaSmi), I8(1),
B(Star), R(1),
B(Ldar), R(0),
- /* 59 E> */ B(AddSmi), I8(1), U8(2),
- B(StaKeyedPropertySloppy), R(2), R(1), U8(4),
+ /* 59 E> */ B(AddSmi), I8(1), U8(3),
+ B(StaKeyedPropertySloppy), R(2), R(1), U8(5),
B(Ldar), R(2),
/* 66 S> */ B(Return),
]
constant pool: [
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
@@ -63,11 +63,11 @@ parameter count: 1
bytecode array length: 6
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateArrayLiteral), U8(0), U8(4), U8(0),
+ /* 34 S> */ B(CreateArrayLiteral), U8(0), U8(5), U8(0),
/* 62 S> */ B(Return),
]
constant pool: [
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
@@ -83,36 +83,36 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(9), U8(0),
+ /* 45 S> */ B(CreateArrayLiteral), U8(0), U8(10), U8(0),
B(Star), R(2),
B(LdaZero),
B(Star), R(1),
- B(CreateArrayLiteral), U8(1), U8(2), U8(9),
+ B(CreateArrayLiteral), U8(1), U8(3), U8(17),
B(Star), R(4),
B(LdaZero),
B(Star), R(3),
B(Ldar), R(0),
- /* 56 E> */ B(StaKeyedPropertySloppy), R(4), R(3), U8(3),
+ /* 56 E> */ B(StaKeyedPropertySloppy), R(4), R(3), U8(4),
B(Ldar), R(4),
- B(StaKeyedPropertySloppy), R(2), R(1), U8(10),
+ B(StaKeyedPropertySloppy), R(2), R(1), U8(11),
B(LdaSmi), I8(1),
B(Star), R(1),
- B(CreateArrayLiteral), U8(2), U8(6), U8(9),
+ B(CreateArrayLiteral), U8(2), U8(7), U8(17),
B(Star), R(4),
B(LdaZero),
B(Star), R(3),
B(Ldar), R(0),
- /* 68 E> */ B(AddSmi), I8(2), U8(5),
- B(StaKeyedPropertySloppy), R(4), R(3), U8(7),
+ /* 68 E> */ B(AddSmi), I8(2), U8(6),
+ B(StaKeyedPropertySloppy), R(4), R(3), U8(8),
B(Ldar), R(4),
- B(StaKeyedPropertySloppy), R(2), R(1), U8(10),
+ B(StaKeyedPropertySloppy), R(2), R(1), U8(11),
B(Ldar), R(2),
/* 77 S> */ B(Return),
]
constant pool: [
- CONSTANT_ELEMENTS_PAIR_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
+ TUPLE2_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
index 6755fb80a0..0d6065ce3e 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ArrayLiteralsWide.golden
@@ -783,7 +783,7 @@ bytecodes: [
B(Star), R(0),
/* 2591 S> */ B(LdaConstant), U8(255),
B(Star), R(0),
- /* 2601 S> */ B(Wide), B(CreateArrayLiteral), U16(256), U16(2), U8(9),
+ /* 2601 S> */ B(Wide), B(CreateArrayLiteral), U16(256), U16(3), U8(17),
/* 2619 S> */ B(Return),
]
constant pool: [
@@ -1043,7 +1043,7 @@ constant pool: [
HEAP_NUMBER_TYPE [1.23],
HEAP_NUMBER_TYPE [1.23],
HEAP_NUMBER_TYPE [1.23],
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
index e729ec44e3..bee7514ea5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/AssignmentsInBinaryExpression.golden
@@ -74,11 +74,11 @@ bytecodes: [
/* 46 S> */ B(LdaSmi), I8(100),
B(Mov), R(0), R(1),
B(Star), R(0),
- /* 52 E> */ B(Add), R(1), U8(2),
+ /* 52 E> */ B(Add), R(1), U8(3),
B(Star), R(1),
B(LdaSmi), I8(101),
B(Star), R(0),
- /* 64 E> */ B(Add), R(1), U8(3),
+ /* 64 E> */ B(Add), R(1), U8(4),
B(Star), R(0),
/* 77 S> */ B(Nop),
/* 87 S> */ B(Return),
@@ -104,13 +104,13 @@ bytecodes: [
B(Star), R(0),
/* 46 S> */ B(LdaSmi), I8(56),
B(Star), R(0),
- /* 59 E> */ B(Sub), R(0), U8(2),
+ /* 59 E> */ B(Sub), R(0), U8(3),
B(Star), R(1),
B(LdaSmi), I8(57),
B(Star), R(0),
- /* 63 E> */ B(Add), R(1), U8(3),
+ /* 63 E> */ B(Add), R(1), U8(4),
B(Star), R(0),
- /* 75 S> */ B(Inc), U8(4),
+ /* 75 S> */ B(Inc), U8(5),
B(Star), R(0),
/* 80 S> */ B(Nop),
/* 90 S> */ B(Return),
@@ -136,15 +136,15 @@ bytecodes: [
/* 76 S> */ B(LdaSmi), I8(1),
B(Mov), R(0), R(2),
B(Star), R(0),
- /* 56 E> */ B(Add), R(2), U8(2),
+ /* 56 E> */ B(Add), R(2), U8(3),
B(Star), R(2),
B(LdaSmi), I8(2),
B(Star), R(0),
- /* 66 E> */ B(Add), R(2), U8(3),
+ /* 66 E> */ B(Add), R(2), U8(4),
B(Star), R(2),
B(LdaSmi), I8(3),
B(Star), R(0),
- /* 76 E> */ B(Add), R(2), U8(4),
+ /* 76 E> */ B(Add), R(2), U8(5),
B(Star), R(1),
/* 87 S> */ B(Nop),
/* 97 S> */ B(Return),
@@ -170,15 +170,15 @@ bytecodes: [
/* 76 S> */ B(LdaSmi), I8(1),
B(Mov), R(0), R(1),
B(Star), R(0),
- /* 56 E> */ B(Add), R(1), U8(2),
+ /* 56 E> */ B(Add), R(1), U8(3),
B(Star), R(1),
B(LdaSmi), I8(2),
B(Star), R(0),
- /* 66 E> */ B(Add), R(1), U8(3),
+ /* 66 E> */ B(Add), R(1), U8(4),
B(Star), R(1),
B(LdaSmi), I8(3),
B(Star), R(0),
- /* 76 E> */ B(Add), R(1), U8(4),
+ /* 76 E> */ B(Add), R(1), U8(5),
B(Star), R(0),
/* 87 S> */ B(Nop),
/* 97 S> */ B(Return),
@@ -205,30 +205,30 @@ bytecodes: [
/* 54 S> */ B(LdaSmi), I8(1),
B(Mov), R(0), R(2),
B(Star), R(0),
- /* 63 E> */ B(Add), R(2), U8(2),
+ /* 63 E> */ B(Add), R(2), U8(3),
B(Star), R(2),
B(Ldar), R(0),
- /* 78 E> */ B(AddSmi), I8(1), U8(3),
+ /* 78 E> */ B(AddSmi), I8(1), U8(4),
B(Star), R(3),
B(LdaSmi), I8(2),
B(Star), R(1),
- /* 83 E> */ B(Mul), R(3), U8(4),
- /* 73 E> */ B(Add), R(2), U8(5),
+ /* 83 E> */ B(Mul), R(3), U8(5),
+ /* 73 E> */ B(Add), R(2), U8(6),
B(Star), R(2),
B(LdaSmi), I8(3),
B(Star), R(1),
- /* 93 E> */ B(Add), R(2), U8(6),
+ /* 93 E> */ B(Add), R(2), U8(7),
B(Star), R(2),
B(LdaSmi), I8(4),
B(Star), R(0),
- /* 103 E> */ B(Add), R(2), U8(7),
+ /* 103 E> */ B(Add), R(2), U8(8),
B(Star), R(2),
B(LdaSmi), I8(5),
B(Star), R(1),
- /* 113 E> */ B(Add), R(2), U8(8),
+ /* 113 E> */ B(Add), R(2), U8(9),
B(Star), R(2),
B(Ldar), R(1),
- /* 123 E> */ B(Add), R(2), U8(9),
+ /* 123 E> */ B(Add), R(2), U8(10),
/* 128 S> */ B(Return),
]
constant pool: [
@@ -251,20 +251,20 @@ bytecodes: [
/* 46 S> */ B(LdaSmi), I8(1),
B(Star), R(1),
B(Ldar), R(0),
- /* 55 E> */ B(Add), R(1), U8(2),
+ /* 55 E> */ B(Add), R(1), U8(3),
B(Star), R(1),
B(Ldar), R(0),
- B(ToNumber), R(2), U8(3),
+ B(ToNumber), R(2), U8(4),
B(Ldar), R(2),
- B(Inc), U8(3),
+ B(Inc), U8(4),
B(Star), R(0),
B(Ldar), R(2),
- /* 59 E> */ B(Add), R(1), U8(4),
+ /* 59 E> */ B(Add), R(1), U8(5),
B(Star), R(1),
B(Ldar), R(0),
- B(Inc), U8(5),
+ B(Inc), U8(6),
B(Star), R(0),
- /* 67 E> */ B(Add), R(1), U8(6),
+ /* 67 E> */ B(Add), R(1), U8(7),
/* 76 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
index acb9c36ce3..e86b7909f9 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicBlockToBoolean.golden
@@ -18,7 +18,7 @@ bytecodes: [
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanTrue), U8(8),
B(LdaZero),
- /* 56 E> */ B(TestLessThan), R(0), U8(2),
+ /* 56 E> */ B(TestLessThan), R(0), U8(3),
B(JumpIfFalse), U8(5),
/* 63 S> */ B(LdaSmi), I8(1),
/* 75 S> */ B(Return),
@@ -43,7 +43,7 @@ bytecodes: [
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanFalse), U8(11),
B(LdaZero),
- /* 56 E> */ B(TestLessThan), R(0), U8(2),
+ /* 56 E> */ B(TestLessThan), R(0), U8(3),
B(JumpIfFalse), U8(5),
/* 63 S> */ B(LdaSmi), I8(1),
/* 75 S> */ B(Return),
@@ -68,7 +68,7 @@ bytecodes: [
B(Star), R(0),
/* 45 S> */ B(JumpIfToBooleanTrue), U8(8),
B(LdaZero),
- /* 57 E> */ B(TestLessThan), R(0), U8(2),
+ /* 57 E> */ B(TestLessThan), R(0), U8(3),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(2),
B(Jump), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
index 75514d3ec7..0f24178a2d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BasicLoops.golden
@@ -71,21 +71,21 @@ bytecodes: [
/* 53 S> */ B(LdaSmi), I8(1),
B(Star), R(1),
/* 65 S> */ B(LdaSmi), I8(10),
- /* 65 E> */ B(TestLessThan), R(0), U8(2),
+ /* 65 E> */ B(TestLessThan), R(0), U8(3),
B(JumpIfFalse), U8(38),
/* 56 E> */ B(StackCheck),
/* 75 S> */ B(Ldar), R(1),
- /* 81 E> */ B(MulSmi), I8(12), U8(3),
+ /* 81 E> */ B(MulSmi), I8(12), U8(4),
B(Star), R(1),
/* 89 S> */ B(Ldar), R(0),
- /* 95 E> */ B(AddSmi), I8(1), U8(4),
+ /* 95 E> */ B(AddSmi), I8(1), U8(5),
B(Star), R(0),
/* 102 S> */ B(LdaSmi), I8(3),
- /* 108 E> */ B(TestEqual), R(0), U8(5),
+ /* 108 E> */ B(TestEqual), R(0), U8(6),
B(JumpIfFalse), U8(4),
/* 114 S> */ B(Jump), U8(11),
/* 126 S> */ B(LdaSmi), I8(4),
- /* 132 E> */ B(TestEqual), R(0), U8(6),
+ /* 132 E> */ B(TestEqual), R(0), U8(7),
B(JumpIfFalse), U8(4),
/* 138 S> */ B(Jump), U8(5),
B(JumpLoop), U8(40), I8(0),
@@ -119,27 +119,27 @@ bytecodes: [
B(Star), R(0),
/* 45 E> */ B(StackCheck),
/* 62 S> */ B(LdaZero),
- /* 68 E> */ B(TestLessThan), R(0), U8(2),
+ /* 68 E> */ B(TestLessThan), R(0), U8(3),
B(JumpIfFalse), U8(4),
/* 73 S> */ B(Jump), U8(45),
/* 85 S> */ B(LdaSmi), I8(3),
- /* 91 E> */ B(TestEqual), R(0), U8(3),
+ /* 91 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 97 S> */ B(Jump), U8(39),
/* 106 S> */ B(LdaSmi), I8(4),
- /* 112 E> */ B(TestEqual), R(0), U8(4),
+ /* 112 E> */ B(TestEqual), R(0), U8(5),
B(JumpIfFalse), U8(4),
/* 118 S> */ B(Jump), U8(30),
/* 127 S> */ B(LdaSmi), I8(10),
- /* 133 E> */ B(TestEqual), R(0), U8(5),
+ /* 133 E> */ B(TestEqual), R(0), U8(6),
B(JumpIfFalse), U8(4),
/* 140 S> */ B(Jump), U8(18),
/* 152 S> */ B(LdaSmi), I8(5),
- /* 158 E> */ B(TestEqual), R(0), U8(6),
+ /* 158 E> */ B(TestEqual), R(0), U8(7),
B(JumpIfFalse), U8(4),
/* 164 S> */ B(Jump), U8(12),
/* 173 S> */ B(Ldar), R(0),
- /* 179 E> */ B(AddSmi), I8(1), U8(7),
+ /* 179 E> */ B(AddSmi), I8(1), U8(8),
B(Star), R(0),
B(JumpLoop), U8(52), I8(0),
/* 186 S> */ B(Ldar), R(0),
@@ -172,19 +172,19 @@ bytecodes: [
B(Star), R(0),
/* 45 E> */ B(StackCheck),
/* 71 S> */ B(LdaSmi), I8(3),
- /* 71 E> */ B(TestLessThan), R(0), U8(2),
+ /* 71 E> */ B(TestLessThan), R(0), U8(3),
B(JumpIfFalse), U8(22),
/* 62 E> */ B(StackCheck),
/* 82 S> */ B(LdaSmi), I8(2),
- /* 88 E> */ B(TestEqual), R(0), U8(3),
+ /* 88 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 94 S> */ B(Jump), U8(12),
/* 105 S> */ B(Ldar), R(0),
- /* 111 E> */ B(AddSmi), I8(1), U8(4),
+ /* 111 E> */ B(AddSmi), I8(1), U8(5),
B(Star), R(0),
B(JumpLoop), U8(24), I8(1),
/* 122 S> */ B(Ldar), R(0),
- /* 128 E> */ B(AddSmi), I8(1), U8(5),
+ /* 128 E> */ B(AddSmi), I8(1), U8(6),
B(Star), R(0),
/* 135 S> */ B(Jump), U8(2),
/* 144 S> */ B(Ldar), R(0),
@@ -218,10 +218,10 @@ bytecodes: [
B(JumpIfToBooleanFalse), U8(20),
/* 57 E> */ B(StackCheck),
/* 71 S> */ B(Ldar), R(1),
- /* 77 E> */ B(MulSmi), I8(12), U8(2),
+ /* 77 E> */ B(MulSmi), I8(12), U8(3),
B(Star), R(1),
/* 85 S> */ B(Ldar), R(0),
- /* 91 E> */ B(SubSmi), I8(1), U8(3),
+ /* 91 E> */ B(SubSmi), I8(1), U8(4),
B(Star), R(0),
B(JumpLoop), U8(19), I8(0),
/* 98 S> */ B(Ldar), R(1),
@@ -254,21 +254,21 @@ bytecodes: [
B(Star), R(1),
/* 56 E> */ B(StackCheck),
/* 63 S> */ B(Ldar), R(1),
- /* 69 E> */ B(MulSmi), I8(10), U8(2),
+ /* 69 E> */ B(MulSmi), I8(10), U8(3),
B(Star), R(1),
/* 77 S> */ B(LdaSmi), I8(5),
- /* 83 E> */ B(TestEqual), R(0), U8(3),
+ /* 83 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 89 S> */ B(Jump), U8(28),
/* 98 S> */ B(LdaSmi), I8(6),
- /* 104 E> */ B(TestEqual), R(0), U8(4),
+ /* 104 E> */ B(TestEqual), R(0), U8(5),
B(JumpIfFalse), U8(4),
/* 110 S> */ B(Jump), U8(9),
/* 122 S> */ B(Ldar), R(0),
- /* 128 E> */ B(AddSmi), I8(1), U8(5),
+ /* 128 E> */ B(AddSmi), I8(1), U8(6),
B(Star), R(0),
/* 144 S> */ B(LdaSmi), I8(10),
- /* 144 E> */ B(TestLessThan), R(0), U8(6),
+ /* 144 E> */ B(TestLessThan), R(0), U8(7),
B(JumpIfFalse), U8(5),
B(JumpLoop), U8(40), I8(0),
/* 151 S> */ B(Ldar), R(1),
@@ -300,10 +300,10 @@ bytecodes: [
B(Star), R(1),
/* 57 E> */ B(StackCheck),
/* 64 S> */ B(Ldar), R(1),
- /* 70 E> */ B(MulSmi), I8(12), U8(2),
+ /* 70 E> */ B(MulSmi), I8(12), U8(3),
B(Star), R(1),
/* 78 S> */ B(Ldar), R(0),
- /* 84 E> */ B(SubSmi), I8(1), U8(3),
+ /* 84 E> */ B(SubSmi), I8(1), U8(4),
B(Star), R(0),
/* 98 S> */ B(JumpIfToBooleanFalse), U8(5),
B(JumpLoop), U8(17), I8(0),
@@ -337,17 +337,17 @@ bytecodes: [
B(Star), R(1),
/* 56 E> */ B(StackCheck),
/* 63 S> */ B(Nop),
- /* 69 E> */ B(MulSmi), I8(10), U8(2),
+ /* 69 E> */ B(MulSmi), I8(10), U8(3),
B(Star), R(1),
/* 77 S> */ B(LdaSmi), I8(5),
- /* 83 E> */ B(TestEqual), R(0), U8(3),
+ /* 83 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 89 S> */ B(Jump), U8(18),
/* 98 S> */ B(Ldar), R(0),
- /* 104 E> */ B(AddSmi), I8(1), U8(4),
+ /* 104 E> */ B(AddSmi), I8(1), U8(5),
B(Star), R(0),
/* 111 S> */ B(LdaSmi), I8(6),
- /* 117 E> */ B(TestEqual), R(0), U8(5),
+ /* 117 E> */ B(TestEqual), R(0), U8(6),
B(JumpIfFalse), U8(4),
/* 123 S> */ B(Jump), U8(2),
/* 150 S> */ B(Ldar), R(1),
@@ -380,17 +380,17 @@ bytecodes: [
B(Star), R(1),
/* 56 E> */ B(StackCheck),
/* 63 S> */ B(Ldar), R(1),
- /* 69 E> */ B(MulSmi), I8(10), U8(2),
+ /* 69 E> */ B(MulSmi), I8(10), U8(3),
B(Star), R(1),
/* 77 S> */ B(LdaSmi), I8(5),
- /* 83 E> */ B(TestEqual), R(0), U8(3),
+ /* 83 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 89 S> */ B(Jump), U8(21),
/* 98 S> */ B(Ldar), R(0),
- /* 104 E> */ B(AddSmi), I8(1), U8(4),
+ /* 104 E> */ B(AddSmi), I8(1), U8(5),
B(Star), R(0),
/* 111 S> */ B(LdaSmi), I8(6),
- /* 117 E> */ B(TestEqual), R(0), U8(5),
+ /* 117 E> */ B(TestEqual), R(0), U8(6),
B(JumpIfFalse), U8(4),
/* 123 S> */ B(Jump), U8(2),
B(JumpLoop), U8(33), I8(0),
@@ -420,15 +420,15 @@ bytecodes: [
B(Star), R(0),
/* 45 E> */ B(StackCheck),
/* 58 S> */ B(LdaSmi), I8(1),
- /* 64 E> */ B(TestEqual), R(0), U8(2),
+ /* 64 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
/* 70 S> */ B(Jump), U8(21),
/* 79 S> */ B(LdaSmi), I8(2),
- /* 85 E> */ B(TestEqual), R(0), U8(3),
+ /* 85 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 91 S> */ B(Jump), U8(9),
/* 103 S> */ B(Ldar), R(0),
- /* 109 E> */ B(AddSmi), I8(1), U8(4),
+ /* 109 E> */ B(AddSmi), I8(1), U8(5),
B(Star), R(0),
B(JumpLoop), U8(26), I8(0),
B(LdaUndefined),
@@ -456,15 +456,15 @@ bytecodes: [
B(Star), R(0),
/* 34 E> */ B(StackCheck),
/* 56 S> */ B(LdaSmi), I8(1),
- /* 62 E> */ B(TestEqual), R(0), U8(2),
+ /* 62 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
/* 68 S> */ B(Jump), U8(21),
/* 77 S> */ B(LdaSmi), I8(2),
- /* 83 E> */ B(TestEqual), R(0), U8(3),
+ /* 83 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 89 S> */ B(Jump), U8(9),
/* 101 S> */ B(Ldar), R(0),
- /* 107 E> */ B(AddSmi), I8(1), U8(4),
+ /* 107 E> */ B(AddSmi), I8(1), U8(5),
B(Star), R(0),
B(JumpLoop), U8(26), I8(0),
B(LdaUndefined),
@@ -492,15 +492,15 @@ bytecodes: [
B(Star), R(0),
/* 45 E> */ B(StackCheck),
/* 68 S> */ B(LdaSmi), I8(1),
- /* 74 E> */ B(TestEqual), R(0), U8(3),
+ /* 74 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 80 S> */ B(Jump), U8(21),
/* 89 S> */ B(LdaSmi), I8(2),
- /* 95 E> */ B(TestEqual), R(0), U8(4),
+ /* 95 E> */ B(TestEqual), R(0), U8(5),
B(JumpIfFalse), U8(4),
/* 101 S> */ B(Jump), U8(2),
/* 55 S> */ B(Ldar), R(0),
- /* 59 E> */ B(AddSmi), I8(1), U8(2),
+ /* 59 E> */ B(AddSmi), I8(1), U8(3),
B(Star), R(0),
B(JumpLoop), U8(26), I8(0),
B(LdaUndefined),
@@ -527,15 +527,15 @@ bytecodes: [
B(Star), R(0),
/* 34 E> */ B(StackCheck),
/* 66 S> */ B(LdaSmi), I8(1),
- /* 72 E> */ B(TestEqual), R(0), U8(3),
+ /* 72 E> */ B(TestEqual), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 78 S> */ B(Jump), U8(21),
/* 87 S> */ B(LdaSmi), I8(2),
- /* 93 E> */ B(TestEqual), R(0), U8(4),
+ /* 93 E> */ B(TestEqual), R(0), U8(5),
B(JumpIfFalse), U8(4),
/* 99 S> */ B(Jump), U8(2),
/* 53 S> */ B(Ldar), R(0),
- /* 57 E> */ B(AddSmi), I8(1), U8(2),
+ /* 57 E> */ B(AddSmi), I8(1), U8(3),
B(Star), R(0),
B(JumpLoop), U8(26), I8(0),
B(LdaUndefined),
@@ -564,15 +564,15 @@ bytecodes: [
/* 58 S> */ B(LdaZero),
B(Star), R(1),
/* 63 S> */ B(LdaSmi), I8(100),
- /* 63 E> */ B(TestLessThan), R(1), U8(2),
+ /* 63 E> */ B(TestLessThan), R(1), U8(3),
B(JumpIfFalse), U8(22),
/* 45 E> */ B(StackCheck),
/* 85 S> */ B(Ldar), R(0),
- /* 91 E> */ B(AddSmi), I8(1), U8(4),
+ /* 91 E> */ B(AddSmi), I8(1), U8(5),
B(Star), R(0),
/* 98 S> */ B(Jump), U8(2),
/* 72 S> */ B(Ldar), R(1),
- /* 76 E> */ B(AddSmi), I8(1), U8(3),
+ /* 76 E> */ B(AddSmi), I8(1), U8(4),
B(Star), R(1),
B(JumpLoop), U8(24), I8(0),
B(LdaUndefined),
@@ -604,10 +604,10 @@ bytecodes: [
B(JumpIfToBooleanFalse), U8(19),
/* 45 E> */ B(StackCheck),
/* 74 S> */ B(Ldar), R(0),
- /* 80 E> */ B(MulSmi), I8(12), U8(3),
+ /* 80 E> */ B(MulSmi), I8(12), U8(4),
B(Star), R(0),
/* 67 S> */ B(Ldar), R(1),
- B(Dec), U8(2),
+ B(Dec), U8(3),
B(Star), R(1),
B(JumpLoop), U8(18), I8(0),
/* 88 S> */ B(Ldar), R(0),
@@ -663,14 +663,14 @@ bytecodes: [
B(Star), R(1),
/* 45 E> */ B(StackCheck),
/* 76 S> */ B(Ldar), R(0),
- /* 82 E> */ B(AddSmi), I8(1), U8(3),
+ /* 82 E> */ B(AddSmi), I8(1), U8(4),
B(Star), R(0),
/* 89 S> */ B(LdaSmi), I8(20),
- /* 95 E> */ B(TestEqual), R(0), U8(4),
+ /* 95 E> */ B(TestEqual), R(0), U8(5),
B(JumpIfFalse), U8(4),
/* 102 S> */ B(Jump), U8(11),
/* 69 S> */ B(Ldar), R(1),
- B(Inc), U8(2),
+ B(Inc), U8(3),
B(Star), R(1),
B(JumpLoop), U8(23), I8(0),
/* 112 S> */ B(Ldar), R(0),
@@ -708,7 +708,7 @@ bytecodes: [
B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(1), U8(2), U8(2),
+ B(CreateClosure), U8(1), U8(3), U8(2),
B(Star), R(0),
/* 73 S> */ B(LdaSmi), I8(1),
/* 73 E> */ B(StaCurrentContextSlot), U8(4),
@@ -719,7 +719,7 @@ bytecodes: [
B(PopContext), R(3),
B(Jump), U8(10),
/* 126 S> */ B(LdaCurrentContextSlot), U8(4),
- B(Inc), U8(3),
+ B(Inc), U8(4),
/* 127 E> */ B(StaCurrentContextSlot), U8(4),
B(PopContext), R(3),
B(JumpLoop), U8(45), I8(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
index 4794f7f459..11073635cb 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/BreakableBlocks.golden
@@ -23,7 +23,7 @@ bytecodes: [
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 56 S> */ B(Nop),
- /* 62 E> */ B(AddSmi), I8(1), U8(2),
+ /* 62 E> */ B(AddSmi), I8(1), U8(3),
B(Star), R(0),
/* 69 S> */ B(Jump), U8(2),
/* 97 S> */ B(Ldar), R(0),
@@ -57,31 +57,31 @@ bytecodes: [
/* 71 S> */ B(LdaZero),
B(Star), R(1),
/* 76 S> */ B(LdaSmi), I8(10),
- /* 76 E> */ B(TestLessThan), R(1), U8(2),
+ /* 76 E> */ B(TestLessThan), R(1), U8(3),
B(JumpIfFalse), U8(54),
/* 58 E> */ B(StackCheck),
/* 106 S> */ B(LdaZero),
B(Star), R(2),
/* 111 S> */ B(LdaSmi), I8(3),
- /* 111 E> */ B(TestLessThan), R(2), U8(4),
+ /* 111 E> */ B(TestLessThan), R(2), U8(5),
B(JumpIfFalse), U8(34),
/* 93 E> */ B(StackCheck),
/* 129 S> */ B(Ldar), R(0),
- B(Inc), U8(6),
+ B(Inc), U8(7),
B(Star), R(0),
/* 142 S> */ B(Ldar), R(2),
- /* 148 E> */ B(Add), R(1), U8(7),
+ /* 148 E> */ B(Add), R(1), U8(8),
B(Star), R(3),
B(LdaSmi), I8(12),
- /* 152 E> */ B(TestEqual), R(3), U8(8),
+ /* 152 E> */ B(TestEqual), R(3), U8(9),
B(JumpIfFalse), U8(4),
/* 161 S> */ B(Jump), U8(20),
/* 118 S> */ B(Ldar), R(2),
- B(Inc), U8(5),
+ B(Inc), U8(6),
B(Star), R(2),
B(JumpLoop), U8(36), I8(1),
/* 84 S> */ B(Ldar), R(1),
- B(Inc), U8(3),
+ B(Inc), U8(4),
B(Star), R(1),
B(JumpLoop), U8(56), I8(0),
/* 188 S> */ B(Ldar), R(0),
@@ -110,7 +110,7 @@ bytecodes: [
B(PushContext), R(2),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(1), U8(2), U8(2),
+ B(CreateClosure), U8(1), U8(3), U8(2),
B(Star), R(0),
/* 53 S> */ B(LdaSmi), I8(10),
/* 53 E> */ B(StaCurrentContextSlot), U8(4),
@@ -157,7 +157,7 @@ bytecodes: [
B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(1), U8(2), U8(2),
+ B(CreateClosure), U8(1), U8(3), U8(2),
B(Star), R(0),
/* 76 S> */ B(LdaSmi), I8(2),
/* 76 E> */ B(StaCurrentContextSlot), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
index c2acbec102..519d4a3e41 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallAndSpread.golden
@@ -14,11 +14,11 @@ parameter count: 1
bytecode array length: 24
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdaGlobal), U8(0), U8(4),
+ /* 34 S> */ B(LdaGlobal), U8(0), U8(5),
B(Star), R(1),
- /* 38 E> */ B(LdaNamedProperty), R(1), U8(1), U8(6),
+ /* 38 E> */ B(LdaNamedProperty), R(1), U8(1), U8(7),
B(Star), R(0),
- B(CreateArrayLiteral), U8(2), U8(8), U8(9),
+ B(CreateArrayLiteral), U8(2), U8(9), U8(17),
B(Star), R(2),
/* 39 E> */ B(CallWithSpread), R(0), R(1), U8(2),
B(LdaUndefined),
@@ -27,7 +27,7 @@ bytecodes: [
constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["Math"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["max"],
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
@@ -41,13 +41,13 @@ parameter count: 1
bytecode array length: 27
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdaGlobal), U8(0), U8(4),
+ /* 34 S> */ B(LdaGlobal), U8(0), U8(5),
B(Star), R(1),
- /* 38 E> */ B(LdaNamedProperty), R(1), U8(1), U8(6),
+ /* 38 E> */ B(LdaNamedProperty), R(1), U8(1), U8(7),
B(Star), R(0),
B(LdaZero),
B(Star), R(2),
- B(CreateArrayLiteral), U8(2), U8(8), U8(9),
+ B(CreateArrayLiteral), U8(2), U8(9), U8(17),
B(Star), R(3),
/* 39 E> */ B(CallWithSpread), R(0), R(1), U8(3),
B(LdaUndefined),
@@ -56,7 +56,7 @@ bytecodes: [
constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["Math"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["max"],
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
@@ -72,21 +72,21 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 34 S> */ B(LdaUndefined),
B(Star), R(1),
- /* 34 E> */ B(LdaGlobal), U8(0), U8(2),
+ /* 34 E> */ B(LdaGlobal), U8(0), U8(3),
B(Star), R(0),
- B(LdaNamedProperty), R(0), U8(1), U8(4),
+ B(LdaNamedProperty), R(0), U8(1), U8(5),
B(Star), R(2),
B(LdaUndefined),
B(Star), R(4),
- B(CreateArrayLiteral), U8(2), U8(6), U8(9),
+ B(CreateArrayLiteral), U8(2), U8(7), U8(17),
B(Star), R(5),
B(LdaUndefined),
B(Star), R(6),
- B(CreateArrayLiteral), U8(3), U8(7), U8(9),
+ B(CreateArrayLiteral), U8(3), U8(8), U8(17),
B(Star), R(7),
B(CallJSRuntime), U8(%spread_iterable), R(6), U8(2),
B(Star), R(6),
- B(CreateArrayLiteral), U8(4), U8(8), U8(9),
+ B(CreateArrayLiteral), U8(4), U8(9), U8(17),
B(Star), R(7),
B(CallJSRuntime), U8(%spread_arguments), R(4), U8(4),
B(Star), R(4),
@@ -98,9 +98,9 @@ bytecodes: [
constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["Math"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["max"],
- CONSTANT_ELEMENTS_PAIR_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
+ TUPLE2_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
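Two independent changes meet in the CallAndSpread hunks above. In the constant pool, the boilerplate descriptor behind each array literal now prints as TUPLE2_TYPE instead of the dedicated CONSTANT_ELEMENTS_PAIR_TYPE, so the (elements-kind, constant-elements) pair appears to be stored in V8's generic two-field Tuple2 shape. Separately, the final flags operand of CreateArrayLiteral changes from U8(9) to U8(17), which suggests the literal-flags bit layout was rearranged in this roll; the literal being created is the same. The snippets are not quoted in these hunks, but spread calls of roughly this shape (a hedged reconstruction) produce the LdaGlobal/LdaNamedProperty/CreateArrayLiteral/CallWithSpread sequence shown:

  // Hypothetical repro sketch: CallWithSpread with an array-literal spread.
  Math.max(...[1, 2, 3]);       // CreateArrayLiteral, then CallWithSpread
  Math.max(0, ...[1, 2, 3]);    // leading fixed argument, as in the second hunk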
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
index c9c96e7a1f..fa0a905e6f 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallGlobal.golden
@@ -17,9 +17,9 @@ parameter count: 1
bytecode array length: 10
bytecodes: [
/* 27 E> */ B(StackCheck),
- /* 32 S> */ B(LdaGlobal), U8(0), U8(4),
+ /* 32 S> */ B(LdaGlobal), U8(0), U8(5),
B(Star), R(0),
- /* 39 E> */ B(CallUndefinedReceiver0), R(0), U8(2),
+ /* 39 E> */ B(CallUndefinedReceiver0), R(0), U8(3),
/* 44 S> */ B(Return),
]
constant pool: [
@@ -39,7 +39,7 @@ parameter count: 1
bytecode array length: 24
bytecodes: [
/* 34 E> */ B(StackCheck),
- /* 39 S> */ B(LdaGlobal), U8(0), U8(4),
+ /* 39 S> */ B(LdaGlobal), U8(0), U8(5),
B(Star), R(0),
B(LdaSmi), I8(1),
B(Star), R(1),
@@ -47,7 +47,7 @@ bytecodes: [
B(Star), R(2),
B(LdaSmi), I8(3),
B(Star), R(3),
- /* 46 E> */ B(CallUndefinedReceiver), R(0), R(1), U8(3), U8(2),
+ /* 46 E> */ B(CallUndefinedReceiver), R(0), R(1), U8(3), U8(3),
/* 58 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
index 33681df144..3f188e1703 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallLookupSlot.golden
@@ -22,9 +22,9 @@ bytecodes: [
B(Ldar), R(new_target),
B(StaCurrentContextSlot), U8(5),
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 34 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
/* 36 E> */ B(StaLookupSlotSloppy), U8(1),
- /* 52 S> */ B(LdaLookupGlobalSlot), U8(2), U8(5), U8(1),
+ /* 52 S> */ B(LdaLookupGlobalSlot), U8(2), U8(6), U8(1),
B(Star), R(1),
B(LdaConstant), U8(3),
B(Star), R(2),
@@ -39,10 +39,10 @@ bytecodes: [
B(Mov), R(closure), R(5),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(3), U8(6),
B(Star), R(1),
- /* 52 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(3),
- /* 62 S> */ B(LdaLookupGlobalSlot), U8(1), U8(9), U8(1),
+ /* 52 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(4),
+ /* 62 S> */ B(LdaLookupGlobalSlot), U8(1), U8(10), U8(1),
B(Star), R(1),
- /* 69 E> */ B(CallUndefinedReceiver0), R(1), U8(7),
+ /* 69 E> */ B(CallUndefinedReceiver0), R(1), U8(8),
/* 74 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
index bd3067686d..988efb4554 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallNew.golden
@@ -17,9 +17,9 @@ parameter count: 1
bytecode array length: 12
bytecodes: [
/* 45 E> */ B(StackCheck),
- /* 50 S> */ B(LdaGlobal), U8(0), U8(4),
+ /* 50 S> */ B(LdaGlobal), U8(0), U8(5),
B(Star), R(0),
- /* 57 E> */ B(Construct), R(0), R(0), U8(0), U8(2),
+ /* 57 E> */ B(Construct), R(0), R(0), U8(0), U8(3),
/* 68 S> */ B(Return),
]
constant pool: [
@@ -39,12 +39,12 @@ parameter count: 1
bytecode array length: 18
bytecodes: [
/* 58 E> */ B(StackCheck),
- /* 63 S> */ B(LdaGlobal), U8(0), U8(4),
+ /* 63 S> */ B(LdaGlobal), U8(0), U8(5),
B(Star), R(0),
B(LdaSmi), I8(3),
B(Star), R(1),
B(Ldar), R(0),
- /* 70 E> */ B(Construct), R(0), R(1), U8(1), U8(2),
+ /* 70 E> */ B(Construct), R(0), R(1), U8(1), U8(3),
/* 82 S> */ B(Return),
]
constant pool: [
@@ -69,7 +69,7 @@ parameter count: 1
bytecode array length: 26
bytecodes: [
/* 100 E> */ B(StackCheck),
- /* 105 S> */ B(LdaGlobal), U8(0), U8(4),
+ /* 105 S> */ B(LdaGlobal), U8(0), U8(5),
B(Star), R(0),
B(LdaSmi), I8(3),
B(Star), R(1),
@@ -78,7 +78,7 @@ bytecodes: [
B(LdaSmi), I8(5),
B(Star), R(3),
B(Ldar), R(0),
- /* 112 E> */ B(Construct), R(0), R(1), U8(3), U8(2),
+ /* 112 E> */ B(Construct), R(0), R(1), U8(3), U8(3),
/* 130 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
index 45f504ede1..3741fd6a23 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CallRuntime.golden
@@ -77,13 +77,13 @@ bytecodes: [
/* 10 E> */ B(StackCheck),
/* 15 S> */ B(LdaUndefined),
B(Star), R(0),
- B(CreateArrayLiteral), U8(0), U8(2), U8(9),
+ B(CreateArrayLiteral), U8(0), U8(3), U8(17),
B(Star), R(1),
B(CallJSRuntime), U8(%spread_iterable), R(0), U8(2),
/* 44 S> */ B(Return),
]
constant pool: [
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
index 6c57aaf3cd..49996b8fc3 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassAndSuperClass.golden
@@ -27,15 +27,15 @@ bytecodes: [
B(Mov), R(closure), R(0),
/* 99 E> */ B(StackCheck),
/* 104 S> */ B(LdaConstant), U8(0),
- /* 111 E> */ B(LdaKeyedProperty), R(closure), U8(4),
+ /* 111 E> */ B(LdaKeyedProperty), R(closure), U8(5),
B(Star), R(4),
B(LdaConstant), U8(1),
B(Star), R(5),
B(Mov), R(this), R(3),
B(CallRuntime), U16(Runtime::kLoadFromSuper), R(3), U8(3),
B(Star), R(1),
- /* 117 E> */ B(CallAnyReceiver), R(1), R(this), U8(1), U8(2),
- /* 126 E> */ B(AddSmi), I8(1), U8(8),
+ /* 117 E> */ B(CallAnyReceiver), R(1), R(this), U8(1), U8(3),
+ /* 126 E> */ B(AddSmi), I8(1), U8(9),
/* 131 S> */ B(Return),
]
constant pool: [
@@ -67,7 +67,7 @@ bytecodes: [
B(Mov), R(closure), R(0),
/* 125 E> */ B(StackCheck),
/* 130 S> */ B(LdaConstant), U8(0),
- /* 130 E> */ B(LdaKeyedProperty), R(closure), U8(2),
+ /* 130 E> */ B(LdaKeyedProperty), R(closure), U8(3),
B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(3),
@@ -76,7 +76,7 @@ bytecodes: [
B(Mov), R(this), R(1),
/* 138 E> */ B(CallRuntime), U16(Runtime::kStoreToSuper_Strict), R(1), U8(4),
/* 143 S> */ B(LdaConstant), U8(0),
- /* 150 E> */ B(LdaKeyedProperty), R(closure), U8(4),
+ /* 150 E> */ B(LdaKeyedProperty), R(closure), U8(5),
B(Star), R(2),
B(LdaConstant), U8(1),
B(Star), R(3),
@@ -117,7 +117,7 @@ bytecodes: [
B(LdaSmi), I8(1),
B(Star), R(3),
B(Ldar), R(0),
- /* 118 E> */ B(Construct), R(2), R(3), U8(1), U8(2),
+ /* 118 E> */ B(Construct), R(2), R(3), U8(1), U8(3),
B(Star), R(2),
B(Ldar), R(this),
B(JumpIfNotHole), U8(4),
@@ -129,7 +129,7 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kThrowSuperNotCalled), R(0), U8(0),
B(Star), R(2),
B(LdaSmi), I8(2),
- /* 136 E> */ B(StaNamedPropertyStrict), R(2), U8(0), U8(4),
+ /* 136 E> */ B(StaNamedPropertyStrict), R(2), U8(0), U8(5),
B(Ldar), R(this),
B(JumpIfNotHole), U8(7),
B(CallRuntime), U16(Runtime::kThrowSuperNotCalled), R(0), U8(0),
@@ -165,7 +165,7 @@ bytecodes: [
/* 117 S> */ B(Ldar), R(1),
B(GetSuperConstructor), R(2),
B(Ldar), R(0),
- /* 117 E> */ B(Construct), R(2), R(0), U8(0), U8(2),
+ /* 117 E> */ B(Construct), R(2), R(0), U8(0), U8(3),
B(Star), R(2),
B(Ldar), R(this),
B(JumpIfNotHole), U8(4),
@@ -177,7 +177,7 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kThrowSuperNotCalled), R(0), U8(0),
B(Star), R(2),
B(LdaSmi), I8(2),
- /* 134 E> */ B(StaNamedPropertyStrict), R(2), U8(0), U8(4),
+ /* 134 E> */ B(StaNamedPropertyStrict), R(2), U8(0), U8(5),
B(Ldar), R(this),
B(JumpIfNotHole), U8(7),
B(CallRuntime), U16(Runtime::kThrowSuperNotCalled), R(0), U8(0),
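The ClassAndSuperClass goldens apply the same one-slot shift to the derived-constructor protocol: GetSuperConstructor feeds Construct, and every use of `this` is guarded by JumpIfNotHole with Runtime::kThrowSuperNotCalled on the hole path. A derived constructor of roughly this shape (a hedged reconstruction; the snippet is not quoted in these hunks) exercises that sequence:

  // Hypothetical repro sketch: `this` stays "the hole" until super() returns,
  // so touching it first would take the kThrowSuperNotCalled path above.
  class A { constructor(x) { this.x_ = x; } }
  class B extends A {
    constructor() {
      super(1);       // GetSuperConstructor + Construct
      this.y_ = 2;    // JumpIfNotHole guard passes only after super()
    }
  }
  new B();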
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
index b95d6cb4df..ba739ca212 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ClassDeclarations.golden
@@ -12,39 +12,34 @@ snippet: "
speak() { console.log(this.name + ' is speaking.'); }
}
"
-frame size: 9
+frame size: 8
parameter count: 1
-bytecode array length: 75
+bytecode array length: 67
bytecodes: [
- B(LdaTheHole),
- B(Star), R(2),
/* 30 E> */ B(StackCheck),
+ B(CreateClosure), U8(0), U8(3), U8(2),
+ B(Star), R(2),
B(LdaTheHole),
- B(Star), R(0),
- /* 34 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
B(Star), R(3),
- B(LdaTheHole),
- B(Star), R(4),
B(LdaSmi), I8(34),
- B(Star), R(6),
+ B(Star), R(5),
B(Wide), B(LdaSmi), I16(148),
- B(Star), R(7),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
+ B(Star), R(6),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
B(LdaConstant), U8(1),
+ B(Star), R(5),
+ B(CreateClosure), U8(2), U8(4), U8(2),
B(Star), R(6),
- B(CreateClosure), U8(2), U8(3), U8(2),
- B(Star), R(7),
B(LdaSmi), I8(2),
- B(Star), R(8),
- B(Ldar), R(7),
- B(StaDataPropertyInLiteral), R(4), R(6), U8(1), U8(4),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(3), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
+ B(Star), R(7),
+ B(Ldar), R(6),
+ B(StaDataPropertyInLiteral), R(3), R(5), U8(1), U8(5),
+ B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(2), U8(1),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
B(Star), R(0),
B(Star), R(1),
- B(Star), R(2),
B(LdaUndefined),
/* 149 S> */ B(Return),
]
@@ -63,39 +58,34 @@ snippet: "
speak() { console.log(this.name + ' is speaking.'); }
}
"
-frame size: 9
+frame size: 8
parameter count: 1
-bytecode array length: 75
+bytecode array length: 67
bytecodes: [
- B(LdaTheHole),
- B(Star), R(2),
/* 30 E> */ B(StackCheck),
+ B(CreateClosure), U8(0), U8(3), U8(2),
+ B(Star), R(2),
B(LdaTheHole),
- B(Star), R(0),
- /* 34 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
B(Star), R(3),
- B(LdaTheHole),
- B(Star), R(4),
B(LdaSmi), I8(34),
- B(Star), R(6),
+ B(Star), R(5),
B(Wide), B(LdaSmi), I16(148),
- B(Star), R(7),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
+ B(Star), R(6),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
B(LdaConstant), U8(1),
+ B(Star), R(5),
+ B(CreateClosure), U8(2), U8(4), U8(2),
B(Star), R(6),
- B(CreateClosure), U8(2), U8(3), U8(2),
- B(Star), R(7),
B(LdaSmi), I8(2),
- B(Star), R(8),
- B(Ldar), R(7),
- B(StaDataPropertyInLiteral), R(4), R(6), U8(1), U8(4),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(3), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
+ B(Star), R(7),
+ B(Ldar), R(6),
+ B(StaDataPropertyInLiteral), R(3), R(5), U8(1), U8(5),
+ B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(2), U8(1),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
B(Star), R(0),
B(Star), R(1),
- B(Star), R(2),
B(LdaUndefined),
/* 149 S> */ B(Return),
]
@@ -116,55 +106,49 @@ snippet: "
static [n1]() { return n1; }
}
"
-frame size: 10
+frame size: 9
parameter count: 1
-bytecode array length: 116
+bytecode array length: 106
bytecodes: [
B(CreateFunctionContext), U8(2),
- B(PushContext), R(3),
- B(LdaTheHole),
- B(Star), R(2),
+ B(PushContext), R(2),
/* 30 E> */ B(StackCheck),
/* 43 S> */ B(LdaConstant), U8(0),
/* 43 E> */ B(StaCurrentContextSlot), U8(4),
/* 57 S> */ B(LdaConstant), U8(1),
/* 57 E> */ B(StaCurrentContextSlot), U8(5),
+ B(CreateClosure), U8(2), U8(3), U8(2),
+ B(Star), R(3),
B(LdaTheHole),
- B(Star), R(0),
- /* 62 S> */ B(CreateClosure), U8(2), U8(2), U8(2),
B(Star), R(4),
- B(LdaTheHole),
- B(Star), R(5),
B(LdaSmi), I8(62),
- B(Star), R(7),
+ B(Star), R(6),
B(Wide), B(LdaSmi), I16(128),
- B(Star), R(8),
- B(Mov), R(4), R(6),
- B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
- B(Star), R(5),
+ B(Star), R(7),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(Star), R(4),
B(LdaImmutableCurrentContextSlot), U8(4),
- /* 75 E> */ B(ToName), R(7),
- B(CreateClosure), U8(3), U8(3), U8(2),
- B(Star), R(8),
+ /* 75 E> */ B(ToName), R(6),
+ B(CreateClosure), U8(3), U8(4), U8(2),
+ B(Star), R(7),
B(LdaSmi), I8(2),
- B(Star), R(9),
- B(Ldar), R(8),
- B(StaDataPropertyInLiteral), R(5), R(7), U8(3), U8(5),
+ B(Star), R(8),
+ B(Ldar), R(7),
+ B(StaDataPropertyInLiteral), R(4), R(6), U8(3), U8(6),
B(LdaImmutableCurrentContextSlot), U8(5),
- /* 106 E> */ B(ToName), R(7),
+ /* 106 E> */ B(ToName), R(6),
B(LdaConstant), U8(4),
- B(TestEqualStrictNoFeedback), R(7),
- B(Mov), R(4), R(6),
+ B(TestEqualStrictNoFeedback), R(6),
+ B(Mov), R(3), R(5),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowStaticPrototypeError), R(0), U8(0),
- B(CreateClosure), U8(5), U8(4), U8(2),
- B(Star), R(8),
- B(StaDataPropertyInLiteral), R(6), R(7), U8(3), U8(7),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessorWithCheck), R(4), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(4), U8(1),
+ B(CreateClosure), U8(5), U8(5), U8(2),
+ B(StaDataPropertyInLiteral), R(5), R(6), U8(3), U8(8),
+ B(CallRuntime), U16(Runtime::kInstallClassNameAccessorWithCheck), R(3), U8(1),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
B(Star), R(0),
B(Star), R(1),
- B(Star), R(2),
B(LdaUndefined),
/* 129 S> */ B(Return),
]
@@ -185,37 +169,32 @@ snippet: "
class C { constructor() { count++; }}
return new C();
"
-frame size: 9
+frame size: 8
parameter count: 1
-bytecode array length: 64
+bytecode array length: 56
bytecodes: [
B(CreateFunctionContext), U8(1),
- B(PushContext), R(3),
- B(LdaTheHole),
- B(Star), R(2),
+ B(PushContext), R(2),
/* 30 E> */ B(StackCheck),
/* 46 S> */ B(LdaZero),
/* 46 E> */ B(StaCurrentContextSlot), U8(4),
+ B(CreateClosure), U8(0), U8(3), U8(2),
+ B(Star), R(3),
B(LdaTheHole),
- B(Star), R(0),
- /* 49 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
B(Star), R(4),
- B(LdaTheHole),
- B(Star), R(5),
B(LdaSmi), I8(49),
- B(Star), R(7),
+ B(Star), R(6),
B(LdaSmi), I8(86),
- B(Star), R(8),
- B(Mov), R(4), R(6),
- B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(4), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(4), U8(1),
+ B(Star), R(7),
+ B(Mov), R(3), R(5),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
+ B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(3), U8(1),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
B(Star), R(0),
B(Star), R(1),
- B(Star), R(2),
/* 87 S> */ B(Nop),
- /* 94 E> */ B(Construct), R(2), R(0), U8(0), U8(3),
+ /* 94 E> */ B(Construct), R(1), R(0), U8(0), U8(4),
/* 103 S> */ B(Return),
]
constant pool: [
@@ -229,51 +208,45 @@ snippet: "
(class {})
class E { static name () {}}
"
-frame size: 10
+frame size: 8
parameter count: 1
-bytecode array length: 102
+bytecode array length: 92
bytecodes: [
- B(LdaTheHole),
- B(Star), R(3),
/* 30 E> */ B(StackCheck),
- /* 35 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
- B(Star), R(4),
+ /* 34 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
+ B(Star), R(2),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star), R(3),
B(LdaSmi), I8(35),
- B(Star), R(7),
- B(LdaSmi), I8(43),
- B(Star), R(8),
- B(Mov), R(4), R(6),
- B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
B(Star), R(5),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(4), U8(1),
- B(Star), R(1),
- B(LdaTheHole),
- B(Star), R(0),
- /* 45 S> */ B(CreateClosure), U8(1), U8(3), U8(2),
- B(Star), R(4),
+ B(LdaSmi), I8(43),
+ B(Star), R(6),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
+ B(CreateClosure), U8(1), U8(4), U8(2),
+ B(Star), R(2),
B(LdaTheHole),
- B(Star), R(5),
+ B(Star), R(3),
B(LdaSmi), I8(45),
- B(Star), R(7),
- B(LdaSmi), I8(73),
- B(Star), R(8),
- B(Mov), R(4), R(6),
- B(CallRuntime), U16(Runtime::kDefineClass), R(5), U8(4),
B(Star), R(5),
+ B(LdaSmi), I8(73),
+ B(Star), R(6),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
B(LdaConstant), U8(2),
- B(Star), R(7),
- B(CreateClosure), U8(3), U8(4), U8(2),
- B(Star), R(8),
+ B(Star), R(5),
+ B(CreateClosure), U8(3), U8(5), U8(2),
+ B(Star), R(6),
B(LdaSmi), I8(2),
- B(Star), R(9),
- B(Ldar), R(8),
- B(StaDataPropertyInLiteral), R(6), R(7), U8(1), U8(5),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(4), U8(1),
+ B(Star), R(7),
+ B(Ldar), R(6),
+ B(StaDataPropertyInLiteral), R(4), R(5), U8(1), U8(6),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
B(Star), R(0),
- B(Star), R(2),
- B(Star), R(3),
+ B(Star), R(1),
B(LdaUndefined),
/* 74 S> */ B(Return),
]
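Across the ClassDeclarations goldens the rewrite is structural rather than a pure renumbering: the leading LdaTheHole/Star pairs that used to pre-initialize the temporaries for each class binding are gone, frame sizes drop by one or two registers, and the CreateClosure for the constructor moves ahead of the hole setup. The temporal dead zone itself is unchanged and still observable at the language level, as this illustration (not from the patch) shows:

  // Reading a class binding before its declaration still throws, even though
  // the generated prologue no longer stores the-hole into scratch registers.
  try {
    new C();                    // ReferenceError: C is still in its TDZ here
  } catch (e) {
    // e instanceof ReferenceError
  }
  class C {}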
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
index 466820c5ab..82c42d0290 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompareNil.golden
@@ -276,7 +276,7 @@ bytecodes: [
B(JumpIfUndefined), U8(12),
/* 64 E> */ B(StackCheck),
/* 92 S> */ B(Ldar), R(1),
- B(Inc), U8(5),
+ B(Inc), U8(6),
B(Star), R(1),
B(JumpLoop), U8(11), I8(0),
B(LdaUndefined),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
index 485dad5e10..3546d96f5c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CompoundExpressions.golden
@@ -16,7 +16,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(AddSmi), I8(2), U8(2),
+ /* 45 S> */ B(AddSmi), I8(2), U8(3),
B(Star), R(0),
B(LdaUndefined),
/* 53 S> */ B(Return),
@@ -37,7 +37,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(DivSmi), I8(2), U8(2),
+ /* 45 S> */ B(DivSmi), I8(2), U8(3),
B(Star), R(0),
B(LdaUndefined),
/* 53 S> */ B(Return),
@@ -56,11 +56,11 @@ parameter count: 1
bytecode array length: 22
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(1),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(1),
B(Mov), R(1), R(0),
- /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(3),
- B(MulSmi), I8(2), U8(5),
- /* 61 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(6),
+ /* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(4),
+ B(MulSmi), I8(2), U8(6),
+ /* 61 E> */ B(StaNamedPropertySloppy), R(1), U8(1), U8(7),
B(LdaUndefined),
/* 67 S> */ B(Return),
]
@@ -80,13 +80,13 @@ parameter count: 1
bytecode array length: 25
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(1),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(1),
B(Mov), R(1), R(0),
/* 52 S> */ B(LdaSmi), I8(1),
B(Star), R(2),
- B(LdaKeyedProperty), R(0), U8(3),
- B(BitwiseXorSmi), I8(2), U8(5),
- /* 57 E> */ B(StaKeyedPropertySloppy), R(0), R(2), U8(6),
+ B(LdaKeyedProperty), R(1), U8(4),
+ B(BitwiseXorSmi), I8(2), U8(6),
+ /* 57 E> */ B(StaKeyedPropertySloppy), R(1), R(2), U8(7),
B(LdaUndefined),
/* 63 S> */ B(Return),
]
@@ -109,9 +109,9 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
/* 42 E> */ B(StaCurrentContextSlot), U8(4),
- /* 45 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 45 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
/* 75 S> */ B(LdaCurrentContextSlot), U8(4),
- B(BitwiseOrSmi), I8(24), U8(3),
+ B(BitwiseOrSmi), I8(24), U8(4),
/* 77 E> */ B(StaCurrentContextSlot), U8(4),
B(LdaUndefined),
/* 84 S> */ B(Return),
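Besides the slot renumbering, the CompoundExpressions hunks change which register the property accesses go through: after B(Mov), R(1), R(0), the loads and stores now use R(1), the register holding the freshly created literal, rather than R(0), the variable's register. Both alias the same object, so this is presumably a register-selection change in the bytecode generator rather than a behavioral one. A compound assignment of roughly this shape (a hedged reconstruction) is what these hunks correspond to:

  // Hypothetical repro sketch for the LdaNamedProperty/MulSmi/StaNamedProperty hunk:
  var a = { val: 2 };
  a.val *= 2;                   // load the slot, MulSmi, store the slot back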
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
index db44e2bd87..42af60e152 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Conditional.golden
@@ -51,7 +51,7 @@ bytecodes: [
/* 34 S> */ B(LdaZero),
B(Star), R(0),
B(LdaSmi), I8(1),
- /* 43 E> */ B(TestLessThan), R(0), U8(2),
+ /* 43 E> */ B(TestLessThan), R(0), U8(3),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(2),
B(Jump), U8(4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
index 6ba2ad4d94..77f8da1fa5 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariable.golden
@@ -11,10 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 7
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
@@ -32,10 +30,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 7
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
@@ -82,10 +78,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 17
+bytecode array length: 14
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
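The ConstVariable goldens make the size win easy to verify: the bytecode array length drops from 10 to 7 (and 17 to 14 below), exactly the three bytes of the removed prologue, one byte for B(LdaTheHole) and two for B(Star), R(0). A plain function-scope constant needs no hole initialization because no use can precede the store:

  // Hypothetical repro sketch matching the LdaSmi 10 / Star r0 hunk above:
  const x = 10;                 // LdaSmi, Star -- no LdaTheHole prologue needed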
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
index c92627b183..68562f6539 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ConstVariableContextSlot.golden
@@ -17,7 +17,7 @@ bytecodes: [
B(PushContext), R(1),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(0), U8(2), U8(2),
+ B(CreateClosure), U8(0), U8(3), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10),
@@ -43,7 +43,7 @@ bytecodes: [
B(PushContext), R(1),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(0), U8(2), U8(2),
+ B(CreateClosure), U8(0), U8(3), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10),
@@ -69,7 +69,7 @@ bytecodes: [
B(PushContext), R(1),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(0), U8(2), U8(2),
+ B(CreateClosure), U8(0), U8(3), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 47 S> */ B(LdaSmi), I8(20),
@@ -103,7 +103,7 @@ bytecodes: [
B(PushContext), R(1),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(0), U8(2), U8(2),
+ B(CreateClosure), U8(0), U8(3), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(10),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
index 63e48fd10a..452e470a46 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextParameters.golden
@@ -20,7 +20,7 @@ bytecodes: [
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
/* 10 E> */ B(StackCheck),
- /* 19 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 19 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
/* 52 S> */ B(Return),
]
constant pool: [
@@ -43,7 +43,7 @@ bytecodes: [
B(Ldar), R(arg0),
B(StaCurrentContextSlot), U8(4),
/* 10 E> */ B(StackCheck),
- /* 27 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 27 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
B(Star), R(0),
/* 53 S> */ B(LdaCurrentContextSlot), U8(4),
/* 66 S> */ B(Return),
@@ -70,7 +70,7 @@ bytecodes: [
B(Ldar), R(arg2),
B(StaCurrentContextSlot), U8(4),
/* 10 E> */ B(StackCheck),
- /* 29 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 29 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
/* 61 S> */ B(Return),
]
constant pool: [
@@ -93,7 +93,7 @@ bytecodes: [
/* 10 E> */ B(StackCheck),
/* 26 S> */ B(Ldar), R(this),
/* 26 E> */ B(StaCurrentContextSlot), U8(4),
- /* 32 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 32 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
/* 65 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
index c763cca3e9..f674459590 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ContextVariables.golden
@@ -16,7 +16,7 @@ bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
- /* 41 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 41 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
/* 71 S> */ B(Return),
]
constant pool: [
@@ -38,7 +38,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
/* 42 E> */ B(StaCurrentContextSlot), U8(4),
- /* 45 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 45 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
/* 75 S> */ B(Return),
]
constant pool: [
@@ -62,7 +62,7 @@ bytecodes: [
/* 42 E> */ B(StaCurrentContextSlot), U8(4),
/* 53 S> */ B(LdaSmi), I8(2),
/* 53 E> */ B(StaCurrentContextSlot), U8(5),
- /* 56 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 56 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
/* 92 S> */ B(Return),
]
constant pool: [
@@ -82,9 +82,9 @@ bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
- /* 41 S> */ B(CreateClosure), U8(0), U8(4), U8(2),
+ /* 41 S> */ B(CreateClosure), U8(0), U8(5), U8(2),
B(Star), R(1),
- /* 64 E> */ B(CallUndefinedReceiver0), R(1), U8(2),
+ /* 64 E> */ B(CallUndefinedReceiver0), R(1), U8(3),
/* 68 S> */ B(LdaCurrentContextSlot), U8(4),
/* 78 S> */ B(Return),
]
@@ -118,7 +118,7 @@ bytecodes: [
B(StaCurrentContextSlot), U8(4),
/* 69 S> */ B(LdaSmi), I8(2),
/* 69 E> */ B(StaCurrentContextSlot), U8(4),
- /* 72 S> */ B(CreateClosure), U8(1), U8(2), U8(2),
+ /* 72 S> */ B(CreateClosure), U8(1), U8(3), U8(2),
B(PopContext), R(0),
/* 104 S> */ B(Return),
]
@@ -899,9 +899,9 @@ bytecodes: [
/* 3421 E> */ B(StaCurrentContextSlot), U8(254),
/* 3435 S> */ B(LdaZero),
/* 3435 E> */ B(StaCurrentContextSlot), U8(255),
- /* 3438 S> */ B(LdaGlobal), U8(0), U8(4),
+ /* 3438 S> */ B(LdaGlobal), U8(0), U8(5),
B(Star), R(1),
- /* 3438 E> */ B(CallUndefinedReceiver0), R(1), U8(2),
+ /* 3438 E> */ B(CallUndefinedReceiver0), R(1), U8(3),
/* 3454 S> */ B(LdaSmi), I8(100),
/* 3454 E> */ B(Wide), B(StaCurrentContextSlot), U16(256),
/* 3459 S> */ B(Wide), B(LdaCurrentContextSlot), U16(256),
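The ContextParameters and ContextVariables files above show the other recurring shape in this diff: when an inner closure captures a parameter or local, the value is stored with StaCurrentContextSlot and the inner function is materialized with CreateClosure, whose middle operand, the feedback slot backing the closure's feedback cell, is the index shifting from U8(2) to U8(3) here. A capture of roughly this shape (a hedged reconstruction) produces that pair:

  // Hypothetical repro sketch: `arg` is context-allocated because the
  // returned closure captures it; the closure literal itself carries the
  // feedback-slot operand that is renumbered in these hunks.
  function outer(arg) {
    return function inner() {   // CreateClosure with slot + flags operands
      return arg;               // reads the captured context slot
    };
  }
  outer(1)();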
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
index 5a2146243c..6173d3fc36 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CountOperators.golden
@@ -16,7 +16,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(Inc), U8(2),
+ /* 45 S> */ B(Inc), U8(3),
B(Star), R(0),
/* 57 S> */ B(Return),
]
@@ -36,9 +36,9 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(ToNumber), R(1), U8(2),
+ /* 45 S> */ B(ToNumber), R(1), U8(3),
B(Ldar), R(1),
- B(Inc), U8(2),
+ B(Inc), U8(3),
B(Star), R(0),
B(Ldar), R(1),
/* 57 S> */ B(Return),
@@ -59,7 +59,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(Dec), U8(2),
+ /* 45 S> */ B(Dec), U8(3),
B(Star), R(0),
/* 57 S> */ B(Return),
]
@@ -79,9 +79,9 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(ToNumber), R(1), U8(2),
+ /* 45 S> */ B(ToNumber), R(1), U8(3),
B(Ldar), R(1),
- B(Dec), U8(2),
+ B(Dec), U8(3),
B(Star), R(0),
B(Ldar), R(1),
/* 57 S> */ B(Return),
@@ -100,13 +100,13 @@ parameter count: 1
bytecode array length: 27
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(1),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(1),
B(Mov), R(1), R(0),
- /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(3),
- B(ToNumber), R(2), U8(7),
+ /* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(4),
+ B(ToNumber), R(2), U8(8),
B(Ldar), R(2),
- B(Inc), U8(7),
- /* 66 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(5),
+ B(Inc), U8(8),
+ /* 66 E> */ B(StaNamedPropertySloppy), R(1), U8(1), U8(6),
B(Ldar), R(2),
/* 70 S> */ B(Return),
]
@@ -126,11 +126,11 @@ parameter count: 1
bytecode array length: 20
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(1),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(1),
B(Mov), R(1), R(0),
- /* 54 S> */ B(LdaNamedProperty), R(0), U8(1), U8(3),
- B(Dec), U8(7),
- /* 65 E> */ B(StaNamedPropertySloppy), R(0), U8(1), U8(5),
+ /* 54 S> */ B(LdaNamedProperty), R(1), U8(1), U8(4),
+ B(Dec), U8(8),
+ /* 65 E> */ B(StaNamedPropertySloppy), R(1), U8(1), U8(6),
/* 70 S> */ B(Return),
]
constant pool: [
@@ -151,14 +151,14 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 60 S> */ B(CreateObjectLiteral), U8(1), U8(2), U8(1), R(2),
+ /* 60 S> */ B(CreateObjectLiteral), U8(1), U8(3), U8(1), R(2),
B(Mov), R(2), R(1),
/* 72 S> */ B(Ldar), R(0),
- /* 81 E> */ B(LdaKeyedProperty), R(1), U8(3),
- B(ToNumber), R(4), U8(7),
+ /* 81 E> */ B(LdaKeyedProperty), R(2), U8(4),
+ B(ToNumber), R(4), U8(8),
B(Ldar), R(4),
- B(Dec), U8(7),
- /* 86 E> */ B(StaKeyedPropertySloppy), R(1), R(0), U8(5),
+ B(Dec), U8(8),
+ /* 86 E> */ B(StaKeyedPropertySloppy), R(2), R(0), U8(6),
B(Ldar), R(4),
/* 90 S> */ B(Return),
]
@@ -180,12 +180,12 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 60 S> */ B(CreateObjectLiteral), U8(1), U8(2), U8(1), R(2),
+ /* 60 S> */ B(CreateObjectLiteral), U8(1), U8(3), U8(1), R(2),
B(Mov), R(2), R(1),
/* 72 S> */ B(Ldar), R(0),
- /* 83 E> */ B(LdaKeyedProperty), R(1), U8(3),
- B(Inc), U8(7),
- /* 87 E> */ B(StaKeyedPropertySloppy), R(1), R(0), U8(5),
+ /* 83 E> */ B(LdaKeyedProperty), R(2), U8(4),
+ B(Inc), U8(8),
+ /* 87 E> */ B(StaKeyedPropertySloppy), R(2), R(0), U8(6),
/* 90 S> */ B(Return),
]
constant pool: [
@@ -208,10 +208,10 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
/* 42 E> */ B(StaCurrentContextSlot), U8(4),
- /* 53 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 53 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
B(Star), R(0),
/* 78 S> */ B(LdaCurrentContextSlot), U8(4),
- B(Inc), U8(3),
+ B(Inc), U8(4),
/* 87 E> */ B(StaCurrentContextSlot), U8(4),
/* 90 S> */ B(Return),
]
@@ -234,12 +234,12 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
/* 42 E> */ B(StaCurrentContextSlot), U8(4),
- /* 53 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 53 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
B(Star), R(0),
/* 78 S> */ B(LdaCurrentContextSlot), U8(4),
- B(ToNumber), R(2), U8(3),
+ B(ToNumber), R(2), U8(4),
B(Ldar), R(2),
- B(Dec), U8(3),
+ B(Dec), U8(4),
/* 86 E> */ B(StaCurrentContextSlot), U8(4),
B(Ldar), R(2),
/* 90 S> */ B(Return),
@@ -261,19 +261,19 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 44 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 55 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(9),
+ /* 55 S> */ B(CreateArrayLiteral), U8(0), U8(3), U8(17),
B(Star), R(1),
/* 63 S> */ B(Ldar), R(0),
- B(ToNumber), R(3), U8(3),
+ B(ToNumber), R(3), U8(4),
B(Ldar), R(3),
- B(Inc), U8(3),
+ B(Inc), U8(4),
B(Star), R(0),
B(LdaSmi), I8(2),
- /* 79 E> */ B(StaKeyedPropertySloppy), R(1), R(3), U8(4),
+ /* 79 E> */ B(StaKeyedPropertySloppy), R(1), R(3), U8(5),
/* 84 S> */ B(Return),
]
constant pool: [
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
index cc073cfd66..53b25e5e84 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateArguments.golden
@@ -39,7 +39,7 @@ bytecodes: [
B(Star), R(0),
/* 10 E> */ B(StackCheck),
/* 15 S> */ B(LdaZero),
- /* 31 E> */ B(LdaKeyedProperty), R(0), U8(2),
+ /* 31 E> */ B(LdaKeyedProperty), R(0), U8(3),
/* 36 S> */ B(Return),
]
constant pool: [
@@ -84,7 +84,7 @@ bytecodes: [
B(Star), R(0),
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaZero),
- /* 32 E> */ B(LdaKeyedProperty), R(0), U8(2),
+ /* 32 E> */ B(LdaKeyedProperty), R(0), U8(3),
/* 37 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
index fa824bb49c..6e010af912 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/CreateRestParameter.golden
@@ -13,15 +13,13 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 13
+bytecode array length: 8
bytecodes: [
B(CreateRestParameter),
B(Star), R(0),
- B(LdaTheHole),
- B(Star), R(1),
/* 10 E> */ B(StackCheck),
- B(Mov), R(0), R(1),
- /* 26 S> */ B(Ldar), R(1),
+ B(Star), R(1),
+ /* 26 S> */ B(Nop),
/* 43 S> */ B(Return),
]
constant pool: [
@@ -36,14 +34,10 @@ snippet: "
"
frame size: 3
parameter count: 2
-bytecode array length: 19
+bytecode array length: 13
bytecodes: [
B(CreateRestParameter),
B(Star), R(0),
- B(LdaTheHole),
- B(Star), R(1),
- B(LdaTheHole),
- B(Star), R(2),
/* 10 E> */ B(StackCheck),
B(Mov), R(arg0), R(1),
B(Mov), R(0), R(2),
@@ -62,19 +56,15 @@ snippet: "
"
frame size: 3
parameter count: 2
-bytecode array length: 21
+bytecode array length: 15
bytecodes: [
B(CreateRestParameter),
B(Star), R(0),
- B(LdaTheHole),
- B(Star), R(1),
- B(LdaTheHole),
- B(Star), R(2),
/* 10 E> */ B(StackCheck),
B(Mov), R(arg0), R(1),
B(Mov), R(0), R(2),
/* 29 S> */ B(LdaZero),
- /* 44 E> */ B(LdaKeyedProperty), R(2), U8(2),
+ /* 44 E> */ B(LdaKeyedProperty), R(2), U8(3),
/* 49 S> */ B(Return),
]
constant pool: [
@@ -89,25 +79,21 @@ snippet: "
"
frame size: 5
parameter count: 2
-bytecode array length: 33
+bytecode array length: 27
bytecodes: [
B(CreateUnmappedArguments),
B(Star), R(3),
B(CreateRestParameter),
B(Star), R(0),
- B(LdaTheHole),
- B(Star), R(1),
- B(LdaTheHole),
- B(Star), R(2),
/* 10 E> */ B(StackCheck),
B(Mov), R(arg0), R(1),
B(Mov), R(0), R(2),
/* 29 S> */ B(LdaZero),
- /* 44 E> */ B(LdaKeyedProperty), R(2), U8(2),
+ /* 44 E> */ B(LdaKeyedProperty), R(2), U8(3),
B(Star), R(4),
B(LdaZero),
- /* 59 E> */ B(LdaKeyedProperty), R(3), U8(4),
- /* 48 E> */ B(Add), R(4), U8(6),
+ /* 59 E> */ B(LdaKeyedProperty), R(3), U8(5),
+ /* 48 E> */ B(Add), R(4), U8(7),
/* 64 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
index bf7a371605..dafe4f64aa 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
@@ -86,7 +86,7 @@ bytecodes: [
B(Star), R(3),
B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
/* 11 S> */ B(LdaSmi), I8(2),
- /* 12 E> */ B(StaGlobalSloppy), U8(1), U8(4),
+ /* 12 E> */ B(StaGlobalSloppy), U8(1), U8(5),
B(Star), R(0),
/* 15 S> */ B(Return),
]
@@ -113,9 +113,9 @@ bytecodes: [
B(Mov), R(closure), R(3),
B(CallRuntime), U16(Runtime::kDeclareGlobalsForInterpreter), R(1), U8(3),
/* 0 E> */ B(StackCheck),
- /* 16 S> */ B(LdaGlobal), U8(1), U8(2),
+ /* 16 S> */ B(LdaGlobal), U8(1), U8(3),
B(Star), R(1),
- /* 16 E> */ B(CallUndefinedReceiver0), R(1), U8(5),
+ /* 16 E> */ B(CallUndefinedReceiver0), R(1), U8(6),
B(Star), R(0),
/* 20 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
index 08b3f47e13..7519af381c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Delete.golden
@@ -14,10 +14,10 @@ parameter count: 1
bytecode array length: 14
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(1),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(1),
B(Mov), R(1), R(0),
/* 56 S> */ B(LdaConstant), U8(1),
- B(DeletePropertySloppy), R(0),
+ B(DeletePropertySloppy), R(1),
/* 75 S> */ B(Return),
]
constant pool: [
@@ -36,10 +36,10 @@ parameter count: 1
bytecode array length: 14
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 56 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(1),
+ /* 56 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(1),
B(Mov), R(1), R(0),
/* 70 S> */ B(LdaConstant), U8(1),
- B(DeletePropertyStrict), R(0),
+ B(DeletePropertyStrict), R(1),
/* 89 S> */ B(Return),
]
constant pool: [
@@ -58,10 +58,10 @@ parameter count: 1
bytecode array length: 14
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(1),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(1),
B(Mov), R(1), R(0),
/* 56 S> */ B(LdaSmi), I8(2),
- B(DeletePropertySloppy), R(0),
+ B(DeletePropertySloppy), R(1),
/* 76 S> */ B(Return),
]
constant pool: [
@@ -103,10 +103,10 @@ bytecodes: [
B(CreateFunctionContext), U8(1),
B(PushContext), R(0),
/* 30 E> */ B(StackCheck),
- /* 56 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(1),
+ /* 56 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(1),
B(Ldar), R(1),
/* 56 E> */ B(StaCurrentContextSlot), U8(4),
- /* 64 S> */ B(CreateClosure), U8(1), U8(3), U8(2),
+ /* 64 S> */ B(CreateClosure), U8(1), U8(4), U8(2),
/* 93 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(1),
B(LdaSmi), I8(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
index 412c5ccbe6..331d7eb488 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DoExpression.golden
@@ -58,7 +58,7 @@ bytecodes: [
/* 34 E> */ B(StackCheck),
/* 56 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
- /* 69 S> */ B(Inc), U8(2),
+ /* 69 S> */ B(Inc), U8(3),
B(Star), R(0),
B(Star), R(1),
/* 74 S> */ B(Jump), U8(2),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
index 4ef1c7654c..d77537933c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Eval.golden
@@ -22,7 +22,7 @@ bytecodes: [
B(Ldar), R(new_target),
B(StaCurrentContextSlot), U8(5),
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(LdaLookupGlobalSlot), U8(0), U8(4), U8(1),
+ /* 34 S> */ B(LdaLookupGlobalSlot), U8(0), U8(5), U8(1),
B(Star), R(1),
B(LdaConstant), U8(1),
B(Star), R(2),
@@ -37,7 +37,7 @@ bytecodes: [
B(Mov), R(closure), R(5),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(3), U8(6),
B(Star), R(1),
- /* 41 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(2),
+ /* 41 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(3),
/* 53 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
index 895c6ff7f7..0ebdd7ea95 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForAwaitOf.golden
@@ -14,451 +14,412 @@ snippet: "
}
f();
"
-frame size: 19
+frame size: 17
parameter count: 1
-bytecode array length: 1027
+bytecode array length: 946
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(39),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(4),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(3),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(132),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrueConstant), U8(12),
- B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrueConstant), U8(14),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kAbort), R(5), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CreateFunctionContext), U8(13),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(CreateFunctionContext), U8(12),
+ B(PushContext), R(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
/* 16 E> */ B(StackCheck),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(6),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(5), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(LdaUndefined),
- B(Star), R(5),
- B(CallJSRuntime), U8(%async_function_promise_create), R(5), U8(1),
- B(StaCurrentContextSlot), U8(11),
- B(Mov), R(context), R(7),
- B(Mov), R(context), R(8),
+ B(Star), R(3),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(3), U8(1),
+ B(StaCurrentContextSlot), U8(10),
+ B(Mov), R(context), R(5),
+ B(Mov), R(context), R(6),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(0),
- B(PushContext), R(1),
+ B(CreateBlockContext), U8(3),
+ B(PushContext), R(7),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaZero),
- B(StaContextSlot), R(1), U8(9), U8(0),
+ B(StaContextSlot), R(7), U8(8), U8(0),
+ B(Mov), R(context), R(10),
B(Mov), R(context), R(11),
- B(Mov), R(context), R(12),
- /* 43 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(9),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(2), U8(7),
+ /* 43 S> */ B(CreateArrayLiteral), U8(4), U8(3), U8(17),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(5), U8(8),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(14),
- B(CallProperty0), R(14), R(13), U8(9),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(12), U8(10),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(13), U8(3), U8(3),
- B(Star), R(14),
- B(CallProperty0), R(14), R(13), U8(5),
- B(Star), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(14), U8(1),
- /* 43 E> */ B(StaContextSlot), R(1), U8(7), U8(0),
+ B(LdaNamedProperty), R(12), U8(6), U8(4),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(12), U8(6),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(13), U8(1),
+ /* 43 E> */ B(StaContextSlot), R(7), U8(6), U8(0),
+ B(Ldar), R(0),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(0),
B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(16),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(75),
+ B(TestEqualStrictNoFeedback), R(0),
+ B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
- /* 40 S> */ B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
+ /* 40 S> */ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
+ B(Star), R(12),
+ /* 40 S> */ B(LdaContextSlot), R(7), U8(6), U8(0),
B(Star), R(14),
- B(LdaNamedProperty), R(14), U8(4), U8(13),
+ B(LdaNamedProperty), R(14), U8(8), U8(14),
B(Star), R(13),
- /* 40 E> */ B(CallProperty0), R(13), R(14), U8(11),
- B(StaContextSlot), R(1), U8(10), U8(0),
+ /* 40 E> */ B(CallProperty0), R(13), R(14), U8(12),
+ B(StaContextSlot), R(7), U8(9), U8(0),
/* 40 S> */ B(LdaUndefined),
B(Star), R(13),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
B(Star), R(14),
- B(LdaContextSlot), R(1), U8(10), U8(0),
+ B(LdaContextSlot), R(7), U8(9), U8(0),
B(Star), R(15),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(16),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(13), U8(4),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(13),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
- B(Star), R(14),
B(LdaZero),
- B(SuspendGenerator), R(14), U8(2),
+ B(SuspendGenerator), R(12), U8(2),
B(Ldar), R(13),
/* 57 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(14), U8(1),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(14), U8(1),
- B(Star), R(16),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(12), U8(1),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(12), U8(1),
+ B(Star), R(14),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(16),
- B(JumpIfTrue), U8(30),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(28),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(16),
- B(JumpIfTrue), U8(21),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(19),
B(LdaTrue),
- B(Star), R(18),
- B(Mov), R(15), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(17), U8(2),
- B(Star), R(10),
- B(LdaZero),
+ B(Star), R(16),
+ B(Mov), R(13), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(15), U8(2),
B(Star), R(9),
+ B(LdaZero),
+ B(Star), R(8),
B(Jump), U8(155),
- B(Ldar), R(15),
+ B(Ldar), R(13),
B(ReThrow),
- B(Ldar), R(15),
- /* 40 E> */ B(StaContextSlot), R(1), U8(8), U8(0),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(15), U8(1),
+ B(Ldar), R(13),
+ /* 40 E> */ B(StaContextSlot), R(7), U8(7), U8(0),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(13), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(13),
- B(LdaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
- B(LdaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(5), U8(15),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(9), U8(16),
B(JumpIfToBooleanTrue), U8(56),
- B(LdaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(6), U8(17),
- B(StaContextSlot), R(1), U8(12), U8(0),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(10), U8(18),
+ B(StaContextSlot), R(7), U8(11), U8(0),
B(LdaSmi), I8(2),
- B(StaContextSlot), R(1), U8(9), U8(0),
- B(LdaContextSlot), R(1), U8(12), U8(0),
- B(StaContextSlot), R(1), U8(6), U8(0),
+ B(StaContextSlot), R(7), U8(8), U8(0),
+ B(LdaContextSlot), R(7), U8(11), U8(0),
+ B(StaContextSlot), R(7), U8(5), U8(0),
/* 23 E> */ B(StackCheck),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(7),
- B(PushContext), R(2),
+ B(CreateBlockContext), U8(11),
+ B(PushContext), R(12),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(LdaContextSlot), R(1), U8(6), U8(0),
+ B(LdaContextSlot), R(7), U8(5), U8(0),
B(StaCurrentContextSlot), U8(4),
- B(PopContext), R(2),
+ B(PopContext), R(12),
B(LdaZero),
- B(StaContextSlot), R(1), U8(9), U8(0),
- B(JumpLoop), U8(222), I8(0),
+ B(StaContextSlot), R(7), U8(8), U8(0),
+ B(JumpLoop), U8(219), I8(0),
B(Jump), U8(48),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(8), U8(9),
B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(12), U8(13),
+ B(Star), R(11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(PushContext), R(2),
- B(LdaContextSlot), R(1), U8(9), U8(0),
+ B(Ldar), R(11),
+ B(PushContext), R(12),
+ B(LdaContextSlot), R(7), U8(8), U8(0),
B(Star), R(13),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(13), U8(19),
+ B(TestEqualStrict), R(13), U8(20),
B(JumpIfFalse), U8(8),
B(LdaSmi), I8(1),
- B(StaContextSlot), R(1), U8(9), U8(0),
+ B(StaContextSlot), R(7), U8(8), U8(0),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
- B(PopContext), R(2),
+ B(PopContext), R(12),
B(LdaSmi), I8(-1),
- B(Star), R(9),
+ B(Star), R(8),
B(Jump), U8(8),
- B(Star), R(10),
- B(LdaSmi), I8(1),
B(Star), R(9),
+ B(LdaSmi), I8(1),
+ B(Star), R(8),
B(LdaTheHole),
B(SetPendingMessage),
+ B(Star), R(10),
+ B(LdaContextSlot), R(7), U8(8), U8(0),
B(Star), R(11),
- B(LdaContextSlot), R(1), U8(9), U8(0),
- B(Star), R(12),
B(LdaZero),
- B(TestEqualStrict), R(12), U8(20),
- B(JumpIfTrueConstant), U8(16),
- B(LdaContextSlot), R(1), U8(7), U8(0),
- B(Star), R(12),
- B(LdaNamedProperty), R(12), U8(10), U8(21),
- B(StaContextSlot), R(1), U8(13), U8(0),
- B(LdaContextSlot), R(1), U8(13), U8(0),
+ B(TestEqualStrict), R(11), U8(21),
+ B(JumpIfTrueConstant), U8(18),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(14), U8(22),
+ B(StaContextSlot), R(7), U8(12), U8(0),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(JumpConstant), U8(15),
- B(LdaContextSlot), R(1), U8(9), U8(0),
- B(Star), R(12),
+ B(JumpConstant), U8(17),
+ B(LdaContextSlot), R(7), U8(8), U8(0),
+ B(Star), R(11),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(12), U8(24),
- B(JumpIfFalse), U8(179),
- B(LdaContextSlot), R(1), U8(13), U8(0),
+ B(TestEqualStrict), R(11), U8(25),
+ B(JumpIfFalse), U8(175),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(132),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(11),
+ B(LdaConstant), U8(15),
B(Star), R(12),
- B(LdaConstant), U8(11),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
B(Throw),
- B(Mov), R(context), R(12),
- B(LdaContextSlot), R(1), U8(13), U8(0),
+ B(Mov), R(context), R(11),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
+ B(Star), R(12),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
B(Star), R(13),
- B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
B(Star), R(14),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(StaContextSlot), R(1), U8(14), U8(0),
+ B(StaContextSlot), R(7), U8(13), U8(0),
B(LdaUndefined),
B(Star), R(13),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
B(Star), R(14),
- B(LdaContextSlot), R(1), U8(14), U8(0),
+ B(LdaContextSlot), R(7), U8(13), U8(0),
B(Star), R(15),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(16),
B(CallJSRuntime), U8(%async_function_await_caught), R(13), U8(4),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(13),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
- B(Star), R(14),
B(LdaSmi), I8(1),
- B(SuspendGenerator), R(14), U8(2),
+ B(SuspendGenerator), R(12), U8(2),
B(Ldar), R(13),
/* 57 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(14), U8(1),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(14), U8(1),
- B(Star), R(16),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(12), U8(1),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(12), U8(1),
+ B(Star), R(14),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(16),
- B(JumpIfTrue), U8(38),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(36),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(16),
- B(JumpIfTrue), U8(29),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(27),
B(LdaTrue),
- B(Star), R(18),
- B(Mov), R(15), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(17), U8(2),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Star), R(6),
+ B(Star), R(16),
+ B(Mov), R(13), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(15), U8(2),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Star), R(4),
B(LdaZero),
- B(Star), R(5),
- B(JumpConstant), U8(18),
- B(Ldar), R(15),
+ B(Star), R(3),
+ B(JumpConstant), U8(22),
+ B(Ldar), R(13),
B(ReThrow),
- B(Ldar), R(15),
+ B(Ldar), R(13),
B(Jump), U8(20),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(8), U8(13),
B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(12), U8(16),
+ B(Star), R(11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(PushContext), R(2),
- B(PopContext), R(2),
- B(Jump), U8(158),
- B(LdaContextSlot), R(1), U8(13), U8(0),
+ B(Ldar), R(11),
+ B(PushContext), R(12),
+ B(PopContext), R(12),
+ B(Jump), U8(153),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
+ B(Star), R(11),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
B(Star), R(12),
- B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
B(Star), R(13),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(StaContextSlot), R(1), U8(16), U8(0),
+ B(StaContextSlot), R(7), U8(15), U8(0),
B(LdaUndefined),
B(Star), R(12),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
B(Star), R(13),
- B(LdaContextSlot), R(1), U8(16), U8(0),
+ B(LdaContextSlot), R(7), U8(15), U8(0),
B(Star), R(14),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(15),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(12), U8(4),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(12),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
- B(Star), R(13),
B(LdaSmi), I8(2),
- B(SuspendGenerator), R(13), U8(2),
+ B(SuspendGenerator), R(11), U8(2),
B(Ldar), R(12),
/* 57 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(13), U8(1),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(13), U8(1),
- B(Star), R(15),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(11), U8(1),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(11), U8(1),
+ B(Star), R(13),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(15),
- B(JumpIfTrue), U8(39),
+ B(TestEqualStrictNoFeedback), R(13),
+ B(JumpIfTrue), U8(36),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(15),
- B(JumpIfTrue), U8(30),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(13),
+ B(JumpIfTrue), U8(27),
B(LdaTrue),
- B(Star), R(17),
- B(Mov), R(14), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(16), U8(2),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Star), R(6),
- B(LdaSmi), I8(1),
- B(Star), R(5),
- B(Jump), U8(168),
- B(Ldar), R(14),
+ B(Star), R(15),
+ B(Mov), R(12), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(14), U8(2),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(160),
+ B(Ldar), R(12),
B(ReThrow),
- B(Ldar), R(14),
- B(StaContextSlot), R(1), U8(15), U8(0),
- B(LdaContextSlot), R(1), U8(15), U8(0),
- B(Star), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(12), U8(1),
+ B(Ldar), R(12),
+ B(StaContextSlot), R(7), U8(14), U8(0),
+ B(LdaContextSlot), R(7), U8(14), U8(0),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(13),
- B(LdaContextSlot), R(1), U8(15), U8(0),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(Ldar), R(11),
+ B(LdaContextSlot), R(7), U8(14), U8(0),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(Ldar), R(10),
B(SetPendingMessage),
+ B(Ldar), R(8),
+ B(SwitchOnSmiNoFeedback), U8(19), U8(2), I8(0),
+ B(Jump), U8(25),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(21),
- B(Jump), U8(26),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(LdaSmi), I8(2),
- B(Star), R(5),
- B(Mov), R(10), R(6),
- B(Jump), U8(101),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Ldar), R(10),
+ B(Star), R(3),
+ B(Mov), R(9), R(4),
+ B(Jump), U8(99),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Ldar), R(9),
B(ReThrow),
- B(PopContext), R(1),
+ B(PopContext), R(7),
B(LdaUndefined),
- B(Star), R(9),
- B(LdaCurrentContextSlot), U8(11),
- B(Star), R(10),
+ B(Star), R(7),
+ B(LdaCurrentContextSlot), U8(10),
+ B(Star), R(8),
B(LdaUndefined),
- B(Star), R(11),
- B(CallJSRuntime), U8(%promise_resolve), R(9), U8(3),
- B(LdaCurrentContextSlot), U8(11),
- B(Star), R(6),
- B(LdaSmi), I8(3),
- B(Star), R(5),
- B(Jump), U8(68),
- B(Jump), U8(54),
B(Star), R(9),
+ B(CallJSRuntime), U8(%promise_resolve), R(7), U8(3),
+ B(LdaCurrentContextSlot), U8(10),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(67),
+ B(Jump), U8(53),
+ B(Star), R(7),
B(Ldar), R(closure),
- B(CreateCatchContext), R(9), U8(8), U8(17),
- B(Star), R(8),
+ B(CreateCatchContext), R(7), U8(12), U8(21),
+ B(Star), R(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(8),
- B(PushContext), R(1),
+ B(Ldar), R(6),
+ B(PushContext), R(7),
B(LdaUndefined),
+ B(Star), R(8),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(9),
- B(LdaContextSlot), R(1), U8(11), U8(0),
- B(Star), R(10),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(11),
+ B(Star), R(10),
B(LdaFalse),
- B(Star), R(12),
- B(CallJSRuntime), U8(%promise_internal_reject), R(9), U8(4),
- B(LdaContextSlot), R(1), U8(11), U8(0),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Star), R(6),
- B(LdaSmi), I8(4),
- B(Star), R(5),
+ B(Star), R(11),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(8), U8(4),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
B(Jump), U8(14),
B(LdaSmi), I8(-1),
- B(Star), R(5),
+ B(Star), R(3),
B(Jump), U8(8),
- B(Star), R(6),
- B(LdaSmi), I8(5),
- B(Star), R(5),
+ B(Star), R(4),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(7),
+ B(Star), R(5),
B(LdaUndefined),
- B(Star), R(8),
- B(LdaCurrentContextSlot), U8(11),
- B(Star), R(9),
- B(CallJSRuntime), U8(%async_function_promise_release), R(8), U8(2),
- B(Ldar), R(7),
+ B(Star), R(6),
+ B(LdaCurrentContextSlot), U8(10),
+ B(Star), R(7),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(6), U8(2),
+ B(Ldar), R(5),
B(SetPendingMessage),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(34),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(31),
- B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(28),
- B(LdaSmi), I8(3),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(25),
- B(LdaSmi), I8(4),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(22),
- B(LdaSmi), I8(5),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(19),
- B(Jump), U8(20),
- B(Ldar), R(6),
- /* 57 S> */ B(Return),
- B(Ldar), R(6),
- /* 57 S> */ B(Return),
- B(Ldar), R(6),
- /* 57 S> */ B(Return),
- B(Ldar), R(6),
- /* 57 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(23), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(4),
/* 57 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(4),
B(ReThrow),
B(LdaUndefined),
/* 57 S> */ B(Return),
]
constant pool: [
+ Smi [116],
+ Smi [544],
+ Smi [688],
FIXED_ARRAY_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
+ Smi [83],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -467,20 +428,22 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- Smi [557],
FIXED_ARRAY_TYPE,
- Smi [699],
- Smi [348],
- Smi [371],
+ Smi [339],
+ Smi [362],
+ Smi [6],
+ Smi [22],
FIXED_ARRAY_TYPE,
- Smi [317],
+ Smi [304],
+ Smi [6],
+ Smi [9],
]
handlers: [
- [80, 940, 946],
- [83, 886, 888],
- [100, 423, 429],
- [103, 375, 377],
- [516, 642, 644],
+ [60, 900, 906],
+ [63, 847, 849],
+ [80, 400, 406],
+ [83, 352, 354],
+ [493, 615, 617],
]
---
@@ -490,480 +453,435 @@ snippet: "
}
f();
"
-frame size: 19
+frame size: 17
parameter count: 1
-bytecode array length: 1085
+bytecode array length: 992
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(39),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(4),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(3),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(132),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrueConstant), U8(12),
- B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrueConstant), U8(14),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kAbort), R(5), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CreateFunctionContext), U8(13),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(CreateFunctionContext), U8(12),
+ B(PushContext), R(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
/* 16 E> */ B(StackCheck),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(6),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(5), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(LdaUndefined),
- B(Star), R(5),
- B(CallJSRuntime), U8(%async_function_promise_create), R(5), U8(1),
- B(StaCurrentContextSlot), U8(11),
- B(Mov), R(context), R(7),
- B(Mov), R(context), R(8),
+ B(Star), R(3),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(3), U8(1),
+ B(StaCurrentContextSlot), U8(10),
+ B(Mov), R(context), R(5),
+ B(Mov), R(context), R(6),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(0),
- B(PushContext), R(1),
+ B(CreateBlockContext), U8(3),
+ B(PushContext), R(7),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaZero),
- B(StaContextSlot), R(1), U8(9), U8(0),
+ B(StaContextSlot), R(7), U8(8), U8(0),
+ B(Mov), R(context), R(10),
B(Mov), R(context), R(11),
- B(Mov), R(context), R(12),
- /* 43 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(9),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(2), U8(7),
+ /* 43 S> */ B(CreateArrayLiteral), U8(4), U8(3), U8(17),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(5), U8(8),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(14),
- B(CallProperty0), R(14), R(13), U8(9),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(12), U8(10),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(13), U8(3), U8(3),
- B(Star), R(14),
- B(CallProperty0), R(14), R(13), U8(5),
- B(Star), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(14), U8(1),
- /* 43 E> */ B(StaContextSlot), R(1), U8(7), U8(0),
+ B(LdaNamedProperty), R(12), U8(6), U8(4),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(12), U8(6),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(13), U8(1),
+ /* 43 E> */ B(StaContextSlot), R(7), U8(6), U8(0),
+ B(Ldar), R(0),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(0),
B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(16),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(75),
+ B(TestEqualStrictNoFeedback), R(0),
+ B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
- /* 40 S> */ B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
+ /* 40 S> */ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
+ B(Star), R(12),
+ /* 40 S> */ B(LdaContextSlot), R(7), U8(6), U8(0),
B(Star), R(14),
- B(LdaNamedProperty), R(14), U8(4), U8(13),
+ B(LdaNamedProperty), R(14), U8(8), U8(14),
B(Star), R(13),
- /* 40 E> */ B(CallProperty0), R(13), R(14), U8(11),
- B(StaContextSlot), R(1), U8(10), U8(0),
+ /* 40 E> */ B(CallProperty0), R(13), R(14), U8(12),
+ B(StaContextSlot), R(7), U8(9), U8(0),
/* 40 S> */ B(LdaUndefined),
B(Star), R(13),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
B(Star), R(14),
- B(LdaContextSlot), R(1), U8(10), U8(0),
+ B(LdaContextSlot), R(7), U8(9), U8(0),
B(Star), R(15),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(16),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(13), U8(4),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(13),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
- B(Star), R(14),
B(LdaZero),
- B(SuspendGenerator), R(14), U8(2),
+ B(SuspendGenerator), R(12), U8(2),
B(Ldar), R(13),
/* 68 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(14), U8(1),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(14), U8(1),
- B(Star), R(16),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(12), U8(1),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(12), U8(1),
+ B(Star), R(14),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(16),
- B(JumpIfTrue), U8(30),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(28),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(16),
- B(JumpIfTrue), U8(21),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(19),
B(LdaTrue),
- B(Star), R(18),
- B(Mov), R(15), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(17), U8(2),
- B(Star), R(10),
- B(LdaZero),
+ B(Star), R(16),
+ B(Mov), R(13), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(15), U8(2),
B(Star), R(9),
+ B(LdaZero),
+ B(Star), R(8),
B(Jump), U8(167),
- B(Ldar), R(15),
+ B(Ldar), R(13),
B(ReThrow),
- B(Ldar), R(15),
- /* 40 E> */ B(StaContextSlot), R(1), U8(8), U8(0),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(15), U8(1),
+ B(Ldar), R(13),
+ /* 40 E> */ B(StaContextSlot), R(7), U8(7), U8(0),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(13), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(13),
- B(LdaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
- B(LdaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(5), U8(15),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(9), U8(16),
B(JumpIfToBooleanTrue), U8(68),
- B(LdaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(6), U8(17),
- B(StaContextSlot), R(1), U8(12), U8(0),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(10), U8(18),
+ B(StaContextSlot), R(7), U8(11), U8(0),
B(LdaSmi), I8(2),
- B(StaContextSlot), R(1), U8(9), U8(0),
- B(LdaContextSlot), R(1), U8(12), U8(0),
- B(StaContextSlot), R(1), U8(6), U8(0),
+ B(StaContextSlot), R(7), U8(8), U8(0),
+ B(LdaContextSlot), R(7), U8(11), U8(0),
+ B(StaContextSlot), R(7), U8(5), U8(0),
/* 23 E> */ B(StackCheck),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(7),
- B(PushContext), R(2),
+ B(CreateBlockContext), U8(11),
+ B(PushContext), R(12),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(LdaContextSlot), R(1), U8(6), U8(0),
+ B(LdaContextSlot), R(7), U8(5), U8(0),
B(StaCurrentContextSlot), U8(4),
/* 56 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
- B(PopContext), R(2),
- B(PopContext), R(2),
- B(PopContext), R(2),
- B(PopContext), R(2),
- B(PopContext), R(2),
- B(PopContext), R(2),
- B(Star), R(10),
- B(LdaSmi), I8(1),
+ B(PopContext), R(12),
+ B(PopContext), R(12),
+ B(PopContext), R(12),
+ B(PopContext), R(12),
+ B(PopContext), R(12),
+ B(PopContext), R(12),
B(Star), R(9),
+ B(LdaSmi), I8(1),
+ B(Star), R(8),
B(Jump), U8(62),
B(Jump), U8(48),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(8), U8(9),
B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(12), U8(13),
+ B(Star), R(11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(PushContext), R(2),
- B(LdaContextSlot), R(1), U8(9), U8(0),
+ B(Ldar), R(11),
+ B(PushContext), R(12),
+ B(LdaContextSlot), R(7), U8(8), U8(0),
B(Star), R(13),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(13), U8(19),
+ B(TestEqualStrict), R(13), U8(20),
B(JumpIfFalse), U8(8),
B(LdaSmi), I8(1),
- B(StaContextSlot), R(1), U8(9), U8(0),
+ B(StaContextSlot), R(7), U8(8), U8(0),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
- B(PopContext), R(2),
+ B(PopContext), R(12),
B(LdaSmi), I8(-1),
- B(Star), R(9),
+ B(Star), R(8),
B(Jump), U8(8),
- B(Star), R(10),
- B(LdaSmi), I8(2),
B(Star), R(9),
+ B(LdaSmi), I8(2),
+ B(Star), R(8),
B(LdaTheHole),
B(SetPendingMessage),
+ B(Star), R(10),
+ B(LdaContextSlot), R(7), U8(8), U8(0),
B(Star), R(11),
- B(LdaContextSlot), R(1), U8(9), U8(0),
- B(Star), R(12),
B(LdaZero),
- B(TestEqualStrict), R(12), U8(20),
- B(JumpIfTrueConstant), U8(16),
- B(LdaContextSlot), R(1), U8(7), U8(0),
- B(Star), R(12),
- B(LdaNamedProperty), R(12), U8(10), U8(21),
- B(StaContextSlot), R(1), U8(13), U8(0),
- B(LdaContextSlot), R(1), U8(13), U8(0),
+ B(TestEqualStrict), R(11), U8(21),
+ B(JumpIfTrueConstant), U8(18),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(14), U8(22),
+ B(StaContextSlot), R(7), U8(12), U8(0),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(JumpConstant), U8(15),
- B(LdaContextSlot), R(1), U8(9), U8(0),
- B(Star), R(12),
+ B(JumpConstant), U8(17),
+ B(LdaContextSlot), R(7), U8(8), U8(0),
+ B(Star), R(11),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(12), U8(24),
- B(JumpIfFalse), U8(179),
- B(LdaContextSlot), R(1), U8(13), U8(0),
+ B(TestEqualStrict), R(11), U8(25),
+ B(JumpIfFalse), U8(175),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(132),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(11),
+ B(LdaConstant), U8(15),
B(Star), R(12),
- B(LdaConstant), U8(11),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
B(Throw),
- B(Mov), R(context), R(12),
- B(LdaContextSlot), R(1), U8(13), U8(0),
+ B(Mov), R(context), R(11),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
+ B(Star), R(12),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
B(Star), R(13),
- B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
B(Star), R(14),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(StaContextSlot), R(1), U8(14), U8(0),
+ B(StaContextSlot), R(7), U8(13), U8(0),
B(LdaUndefined),
B(Star), R(13),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
B(Star), R(14),
- B(LdaContextSlot), R(1), U8(14), U8(0),
+ B(LdaContextSlot), R(7), U8(13), U8(0),
B(Star), R(15),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(16),
B(CallJSRuntime), U8(%async_function_await_caught), R(13), U8(4),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(13),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
- B(Star), R(14),
B(LdaSmi), I8(1),
- B(SuspendGenerator), R(14), U8(2),
+ B(SuspendGenerator), R(12), U8(2),
B(Ldar), R(13),
/* 68 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(14), U8(1),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(14), U8(1),
- B(Star), R(16),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(12), U8(1),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(12), U8(1),
+ B(Star), R(14),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(16),
- B(JumpIfTrue), U8(38),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(36),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(16),
- B(JumpIfTrue), U8(29),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(27),
B(LdaTrue),
- B(Star), R(18),
- B(Mov), R(15), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(17), U8(2),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Star), R(6),
+ B(Star), R(16),
+ B(Mov), R(13), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(15), U8(2),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Star), R(4),
B(LdaZero),
- B(Star), R(5),
- B(JumpConstant), U8(18),
- B(Ldar), R(15),
+ B(Star), R(3),
+ B(JumpConstant), U8(23),
+ B(Ldar), R(13),
B(ReThrow),
- B(Ldar), R(15),
+ B(Ldar), R(13),
B(Jump), U8(20),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(8), U8(13),
B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(12), U8(16),
+ B(Star), R(11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(PushContext), R(2),
- B(PopContext), R(2),
- B(Jump), U8(158),
- B(LdaContextSlot), R(1), U8(13), U8(0),
+ B(Ldar), R(11),
+ B(PushContext), R(12),
+ B(PopContext), R(12),
+ B(Jump), U8(153),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
+ B(Star), R(11),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
B(Star), R(12),
- B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
B(Star), R(13),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(StaContextSlot), R(1), U8(16), U8(0),
+ B(StaContextSlot), R(7), U8(15), U8(0),
B(LdaUndefined),
B(Star), R(12),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
B(Star), R(13),
- B(LdaContextSlot), R(1), U8(16), U8(0),
+ B(LdaContextSlot), R(7), U8(15), U8(0),
B(Star), R(14),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(15),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(12), U8(4),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(12),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
- B(Star), R(13),
B(LdaSmi), I8(2),
- B(SuspendGenerator), R(13), U8(2),
+ B(SuspendGenerator), R(11), U8(2),
B(Ldar), R(12),
/* 68 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(13), U8(1),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(13), U8(1),
- B(Star), R(15),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(11), U8(1),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(11), U8(1),
+ B(Star), R(13),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(15),
- B(JumpIfTrue), U8(39),
+ B(TestEqualStrictNoFeedback), R(13),
+ B(JumpIfTrue), U8(36),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(15),
- B(JumpIfTrue), U8(30),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(13),
+ B(JumpIfTrue), U8(27),
B(LdaTrue),
- B(Star), R(17),
- B(Mov), R(14), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(16), U8(2),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Star), R(6),
- B(LdaSmi), I8(1),
- B(Star), R(5),
- B(Jump), U8(191),
- B(Ldar), R(14),
+ B(Star), R(15),
+ B(Mov), R(12), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(14), U8(2),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(177),
+ B(Ldar), R(12),
B(ReThrow),
- B(Ldar), R(14),
- B(StaContextSlot), R(1), U8(15), U8(0),
- B(LdaContextSlot), R(1), U8(15), U8(0),
- B(Star), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(12), U8(1),
+ B(Ldar), R(12),
+ B(StaContextSlot), R(7), U8(14), U8(0),
+ B(LdaContextSlot), R(7), U8(14), U8(0),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(13),
- B(LdaContextSlot), R(1), U8(15), U8(0),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(Ldar), R(11),
+ B(LdaContextSlot), R(7), U8(14), U8(0),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(Ldar), R(10),
B(SetPendingMessage),
+ B(Ldar), R(8),
+ B(SwitchOnSmiNoFeedback), U8(19), U8(3), I8(0),
+ B(Jump), U8(42),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(16),
+ B(Star), R(3),
+ B(Mov), R(9), R(4),
+ B(Jump), U8(116),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(27),
- B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(38),
- B(Jump), U8(43),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(LdaSmi), I8(2),
- B(Star), R(5),
- B(Mov), R(10), R(6),
- B(Jump), U8(118),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(LdaSmi), I8(3),
- B(Star), R(5),
- B(Mov), R(10), R(6),
- B(Jump), U8(101),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Ldar), R(10),
+ B(Star), R(3),
+ B(Mov), R(9), R(4),
+ B(Jump), U8(99),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Ldar), R(9),
B(ReThrow),
- B(PopContext), R(1),
+ B(PopContext), R(7),
B(LdaUndefined),
- B(Star), R(9),
- B(LdaCurrentContextSlot), U8(11),
- B(Star), R(10),
+ B(Star), R(7),
+ B(LdaCurrentContextSlot), U8(10),
+ B(Star), R(8),
B(LdaUndefined),
- B(Star), R(11),
- B(CallJSRuntime), U8(%promise_resolve), R(9), U8(3),
- B(LdaCurrentContextSlot), U8(11),
- B(Star), R(6),
- B(LdaSmi), I8(4),
- B(Star), R(5),
- B(Jump), U8(68),
- B(Jump), U8(54),
B(Star), R(9),
+ B(CallJSRuntime), U8(%promise_resolve), R(7), U8(3),
+ B(LdaCurrentContextSlot), U8(10),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(67),
+ B(Jump), U8(53),
+ B(Star), R(7),
B(Ldar), R(closure),
- B(CreateCatchContext), R(9), U8(8), U8(17),
- B(Star), R(8),
+ B(CreateCatchContext), R(7), U8(12), U8(22),
+ B(Star), R(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(8),
- B(PushContext), R(1),
+ B(Ldar), R(6),
+ B(PushContext), R(7),
B(LdaUndefined),
+ B(Star), R(8),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(9),
- B(LdaContextSlot), R(1), U8(11), U8(0),
- B(Star), R(10),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(11),
+ B(Star), R(10),
B(LdaFalse),
- B(Star), R(12),
- B(CallJSRuntime), U8(%promise_internal_reject), R(9), U8(4),
- B(LdaContextSlot), R(1), U8(11), U8(0),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Star), R(6),
- B(LdaSmi), I8(5),
- B(Star), R(5),
+ B(Star), R(11),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(8), U8(4),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
B(Jump), U8(14),
B(LdaSmi), I8(-1),
- B(Star), R(5),
+ B(Star), R(3),
B(Jump), U8(8),
- B(Star), R(6),
- B(LdaSmi), I8(6),
- B(Star), R(5),
+ B(Star), R(4),
+ B(LdaSmi), I8(2),
+ B(Star), R(3),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(7),
+ B(Star), R(5),
B(LdaUndefined),
- B(Star), R(8),
- B(LdaCurrentContextSlot), U8(11),
- B(Star), R(9),
- B(CallJSRuntime), U8(%async_function_promise_release), R(8), U8(2),
- B(Ldar), R(7),
+ B(Star), R(6),
+ B(LdaCurrentContextSlot), U8(10),
+ B(Star), R(7),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(6), U8(2),
+ B(Ldar), R(5),
B(SetPendingMessage),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(40),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(37),
- B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(34),
- B(LdaSmi), I8(3),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(31),
- B(LdaSmi), I8(4),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(42),
- B(LdaSmi), I8(5),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(39),
- B(LdaSmi), I8(6),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(36),
- B(Jump), U8(37),
- B(Ldar), R(6),
- /* 68 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(24), U8(3), I8(0),
+ B(Jump), U8(25),
+ B(Ldar), R(4),
/* 68 S> */ B(Return),
- B(Ldar), R(6),
- /* 68 S> */ B(Return),
- B(LdaCurrentContextSlot), U8(11),
- B(Star), R(9),
+ B(LdaCurrentContextSlot), U8(10),
+ B(Star), R(7),
B(LdaUndefined),
- B(Star), R(8),
- B(Mov), R(6), R(10),
- B(CallJSRuntime), U8(%promise_resolve), R(8), U8(3),
- B(Ldar), R(9),
- /* 68 S> */ B(Return),
- B(Ldar), R(6),
- /* 68 S> */ B(Return),
- B(Ldar), R(6),
+ B(Star), R(6),
+ B(Mov), R(4), R(8),
+ B(CallJSRuntime), U8(%promise_resolve), R(6), U8(3),
+ B(Ldar), R(7),
/* 68 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(4),
B(ReThrow),
B(LdaUndefined),
/* 68 S> */ B(Return),
]
constant pool: [
+ Smi [116],
+ Smi [556],
+ Smi [700],
FIXED_ARRAY_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
+ Smi [83],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -972,20 +890,24 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- Smi [569],
FIXED_ARRAY_TYPE,
- Smi [711],
- Smi [348],
- Smi [371],
+ Smi [339],
+ Smi [362],
+ Smi [6],
+ Smi [22],
+ Smi [39],
FIXED_ARRAY_TYPE,
- Smi [340],
+ Smi [321],
+ Smi [6],
+ Smi [9],
+ Smi [26],
]
handlers: [
- [80, 975, 981],
- [83, 921, 923],
- [100, 435, 441],
- [103, 387, 389],
- [528, 654, 656],
+ [60, 929, 935],
+ [63, 876, 878],
+ [80, 412, 418],
+ [83, 364, 366],
+ [505, 627, 629],
]
---
@@ -998,467 +920,428 @@ snippet: "
}
f();
"
-frame size: 19
+frame size: 17
parameter count: 1
-bytecode array length: 1064
+bytecode array length: 980
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(39),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(4),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(3),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(132),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrueConstant), U8(12),
- B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrueConstant), U8(14),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(3), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kAbort), R(5), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CreateFunctionContext), U8(13),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(CreateFunctionContext), U8(12),
+ B(PushContext), R(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
/* 16 E> */ B(StackCheck),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(6),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(5), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(LdaUndefined),
- B(Star), R(5),
- B(CallJSRuntime), U8(%async_function_promise_create), R(5), U8(1),
- B(StaCurrentContextSlot), U8(11),
- B(Mov), R(context), R(7),
- B(Mov), R(context), R(8),
+ B(Star), R(3),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(3), U8(1),
+ B(StaCurrentContextSlot), U8(10),
+ B(Mov), R(context), R(5),
+ B(Mov), R(context), R(6),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(0),
- B(PushContext), R(1),
+ B(CreateBlockContext), U8(3),
+ B(PushContext), R(7),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaZero),
- B(StaContextSlot), R(1), U8(9), U8(0),
+ B(StaContextSlot), R(7), U8(8), U8(0),
+ B(Mov), R(context), R(10),
B(Mov), R(context), R(11),
- B(Mov), R(context), R(12),
- /* 43 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(9),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(2), U8(7),
+ /* 43 S> */ B(CreateArrayLiteral), U8(4), U8(3), U8(17),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(5), U8(8),
B(JumpIfUndefined), U8(17),
B(JumpIfNull), U8(15),
- B(Star), R(14),
- B(CallProperty0), R(14), R(13), U8(9),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(12), U8(10),
B(JumpIfJSReceiver), U8(23),
B(CallRuntime), U16(Runtime::kThrowSymbolAsyncIteratorInvalid), R(0), U8(0),
- B(LdaNamedProperty), R(13), U8(3), U8(3),
- B(Star), R(14),
- B(CallProperty0), R(14), R(13), U8(5),
- B(Star), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(14), U8(1),
- /* 43 E> */ B(StaContextSlot), R(1), U8(7), U8(0),
+ B(LdaNamedProperty), R(12), U8(6), U8(4),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(12), U8(6),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateAsyncFromSyncIterator), R(13), U8(1),
+ /* 43 E> */ B(StaContextSlot), R(7), U8(6), U8(0),
+ B(Ldar), R(0),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(1), I8(0),
B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(16),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(75),
+ B(TestEqualStrictNoFeedback), R(0),
+ B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kAbort), R(13), U8(1),
- /* 40 S> */ B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
+ /* 40 S> */ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
+ B(Star), R(12),
+ /* 40 S> */ B(LdaContextSlot), R(7), U8(6), U8(0),
B(Star), R(14),
- B(LdaNamedProperty), R(14), U8(4), U8(13),
+ B(LdaNamedProperty), R(14), U8(8), U8(14),
B(Star), R(13),
- /* 40 E> */ B(CallProperty0), R(13), R(14), U8(11),
- B(StaContextSlot), R(1), U8(10), U8(0),
+ /* 40 E> */ B(CallProperty0), R(13), R(14), U8(12),
+ B(StaContextSlot), R(7), U8(9), U8(0),
/* 40 S> */ B(LdaUndefined),
B(Star), R(13),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
B(Star), R(14),
- B(LdaContextSlot), R(1), U8(10), U8(0),
+ B(LdaContextSlot), R(7), U8(9), U8(0),
B(Star), R(15),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(16),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(13), U8(4),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(13),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
- B(Star), R(14),
B(LdaZero),
- B(SuspendGenerator), R(14), U8(2),
+ B(SuspendGenerator), R(12), U8(2),
B(Ldar), R(13),
/* 114 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(14), U8(1),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(14), U8(1),
- B(Star), R(16),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(12), U8(1),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(12), U8(1),
+ B(Star), R(14),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(16),
- B(JumpIfTrue), U8(30),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(28),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(16),
- B(JumpIfTrue), U8(21),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(19),
B(LdaTrue),
- B(Star), R(18),
- B(Mov), R(15), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(17), U8(2),
- B(Star), R(10),
- B(LdaZero),
+ B(Star), R(16),
+ B(Mov), R(13), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(15), U8(2),
B(Star), R(9),
- B(Jump), U8(192),
- B(Ldar), R(15),
+ B(LdaZero),
+ B(Star), R(8),
+ B(Jump), U8(189),
+ B(Ldar), R(13),
B(ReThrow),
- B(Ldar), R(15),
- /* 40 E> */ B(StaContextSlot), R(1), U8(8), U8(0),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(15), U8(1),
+ B(Ldar), R(13),
+ /* 40 E> */ B(StaContextSlot), R(7), U8(7), U8(0),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(13), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(13),
- B(LdaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(13), U8(1),
- B(LdaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(5), U8(15),
- B(JumpIfToBooleanTrue), U8(93),
- B(LdaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(6), U8(17),
- B(StaContextSlot), R(1), U8(12), U8(0),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(9), U8(16),
+ B(JumpIfToBooleanTrue), U8(90),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(10), U8(18),
+ B(StaContextSlot), R(7), U8(11), U8(0),
B(LdaSmi), I8(2),
- B(StaContextSlot), R(1), U8(9), U8(0),
- B(LdaContextSlot), R(1), U8(12), U8(0),
- B(StaContextSlot), R(1), U8(6), U8(0),
+ B(StaContextSlot), R(7), U8(8), U8(0),
+ B(LdaContextSlot), R(7), U8(11), U8(0),
+ B(StaContextSlot), R(7), U8(5), U8(0),
/* 23 E> */ B(StackCheck),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(7),
- B(PushContext), R(2),
+ B(CreateBlockContext), U8(11),
+ B(PushContext), R(12),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(LdaContextSlot), R(1), U8(6), U8(0),
+ B(LdaContextSlot), R(7), U8(5), U8(0),
B(StaCurrentContextSlot), U8(4),
/* 63 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(13),
B(LdaSmi), I8(10),
- /* 69 E> */ B(TestEqual), R(13), U8(19),
+ /* 69 E> */ B(TestEqual), R(13), U8(20),
B(JumpIfFalse), U8(8),
- /* 76 S> */ B(PopContext), R(2),
- B(PopContext), R(2),
+ /* 76 S> */ B(PopContext), R(12),
+ B(PopContext), R(12),
B(Jump), U8(26),
/* 90 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(13),
B(LdaSmi), I8(20),
- /* 96 E> */ B(TestEqual), R(13), U8(20),
+ /* 96 E> */ B(TestEqual), R(13), U8(21),
B(JumpIfFalse), U8(8),
- /* 103 S> */ B(PopContext), R(2),
- B(PopContext), R(2),
- B(Jump), U8(15),
- B(PopContext), R(2),
+ /* 103 S> */ B(PopContext), R(12),
+ B(PopContext), R(12),
+ B(Jump), U8(12),
+ B(PopContext), R(12),
B(LdaZero),
- B(StaContextSlot), R(1), U8(9), U8(0),
- B(Wide), B(JumpLoop), U16(257), I16(0),
+ B(StaContextSlot), R(7), U8(8), U8(0),
+ B(JumpLoop), U8(253), I8(0),
B(Jump), U8(48),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(8), U8(9),
B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(12), U8(13),
+ B(Star), R(11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(PushContext), R(2),
- B(LdaContextSlot), R(1), U8(9), U8(0),
+ B(Ldar), R(11),
+ B(PushContext), R(12),
+ B(LdaContextSlot), R(7), U8(8), U8(0),
B(Star), R(13),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(13), U8(21),
+ B(TestEqualStrict), R(13), U8(22),
B(JumpIfFalse), U8(8),
B(LdaSmi), I8(1),
- B(StaContextSlot), R(1), U8(9), U8(0),
+ B(StaContextSlot), R(7), U8(8), U8(0),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
- B(PopContext), R(2),
+ B(PopContext), R(12),
B(LdaSmi), I8(-1),
- B(Star), R(9),
+ B(Star), R(8),
B(Jump), U8(8),
- B(Star), R(10),
- B(LdaSmi), I8(1),
B(Star), R(9),
+ B(LdaSmi), I8(1),
+ B(Star), R(8),
B(LdaTheHole),
B(SetPendingMessage),
+ B(Star), R(10),
+ B(LdaContextSlot), R(7), U8(8), U8(0),
B(Star), R(11),
- B(LdaContextSlot), R(1), U8(9), U8(0),
- B(Star), R(12),
B(LdaZero),
- B(TestEqualStrict), R(12), U8(22),
- B(JumpIfTrueConstant), U8(16),
- B(LdaContextSlot), R(1), U8(7), U8(0),
- B(Star), R(12),
- B(LdaNamedProperty), R(12), U8(10), U8(23),
- B(StaContextSlot), R(1), U8(13), U8(0),
- B(LdaContextSlot), R(1), U8(13), U8(0),
+ B(TestEqualStrict), R(11), U8(23),
+ B(JumpIfTrueConstant), U8(18),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(14), U8(24),
+ B(StaContextSlot), R(7), U8(12), U8(0),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
- B(JumpConstant), U8(15),
- B(LdaContextSlot), R(1), U8(9), U8(0),
- B(Star), R(12),
+ B(JumpConstant), U8(17),
+ B(LdaContextSlot), R(7), U8(8), U8(0),
+ B(Star), R(11),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(12), U8(26),
- B(JumpIfFalse), U8(179),
- B(LdaContextSlot), R(1), U8(13), U8(0),
+ B(TestEqualStrict), R(11), U8(27),
+ B(JumpIfFalse), U8(175),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(132),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(11),
+ B(LdaConstant), U8(15),
B(Star), R(12),
- B(LdaConstant), U8(11),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
B(Throw),
- B(Mov), R(context), R(12),
- B(LdaContextSlot), R(1), U8(13), U8(0),
+ B(Mov), R(context), R(11),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
+ B(Star), R(12),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
B(Star), R(13),
- B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
B(Star), R(14),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
- B(StaContextSlot), R(1), U8(14), U8(0),
+ B(StaContextSlot), R(7), U8(13), U8(0),
B(LdaUndefined),
B(Star), R(13),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
B(Star), R(14),
- B(LdaContextSlot), R(1), U8(14), U8(0),
+ B(LdaContextSlot), R(7), U8(13), U8(0),
B(Star), R(15),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(16),
B(CallJSRuntime), U8(%async_function_await_caught), R(13), U8(4),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(13),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
- B(Star), R(14),
B(LdaSmi), I8(1),
- B(SuspendGenerator), R(14), U8(2),
+ B(SuspendGenerator), R(12), U8(2),
B(Ldar), R(13),
/* 114 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(14), U8(1),
- B(Star), R(15),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(14), U8(1),
- B(Star), R(16),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(12), U8(1),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(12), U8(1),
+ B(Star), R(14),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(16),
- B(JumpIfTrue), U8(38),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(36),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(16),
- B(JumpIfTrue), U8(29),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(27),
B(LdaTrue),
- B(Star), R(18),
- B(Mov), R(15), R(17),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(17), U8(2),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Star), R(6),
+ B(Star), R(16),
+ B(Mov), R(13), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(15), U8(2),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Star), R(4),
B(LdaZero),
- B(Star), R(5),
- B(JumpConstant), U8(18),
- B(Ldar), R(15),
+ B(Star), R(3),
+ B(JumpConstant), U8(22),
+ B(Ldar), R(13),
B(ReThrow),
- B(Ldar), R(15),
+ B(Ldar), R(13),
B(Jump), U8(20),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(8), U8(13),
B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(12), U8(16),
+ B(Star), R(11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(PushContext), R(2),
- B(PopContext), R(2),
- B(Jump), U8(158),
- B(LdaContextSlot), R(1), U8(13), U8(0),
+ B(Ldar), R(11),
+ B(PushContext), R(12),
+ B(PopContext), R(12),
+ B(Jump), U8(153),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
+ B(Star), R(11),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
B(Star), R(12),
- B(LdaContextSlot), R(1), U8(7), U8(0),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
B(Star), R(13),
B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
- B(StaContextSlot), R(1), U8(16), U8(0),
+ B(StaContextSlot), R(7), U8(15), U8(0),
B(LdaUndefined),
B(Star), R(12),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
B(Star), R(13),
- B(LdaContextSlot), R(1), U8(16), U8(0),
+ B(LdaContextSlot), R(7), U8(15), U8(0),
B(Star), R(14),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(15),
B(CallJSRuntime), U8(%async_function_await_uncaught), R(12), U8(4),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(12),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
- B(Star), R(13),
B(LdaSmi), I8(2),
- B(SuspendGenerator), R(13), U8(2),
+ B(SuspendGenerator), R(11), U8(2),
B(Ldar), R(12),
/* 114 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(13), U8(1),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(13), U8(1),
- B(Star), R(15),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(11), U8(1),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(11), U8(1),
+ B(Star), R(13),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(15),
- B(JumpIfTrue), U8(39),
+ B(TestEqualStrictNoFeedback), R(13),
+ B(JumpIfTrue), U8(36),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(15),
- B(JumpIfTrue), U8(30),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(13),
+ B(JumpIfTrue), U8(27),
B(LdaTrue),
- B(Star), R(17),
- B(Mov), R(14), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(16), U8(2),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Star), R(6),
- B(LdaSmi), I8(1),
- B(Star), R(5),
- B(Jump), U8(168),
- B(Ldar), R(14),
+ B(Star), R(15),
+ B(Mov), R(12), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(14), U8(2),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(160),
+ B(Ldar), R(12),
B(ReThrow),
- B(Ldar), R(14),
- B(StaContextSlot), R(1), U8(15), U8(0),
- B(LdaContextSlot), R(1), U8(15), U8(0),
- B(Star), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(12), U8(1),
+ B(Ldar), R(12),
+ B(StaContextSlot), R(7), U8(14), U8(0),
+ B(LdaContextSlot), R(7), U8(14), U8(0),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(13),
- B(LdaContextSlot), R(1), U8(15), U8(0),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(Ldar), R(11),
+ B(LdaContextSlot), R(7), U8(14), U8(0),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(Ldar), R(10),
B(SetPendingMessage),
+ B(Ldar), R(8),
+ B(SwitchOnSmiNoFeedback), U8(19), U8(2), I8(0),
+ B(Jump), U8(25),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(21),
- B(Jump), U8(26),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(LdaSmi), I8(2),
- B(Star), R(5),
- B(Mov), R(10), R(6),
- B(Jump), U8(101),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Ldar), R(10),
+ B(Star), R(3),
+ B(Mov), R(9), R(4),
+ B(Jump), U8(99),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Ldar), R(9),
B(ReThrow),
- B(PopContext), R(1),
+ B(PopContext), R(7),
B(LdaUndefined),
- B(Star), R(9),
- B(LdaCurrentContextSlot), U8(11),
- B(Star), R(10),
+ B(Star), R(7),
+ B(LdaCurrentContextSlot), U8(10),
+ B(Star), R(8),
B(LdaUndefined),
- B(Star), R(11),
- B(CallJSRuntime), U8(%promise_resolve), R(9), U8(3),
- B(LdaCurrentContextSlot), U8(11),
- B(Star), R(6),
- B(LdaSmi), I8(3),
- B(Star), R(5),
- B(Jump), U8(68),
- B(Jump), U8(54),
B(Star), R(9),
+ B(CallJSRuntime), U8(%promise_resolve), R(7), U8(3),
+ B(LdaCurrentContextSlot), U8(10),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(67),
+ B(Jump), U8(53),
+ B(Star), R(7),
B(Ldar), R(closure),
- B(CreateCatchContext), R(9), U8(8), U8(17),
- B(Star), R(8),
+ B(CreateCatchContext), R(7), U8(12), U8(21),
+ B(Star), R(6),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(8),
- B(PushContext), R(1),
+ B(Ldar), R(6),
+ B(PushContext), R(7),
B(LdaUndefined),
+ B(Star), R(8),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
B(Star), R(9),
- B(LdaContextSlot), R(1), U8(11), U8(0),
- B(Star), R(10),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(11),
+ B(Star), R(10),
B(LdaFalse),
- B(Star), R(12),
- B(CallJSRuntime), U8(%promise_internal_reject), R(9), U8(4),
- B(LdaContextSlot), R(1), U8(11), U8(0),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Star), R(6),
- B(LdaSmi), I8(4),
- B(Star), R(5),
+ B(Star), R(11),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(8), U8(4),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
B(Jump), U8(14),
B(LdaSmi), I8(-1),
- B(Star), R(5),
+ B(Star), R(3),
B(Jump), U8(8),
- B(Star), R(6),
- B(LdaSmi), I8(5),
- B(Star), R(5),
+ B(Star), R(4),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(7),
+ B(Star), R(5),
B(LdaUndefined),
- B(Star), R(8),
- B(LdaCurrentContextSlot), U8(11),
- B(Star), R(9),
- B(CallJSRuntime), U8(%async_function_promise_release), R(8), U8(2),
- B(Ldar), R(7),
+ B(Star), R(6),
+ B(LdaCurrentContextSlot), U8(10),
+ B(Star), R(7),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(6), U8(2),
+ B(Ldar), R(5),
B(SetPendingMessage),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(34),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(31),
- B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(28),
- B(LdaSmi), I8(3),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(25),
- B(LdaSmi), I8(4),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(22),
- B(LdaSmi), I8(5),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(19),
- B(Jump), U8(20),
- B(Ldar), R(6),
- /* 114 S> */ B(Return),
- B(Ldar), R(6),
- /* 114 S> */ B(Return),
- B(Ldar), R(6),
- /* 114 S> */ B(Return),
- B(Ldar), R(6),
- /* 114 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(23), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(4),
/* 114 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(4),
B(ReThrow),
B(LdaUndefined),
/* 114 S> */ B(Return),
]
constant pool: [
+ Smi [116],
+ Smi [578],
+ Smi [722],
FIXED_ARRAY_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
SYMBOL_TYPE,
SYMBOL_TYPE,
+ Smi [83],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -1467,20 +1350,22 @@ constant pool: [
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
- Smi [594],
FIXED_ARRAY_TYPE,
- Smi [736],
- Smi [348],
- Smi [371],
+ Smi [339],
+ Smi [362],
+ Smi [6],
+ Smi [22],
FIXED_ARRAY_TYPE,
- Smi [317],
+ Smi [304],
+ Smi [6],
+ Smi [9],
]
handlers: [
- [80, 977, 983],
- [83, 923, 925],
- [100, 460, 466],
- [103, 412, 414],
- [553, 679, 681],
+ [60, 934, 940],
+ [63, 881, 883],
+ [80, 434, 440],
+ [83, 386, 388],
+ [527, 649, 651],
]
---
@@ -1491,276 +1376,242 @@ snippet: "
}
f();
"
-frame size: 14
+frame size: 11
parameter count: 1
-bytecode array length: 579
+bytecode array length: 508
bytecodes: [
- B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(22),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(3),
- B(ResumeGenerator), R(new_target),
- B(Star), R(2),
- B(LdaSmi), I8(79),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kAbort), R(4), U8(1),
- B(LdaSmi), I8(-2),
- B(Star), R(2),
- B(CreateFunctionContext), U8(10),
+ B(CreateFunctionContext), U8(9),
B(PushContext), R(0),
- B(Ldar), R(this),
- B(StaCurrentContextSlot), U8(4),
/* 16 E> */ B(StackCheck),
- B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(5),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(4), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(LdaUndefined),
- B(Star), R(4),
- B(CallJSRuntime), U8(%async_function_promise_create), R(4), U8(1),
- B(StaCurrentContextSlot), U8(13),
- B(Mov), R(context), R(6),
- B(Mov), R(context), R(7),
- /* 31 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(8),
- B(Ldar), R(8),
- /* 31 E> */ B(StaCurrentContextSlot), U8(6),
+ B(Star), R(1),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(1), U8(1),
+ B(StaCurrentContextSlot), U8(12),
+ B(Mov), R(context), R(3),
+ B(Mov), R(context), R(4),
+ /* 31 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(5),
+ B(Ldar), R(5),
+ /* 31 E> */ B(StaCurrentContextSlot), U8(5),
B(LdaZero),
- B(StaCurrentContextSlot), U8(9),
- B(Mov), R(context), R(10),
- B(Mov), R(context), R(11),
- /* 68 S> */ B(CreateArrayLiteral), U8(1), U8(3), U8(9),
- B(Star), R(12),
- B(LdaNamedProperty), R(12), U8(2), U8(4),
- B(Star), R(13),
- B(CallProperty0), R(13), R(12), U8(6),
+ B(StaCurrentContextSlot), U8(8),
+ B(Mov), R(context), R(7),
+ B(Mov), R(context), R(8),
+ /* 68 S> */ B(CreateArrayLiteral), U8(1), U8(4), U8(17),
+ B(Star), R(9),
+ B(LdaNamedProperty), R(9), U8(2), U8(5),
+ B(Star), R(10),
+ B(CallProperty0), R(10), R(9), U8(7),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- /* 68 E> */ B(StaCurrentContextSlot), U8(7),
- /* 65 S> */ B(LdaCurrentContextSlot), U8(7),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(3), U8(10),
- B(Star), R(12),
- /* 65 E> */ B(CallProperty0), R(12), R(13), U8(8),
- /* 65 E> */ B(StaCurrentContextSlot), U8(8),
- B(Star), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(12), U8(1),
+ /* 68 E> */ B(StaCurrentContextSlot), U8(6),
+ /* 65 S> */ B(LdaCurrentContextSlot), U8(6),
+ B(Star), R(10),
+ B(LdaNamedProperty), R(10), U8(3), U8(11),
+ B(Star), R(9),
+ /* 65 E> */ B(CallProperty0), R(9), R(10), U8(9),
+ /* 65 E> */ B(StaCurrentContextSlot), U8(7),
+ B(Star), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(11),
- B(LdaCurrentContextSlot), U8(8),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(LdaCurrentContextSlot), U8(8),
- B(Star), R(12),
- B(LdaNamedProperty), R(12), U8(4), U8(12),
+ B(LdaCurrentContextSlot), U8(7),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
+ B(LdaCurrentContextSlot), U8(7),
+ B(Star), R(9),
+ B(LdaNamedProperty), R(9), U8(4), U8(13),
B(JumpIfToBooleanTrue), U8(42),
- B(LdaImmutableCurrentContextSlot), U8(6),
- B(Star), R(12),
- B(LdaCurrentContextSlot), U8(8),
- B(Star), R(13),
- /* 58 E> */ B(LdaNamedProperty), R(13), U8(5), U8(14),
- B(StaCurrentContextSlot), U8(10),
- B(LdaSmi), I8(2),
+ B(LdaImmutableCurrentContextSlot), U8(5),
+ B(Star), R(9),
+ B(LdaCurrentContextSlot), U8(7),
+ B(Star), R(10),
+ /* 58 E> */ B(LdaNamedProperty), R(10), U8(5), U8(15),
B(StaCurrentContextSlot), U8(9),
- B(LdaCurrentContextSlot), U8(10),
- B(StaNamedPropertySloppy), R(12), U8(6), U8(16),
+ B(LdaSmi), I8(2),
+ B(StaCurrentContextSlot), U8(8),
+ B(LdaCurrentContextSlot), U8(9),
+ B(StaNamedPropertySloppy), R(9), U8(6), U8(17),
/* 53 E> */ B(StackCheck),
- /* 79 S> */ B(LdaImmutableCurrentContextSlot), U8(6),
- B(Star), R(12),
- /* 87 E> */ B(LdaNamedProperty), R(12), U8(6), U8(18),
+ /* 79 S> */ B(LdaImmutableCurrentContextSlot), U8(5),
B(Star), R(9),
+ /* 87 E> */ B(LdaNamedProperty), R(9), U8(6), U8(19),
+ B(Star), R(6),
B(LdaZero),
- B(Star), R(8),
+ B(Star), R(5),
B(Jump), U8(62),
B(Jump), U8(48),
- B(Star), R(12),
+ B(Star), R(9),
B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(7), U8(8),
- B(Star), R(11),
+ B(CreateCatchContext), R(9), U8(7), U8(8),
+ B(Star), R(8),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(11),
- B(PushContext), R(1),
- B(LdaContextSlot), R(1), U8(9), U8(0),
- B(Star), R(12),
+ B(Ldar), R(8),
+ B(PushContext), R(9),
+ B(LdaContextSlot), R(9), U8(8), U8(0),
+ B(Star), R(10),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(12), U8(20),
+ B(TestEqualStrict), R(10), U8(21),
B(JumpIfFalse), U8(8),
B(LdaSmi), I8(1),
- B(StaContextSlot), R(1), U8(9), U8(0),
+ B(StaContextSlot), R(9), U8(8), U8(0),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
- B(PopContext), R(1),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::kReThrow), R(10), U8(1),
+ B(PopContext), R(9),
B(LdaSmi), I8(-1),
- B(Star), R(8),
+ B(Star), R(5),
B(Jump), U8(8),
- B(Star), R(9),
+ B(Star), R(6),
B(LdaSmi), I8(1),
- B(Star), R(8),
+ B(Star), R(5),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(10),
- B(LdaCurrentContextSlot), U8(9),
- B(Star), R(11),
+ B(Star), R(7),
+ B(LdaCurrentContextSlot), U8(8),
+ B(Star), R(8),
B(LdaZero),
- B(TestEqualStrict), R(11), U8(21),
+ B(TestEqualStrict), R(8), U8(22),
B(JumpIfTrue), U8(126),
- B(LdaCurrentContextSlot), U8(7),
- B(Star), R(11),
- B(LdaNamedProperty), R(11), U8(9), U8(22),
- B(StaCurrentContextSlot), U8(11),
- B(LdaCurrentContextSlot), U8(11),
+ B(LdaCurrentContextSlot), U8(6),
+ B(Star), R(8),
+ B(LdaNamedProperty), R(8), U8(9), U8(23),
+ B(StaCurrentContextSlot), U8(10),
+ B(LdaCurrentContextSlot), U8(10),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(109),
- B(LdaCurrentContextSlot), U8(9),
- B(Star), R(11),
+ B(LdaCurrentContextSlot), U8(8),
+ B(Star), R(8),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(11), U8(25),
+ B(TestEqualStrict), R(8), U8(26),
B(JumpIfFalse), U8(63),
- B(LdaCurrentContextSlot), U8(11),
+ B(LdaCurrentContextSlot), U8(10),
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(132),
- B(Star), R(11),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(8),
B(LdaConstant), U8(10),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(8), U8(2),
B(Throw),
- B(Mov), R(context), R(11),
- B(LdaCurrentContextSlot), U8(11),
- B(Star), R(12),
- B(LdaCurrentContextSlot), U8(7),
- B(Star), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Mov), R(context), R(8),
+ B(LdaCurrentContextSlot), U8(10),
+ B(Star), R(9),
+ B(LdaCurrentContextSlot), U8(6),
+ B(Star), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(9), U8(2),
B(Jump), U8(20),
- B(Star), R(12),
+ B(Star), R(9),
B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(7), U8(11),
- B(Star), R(11),
+ B(CreateCatchContext), R(9), U8(7), U8(11),
+ B(Star), R(8),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(11),
- B(PushContext), R(1),
- B(PopContext), R(1),
+ B(Ldar), R(8),
+ B(PushContext), R(9),
+ B(PopContext), R(9),
B(Jump), U8(37),
+ B(LdaCurrentContextSlot), U8(10),
+ B(Star), R(8),
+ B(LdaCurrentContextSlot), U8(6),
+ B(Star), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(8), U8(2),
+ B(StaCurrentContextSlot), U8(11),
B(LdaCurrentContextSlot), U8(11),
- B(Star), R(11),
- B(LdaCurrentContextSlot), U8(7),
- B(Star), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
- B(StaCurrentContextSlot), U8(12),
- B(LdaCurrentContextSlot), U8(12),
- B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
+ B(Star), R(8),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(11),
- B(LdaCurrentContextSlot), U8(12),
- B(Star), R(11),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(Ldar), R(10),
+ B(LdaCurrentContextSlot), U8(11),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
+ B(Ldar), R(7),
B(SetPendingMessage),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(8),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(8),
- B(JumpIfTrue), U8(12),
+ B(Ldar), R(5),
+ B(SwitchOnSmiNoFeedback), U8(12), U8(2), I8(0),
B(Jump), U8(13),
B(LdaZero),
- B(Star), R(4),
- B(Mov), R(9), R(5),
+ B(Star), R(1),
+ B(Mov), R(6), R(2),
B(Jump), U8(95),
- B(Ldar), R(9),
+ B(Ldar), R(6),
B(ReThrow),
B(LdaUndefined),
- B(Star), R(8),
- B(LdaCurrentContextSlot), U8(13),
- B(Star), R(9),
- B(LdaUndefined),
- B(Star), R(10),
- B(CallJSRuntime), U8(%promise_resolve), R(8), U8(3),
- B(LdaCurrentContextSlot), U8(13),
B(Star), R(5),
+ B(LdaCurrentContextSlot), U8(12),
+ B(Star), R(6),
+ B(LdaUndefined),
+ B(Star), R(7),
+ B(CallJSRuntime), U8(%promise_resolve), R(5), U8(3),
+ B(LdaCurrentContextSlot), U8(12),
+ B(Star), R(2),
B(LdaSmi), I8(1),
- B(Star), R(4),
+ B(Star), R(1),
B(Jump), U8(68),
B(Jump), U8(54),
- B(Star), R(8),
+ B(Star), R(5),
B(Ldar), R(closure),
- B(CreateCatchContext), R(8), U8(7), U8(12),
- B(Star), R(7),
+ B(CreateCatchContext), R(5), U8(7), U8(14),
+ B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(7),
- B(PushContext), R(1),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
B(LdaUndefined),
- B(Star), R(8),
- B(LdaContextSlot), R(1), U8(13), U8(0),
- B(Star), R(9),
+ B(Star), R(6),
+ B(LdaContextSlot), R(5), U8(12), U8(0),
+ B(Star), R(7),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(10),
+ B(Star), R(8),
B(LdaFalse),
- B(Star), R(11),
- B(CallJSRuntime), U8(%promise_internal_reject), R(8), U8(4),
- B(LdaContextSlot), R(1), U8(13), U8(0),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(Star), R(5),
- B(LdaSmi), I8(2),
- B(Star), R(4),
+ B(Star), R(9),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(6), U8(4),
+ B(LdaContextSlot), R(5), U8(12), U8(0),
+ B(PopContext), R(5),
+ B(PopContext), R(5),
+ B(Star), R(2),
+ B(LdaSmi), I8(1),
+ B(Star), R(1),
B(Jump), U8(14),
B(LdaSmi), I8(-1),
- B(Star), R(4),
+ B(Star), R(1),
B(Jump), U8(8),
- B(Star), R(5),
- B(LdaSmi), I8(3),
- B(Star), R(4),
+ B(Star), R(2),
+ B(LdaSmi), I8(2),
+ B(Star), R(1),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(6),
+ B(Star), R(3),
B(LdaUndefined),
- B(Star), R(7),
- B(LdaCurrentContextSlot), U8(13),
- B(Star), R(8),
- B(CallJSRuntime), U8(%async_function_promise_release), R(7), U8(2),
- B(Ldar), R(6),
+ B(Star), R(4),
+ B(LdaCurrentContextSlot), U8(12),
+ B(Star), R(5),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(4), U8(2),
+ B(Ldar), R(3),
B(SetPendingMessage),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(4),
- B(JumpIfTrue), U8(22),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(4),
- B(JumpIfTrue), U8(33),
- B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(4),
- B(JumpIfTrue), U8(30),
- B(LdaSmi), I8(3),
- B(TestEqualStrictNoFeedback), R(4),
- B(JumpIfTrue), U8(27),
- B(Jump), U8(28),
- B(LdaCurrentContextSlot), U8(13),
- B(Star), R(8),
+ B(Ldar), R(1),
+ B(SwitchOnSmiNoFeedback), U8(15), U8(3), I8(0),
+ B(Jump), U8(25),
+ B(LdaCurrentContextSlot), U8(12),
+ B(Star), R(5),
B(LdaUndefined),
- B(Star), R(7),
- B(Mov), R(5), R(9),
- B(CallJSRuntime), U8(%promise_resolve), R(7), U8(3),
- B(Ldar), R(8),
- /* 96 S> */ B(Return),
+ B(Star), R(4),
+ B(Mov), R(2), R(6),
+ B(CallJSRuntime), U8(%promise_resolve), R(4), U8(3),
B(Ldar), R(5),
/* 96 S> */ B(Return),
- B(Ldar), R(5),
+ B(Ldar), R(2),
/* 96 S> */ B(Return),
- B(Ldar), R(5),
+ B(Ldar), R(2),
B(ReThrow),
B(LdaUndefined),
/* 96 S> */ B(Return),
]
constant pool: [
FIXED_ARRAY_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
@@ -1771,13 +1622,18 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
FIXED_ARRAY_TYPE,
+ Smi [6],
+ Smi [14],
FIXED_ARRAY_TYPE,
+ Smi [6],
+ Smi [23],
+ Smi [26],
]
handlers: [
- [63, 496, 502],
- [66, 442, 444],
- [81, 241, 247],
- [84, 193, 195],
- [322, 334, 336],
+ [17, 445, 451],
+ [20, 391, 393],
+ [35, 195, 201],
+ [38, 147, 149],
+ [276, 288, 290],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
index 4acc95246e..21a9c5c471 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForIn.golden
@@ -76,7 +76,7 @@ bytecodes: [
B(Star), R(7),
/* 63 S> */ B(ForInContinue), R(7), R(6),
B(JumpIfFalse), U8(23),
- B(ForInNext), R(3), R(7), R(4), U8(2),
+ B(ForInNext), R(3), R(7), R(4), U8(3),
B(JumpIfUndefined), U8(9),
B(Star), R(1),
/* 54 E> */ B(StackCheck),
@@ -107,7 +107,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaZero),
B(Star), R(0),
- /* 59 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(9),
+ /* 59 S> */ B(CreateArrayLiteral), U8(0), U8(3), U8(17),
B(JumpIfUndefined), U8(46),
B(JumpIfNull), U8(44),
B(ToObject), R(3),
@@ -116,13 +116,13 @@ bytecodes: [
B(Star), R(7),
/* 54 S> */ B(ForInContinue), R(7), R(6),
B(JumpIfFalse), U8(31),
- B(ForInNext), R(3), R(7), R(4), U8(4),
+ B(ForInNext), R(3), R(7), R(4), U8(5),
B(JumpIfUndefined), U8(17),
B(Star), R(1),
/* 45 E> */ B(StackCheck),
B(Star), R(2),
/* 70 S> */ B(Ldar), R(1),
- /* 75 E> */ B(Add), R(0), U8(3),
+ /* 75 E> */ B(Add), R(0), U8(4),
B(Mov), R(0), R(8),
B(Star), R(0),
/* 72 E> */ B(ForInStep), R(7),
@@ -132,7 +132,7 @@ bytecodes: [
/* 80 S> */ B(Return),
]
constant pool: [
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
@@ -150,9 +150,9 @@ parameter count: 1
bytecode array length: 87
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(1),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(1),
B(Mov), R(1), R(0),
- /* 77 S> */ B(CreateArrayLiteral), U8(1), U8(3), U8(9),
+ /* 77 S> */ B(CreateArrayLiteral), U8(1), U8(4), U8(17),
B(JumpIfUndefined), U8(72),
B(JumpIfNull), U8(70),
B(ToObject), R(1),
@@ -161,24 +161,24 @@ bytecodes: [
B(Star), R(5),
/* 68 S> */ B(ForInContinue), R(5), R(4),
B(JumpIfFalse), U8(57),
- B(ForInNext), R(1), R(5), R(2), U8(14),
+ B(ForInNext), R(1), R(5), R(2), U8(15),
B(JumpIfUndefined), U8(43),
B(Star), R(6),
B(Ldar), R(6),
- /* 67 E> */ B(StaNamedPropertySloppy), R(0), U8(2), U8(12),
+ /* 67 E> */ B(StaNamedPropertySloppy), R(0), U8(2), U8(13),
/* 62 E> */ B(StackCheck),
/* 95 S> */ B(Nop),
- /* 100 E> */ B(LdaNamedProperty), R(0), U8(2), U8(6),
+ /* 100 E> */ B(LdaNamedProperty), R(0), U8(2), U8(7),
B(Star), R(6),
B(LdaSmi), I8(10),
- /* 106 E> */ B(TestEqual), R(6), U8(8),
+ /* 106 E> */ B(TestEqual), R(6), U8(9),
B(JumpIfFalse), U8(4),
/* 113 S> */ B(Jump), U8(18),
/* 125 S> */ B(Nop),
- /* 130 E> */ B(LdaNamedProperty), R(0), U8(2), U8(9),
+ /* 130 E> */ B(LdaNamedProperty), R(0), U8(2), U8(10),
B(Star), R(6),
B(LdaSmi), I8(20),
- /* 136 E> */ B(TestEqual), R(6), U8(11),
+ /* 136 E> */ B(TestEqual), R(6), U8(12),
B(JumpIfFalse), U8(4),
/* 143 S> */ B(Jump), U8(9),
B(ForInStep), R(5),
@@ -189,7 +189,7 @@ bytecodes: [
]
constant pool: [
FIXED_ARRAY_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["a"],
]
handlers: [
@@ -205,9 +205,9 @@ parameter count: 1
bytecode array length: 62
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(9),
+ /* 42 S> */ B(CreateArrayLiteral), U8(0), U8(3), U8(17),
B(Star), R(0),
- /* 72 S> */ B(CreateArrayLiteral), U8(1), U8(3), U8(9),
+ /* 72 S> */ B(CreateArrayLiteral), U8(1), U8(4), U8(17),
B(JumpIfUndefined), U8(49),
B(JumpIfNull), U8(47),
B(ToObject), R(1),
@@ -216,16 +216,16 @@ bytecodes: [
B(Star), R(5),
/* 65 S> */ B(ForInContinue), R(5), R(4),
B(JumpIfFalse), U8(34),
- B(ForInNext), R(1), R(5), R(2), U8(10),
+ B(ForInNext), R(1), R(5), R(2), U8(11),
B(JumpIfUndefined), U8(20),
B(Star), R(6),
B(LdaZero),
B(Star), R(8),
B(Ldar), R(6),
- /* 64 E> */ B(StaKeyedPropertySloppy), R(0), R(8), U8(8),
+ /* 64 E> */ B(StaKeyedPropertySloppy), R(0), R(8), U8(9),
/* 59 E> */ B(StackCheck),
/* 83 S> */ B(LdaSmi), I8(3),
- /* 91 E> */ B(LdaKeyedProperty), R(0), U8(6),
+ /* 91 E> */ B(LdaKeyedProperty), R(0), U8(7),
/* 98 S> */ B(Return),
B(ForInStep), R(5),
B(Star), R(5),
@@ -234,8 +234,8 @@ bytecodes: [
/* 98 S> */ B(Return),
]
constant pool: [
- CONSTANT_ELEMENTS_PAIR_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
index 99f48b8b9b..6f592ba4d6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOf.golden
@@ -9,34 +9,34 @@ wrap: yes
snippet: "
for (var p of [0, 1, 2]) {}
"
-frame size: 15
+frame size: 14
parameter count: 1
-bytecode array length: 262
+bytecode array length: 260
bytecodes: [
/* 30 E> */ B(StackCheck),
B(LdaZero),
B(Star), R(4),
+ B(Mov), R(context), R(10),
B(Mov), R(context), R(11),
- B(Mov), R(context), R(12),
- /* 48 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(9),
+ /* 48 S> */ B(CreateArrayLiteral), U8(0), U8(3), U8(17),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(1), U8(4),
B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(1), U8(3),
- B(Star), R(14),
- B(CallProperty0), R(14), R(13), U8(5),
+ B(CallProperty0), R(13), R(12), U8(6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(2),
- /* 45 S> */ B(LdaNamedProperty), R(2), U8(2), U8(9),
- B(Star), R(13),
- /* 45 E> */ B(CallProperty0), R(13), R(2), U8(7),
+ /* 45 S> */ B(LdaNamedProperty), R(2), U8(2), U8(10),
+ B(Star), R(12),
+ /* 45 E> */ B(CallProperty0), R(12), R(2), U8(8),
B(Star), R(3),
/* 45 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
- B(LdaNamedProperty), R(3), U8(3), U8(11),
+ B(LdaNamedProperty), R(3), U8(3), U8(12),
B(JumpIfToBooleanTrue), U8(25),
- B(LdaNamedProperty), R(3), U8(4), U8(13),
+ B(LdaNamedProperty), R(3), U8(4), U8(14),
B(Star), R(5),
B(LdaSmi), I8(2),
B(Star), R(4),
@@ -47,86 +47,85 @@ bytecodes: [
B(Star), R(4),
B(JumpLoop), U8(50), I8(0),
B(Jump), U8(36),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(5), U8(6),
B(Star), R(12),
- B(PushContext), R(8),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(5), U8(6),
+ B(PushContext), R(12),
+ B(Star), R(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(4), U8(15),
+ B(TestEqualStrict), R(4), U8(16),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(4),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
- B(PopContext), R(8),
+ B(PopContext), R(12),
B(LdaSmi), I8(-1),
- B(Star), R(9),
+ B(Star), R(8),
B(Jump), U8(7),
- B(Star), R(10),
- B(LdaZero),
B(Star), R(9),
+ B(LdaZero),
+ B(Star), R(8),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(11),
+ B(Star), R(10),
B(LdaZero),
- B(TestEqualStrict), R(4), U8(16),
+ B(TestEqualStrict), R(4), U8(17),
B(JumpIfTrue), U8(104),
- B(LdaNamedProperty), R(2), U8(7), U8(17),
+ B(LdaNamedProperty), R(2), U8(7), U8(18),
B(Star), R(6),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(4), U8(20),
+ B(TestEqualStrict), R(4), U8(21),
B(JumpIfFalse), U8(61),
B(Ldar), R(6),
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(132),
- B(Star), R(12),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(11),
B(LdaConstant), U8(8),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
B(Throw),
- B(Mov), R(context), R(12),
- B(Mov), R(6), R(13),
- B(Mov), R(2), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
+ B(Mov), R(context), R(11),
+ B(Mov), R(6), R(12),
+ B(Mov), R(2), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
B(Jump), U8(20),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(5), U8(9),
B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(5), U8(9),
+ B(Star), R(11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(PushContext), R(8),
- B(PopContext), R(8),
+ B(Ldar), R(11),
+ B(PushContext), R(12),
+ B(PopContext), R(12),
B(Jump), U8(27),
- B(Mov), R(6), R(12),
- B(Mov), R(2), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Mov), R(6), R(11),
+ B(Mov), R(2), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
B(Star), R(7),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(Ldar), R(11),
+ B(Ldar), R(10),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(4),
- B(Jump), U8(5),
- B(Ldar), R(10),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfFalse), U8(5),
+ B(Ldar), R(9),
B(ReThrow),
B(LdaUndefined),
/* 62 S> */ B(Return),
]
constant pool: [
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
@@ -148,35 +147,35 @@ snippet: "
var x = 'potatoes';
for (var p of x) { return p; }
"
-frame size: 16
+frame size: 15
parameter count: 1
-bytecode array length: 275
+bytecode array length: 270
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
B(LdaZero),
B(Star), R(5),
+ B(Mov), R(context), R(11),
B(Mov), R(context), R(12),
- B(Mov), R(context), R(13),
- /* 68 S> */ B(LdaNamedProperty), R(0), U8(1), U8(2),
- B(Star), R(15),
- B(CallProperty0), R(15), R(0), U8(4),
- B(Mov), R(0), R(14),
+ /* 68 S> */ B(LdaNamedProperty), R(0), U8(1), U8(3),
+ B(Star), R(14),
+ B(CallProperty0), R(14), R(0), U8(5),
+ B(Mov), R(0), R(13),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(3),
- /* 65 S> */ B(LdaNamedProperty), R(3), U8(2), U8(8),
- B(Star), R(14),
- /* 65 E> */ B(CallProperty0), R(14), R(3), U8(6),
+ /* 65 S> */ B(LdaNamedProperty), R(3), U8(2), U8(9),
+ B(Star), R(13),
+ /* 65 E> */ B(CallProperty0), R(13), R(3), U8(7),
B(Star), R(4),
/* 65 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(4), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(4), U8(1),
- B(LdaNamedProperty), R(4), U8(3), U8(10),
+ B(LdaNamedProperty), R(4), U8(3), U8(11),
B(JumpIfToBooleanTrue), U8(27),
- B(LdaNamedProperty), R(4), U8(4), U8(12),
+ B(LdaNamedProperty), R(4), U8(4), U8(13),
B(Star), R(6),
B(LdaSmi), I8(2),
B(Star), R(5),
@@ -184,89 +183,85 @@ bytecodes: [
/* 54 E> */ B(StackCheck),
B(Mov), R(1), R(2),
/* 73 S> */ B(LdaZero),
- B(Star), R(10),
- B(Mov), R(1), R(11),
+ B(Star), R(9),
+ B(Mov), R(1), R(10),
B(Jump), U8(50),
B(Jump), U8(36),
- B(Star), R(14),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(14), U8(5), U8(6),
B(Star), R(13),
- B(PushContext), R(9),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(13), U8(5), U8(6),
+ B(PushContext), R(13),
+ B(Star), R(12),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(5), U8(14),
+ B(TestEqualStrict), R(5), U8(15),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(14),
B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
- B(PopContext), R(9),
+ B(PopContext), R(13),
B(LdaSmi), I8(-1),
- B(Star), R(10),
+ B(Star), R(9),
B(Jump), U8(8),
- B(Star), R(11),
- B(LdaSmi), I8(1),
B(Star), R(10),
+ B(LdaSmi), I8(1),
+ B(Star), R(9),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(12),
+ B(Star), R(11),
B(LdaZero),
- B(TestEqualStrict), R(5), U8(15),
+ B(TestEqualStrict), R(5), U8(16),
B(JumpIfTrue), U8(104),
- B(LdaNamedProperty), R(3), U8(7), U8(16),
+ B(LdaNamedProperty), R(3), U8(7), U8(17),
B(Star), R(7),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(5), U8(19),
+ B(TestEqualStrict), R(5), U8(20),
B(JumpIfFalse), U8(61),
B(Ldar), R(7),
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(132),
- B(Star), R(13),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(12),
B(LdaConstant), U8(8),
- B(Star), R(14),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
B(Throw),
- B(Mov), R(context), R(13),
- B(Mov), R(7), R(14),
- B(Mov), R(3), R(15),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
+ B(Mov), R(context), R(12),
+ B(Mov), R(7), R(13),
+ B(Mov), R(3), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
B(Jump), U8(20),
- B(Star), R(14),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(14), U8(5), U8(9),
B(Star), R(13),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(13), U8(5), U8(9),
+ B(Star), R(12),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(13),
- B(PushContext), R(9),
- B(PopContext), R(9),
+ B(Ldar), R(12),
+ B(PushContext), R(13),
+ B(PopContext), R(13),
B(Jump), U8(27),
- B(Mov), R(7), R(13),
- B(Mov), R(3), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
+ B(Mov), R(7), R(12),
+ B(Mov), R(3), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
B(Star), R(8),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
- B(Ldar), R(12),
+ B(Ldar), R(11),
B(SetPendingMessage),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(10),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(10),
- B(JumpIfTrue), U8(7),
+ B(Ldar), R(9),
+ B(SwitchOnSmiNoFeedback), U8(10), U8(2), I8(0),
B(Jump), U8(8),
- B(Ldar), R(11),
+ B(Ldar), R(10),
/* 85 S> */ B(Return),
- B(Ldar), R(11),
+ B(Ldar), R(10),
B(ReThrow),
B(LdaUndefined),
/* 85 S> */ B(Return),
@@ -282,6 +277,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
FIXED_ARRAY_TYPE,
+ Smi [6],
+ Smi [9],
]
handlers: [
[11, 127, 133],
@@ -296,34 +293,34 @@ snippet: "
if (x == 20) break;
}
"
-frame size: 15
+frame size: 14
parameter count: 1
-bytecode array length: 280
+bytecode array length: 278
bytecodes: [
/* 30 E> */ B(StackCheck),
B(LdaZero),
B(Star), R(4),
+ B(Mov), R(context), R(10),
B(Mov), R(context), R(11),
- B(Mov), R(context), R(12),
- /* 48 S> */ B(CreateArrayLiteral), U8(0), U8(2), U8(9),
+ /* 48 S> */ B(CreateArrayLiteral), U8(0), U8(3), U8(17),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(1), U8(4),
B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(1), U8(3),
- B(Star), R(14),
- B(CallProperty0), R(14), R(13), U8(5),
+ B(CallProperty0), R(13), R(12), U8(6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(2),
- /* 45 S> */ B(LdaNamedProperty), R(2), U8(2), U8(9),
- B(Star), R(13),
- /* 45 E> */ B(CallProperty0), R(13), R(2), U8(7),
+ /* 45 S> */ B(LdaNamedProperty), R(2), U8(2), U8(10),
+ B(Star), R(12),
+ /* 45 E> */ B(CallProperty0), R(12), R(2), U8(8),
B(Star), R(3),
/* 45 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
- B(LdaNamedProperty), R(3), U8(3), U8(11),
+ B(LdaNamedProperty), R(3), U8(3), U8(12),
B(JumpIfToBooleanTrue), U8(43),
- B(LdaNamedProperty), R(3), U8(4), U8(13),
+ B(LdaNamedProperty), R(3), U8(4), U8(14),
B(Star), R(5),
B(LdaSmi), I8(2),
B(Star), R(4),
@@ -331,97 +328,96 @@ bytecodes: [
/* 34 E> */ B(StackCheck),
B(Mov), R(0), R(1),
/* 66 S> */ B(LdaSmi), I8(10),
- /* 72 E> */ B(TestEqual), R(1), U8(15),
+ /* 72 E> */ B(TestEqual), R(1), U8(16),
B(JumpIfFalse), U8(4),
/* 79 S> */ B(Jump), U8(14),
/* 91 S> */ B(LdaSmi), I8(20),
- /* 97 E> */ B(TestEqual), R(1), U8(16),
+ /* 97 E> */ B(TestEqual), R(1), U8(17),
B(JumpIfFalse), U8(4),
/* 104 S> */ B(Jump), U8(8),
B(LdaZero),
B(Star), R(4),
B(JumpLoop), U8(68), I8(0),
B(Jump), U8(36),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(5), U8(6),
B(Star), R(12),
- B(PushContext), R(8),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(5), U8(6),
+ B(PushContext), R(12),
+ B(Star), R(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(4), U8(17),
+ B(TestEqualStrict), R(4), U8(18),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(4),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(13),
B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
- B(PopContext), R(8),
+ B(PopContext), R(12),
B(LdaSmi), I8(-1),
- B(Star), R(9),
+ B(Star), R(8),
B(Jump), U8(7),
- B(Star), R(10),
- B(LdaZero),
B(Star), R(9),
+ B(LdaZero),
+ B(Star), R(8),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(11),
+ B(Star), R(10),
B(LdaZero),
- B(TestEqualStrict), R(4), U8(18),
+ B(TestEqualStrict), R(4), U8(19),
B(JumpIfTrue), U8(104),
- B(LdaNamedProperty), R(2), U8(7), U8(19),
+ B(LdaNamedProperty), R(2), U8(7), U8(20),
B(Star), R(6),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(4), U8(22),
+ B(TestEqualStrict), R(4), U8(23),
B(JumpIfFalse), U8(61),
B(Ldar), R(6),
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(132),
- B(Star), R(12),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(11),
B(LdaConstant), U8(8),
- B(Star), R(13),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
B(Throw),
- B(Mov), R(context), R(12),
- B(Mov), R(6), R(13),
- B(Mov), R(2), R(14),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
+ B(Mov), R(context), R(11),
+ B(Mov), R(6), R(12),
+ B(Mov), R(2), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
B(Jump), U8(20),
- B(Star), R(13),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(13), U8(5), U8(9),
B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(5), U8(9),
+ B(Star), R(11),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(12),
- B(PushContext), R(8),
- B(PopContext), R(8),
+ B(Ldar), R(11),
+ B(PushContext), R(12),
+ B(PopContext), R(12),
B(Jump), U8(27),
- B(Mov), R(6), R(12),
- B(Mov), R(2), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Mov), R(6), R(11),
+ B(Mov), R(2), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
B(Star), R(7),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
- B(Ldar), R(11),
+ B(Ldar), R(10),
B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(4),
- B(Jump), U8(5),
- B(Ldar), R(10),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfFalse), U8(5),
+ B(Ldar), R(9),
B(ReThrow),
B(LdaUndefined),
/* 113 S> */ B(Return),
]
constant pool: [
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
@@ -443,135 +439,131 @@ snippet: "
var x = { 'a': 1, 'b': 2 };
for (x['a'] of [1,2,3]) { return x['a']; }
"
-frame size: 14
+frame size: 13
parameter count: 1
-bytecode array length: 286
+bytecode array length: 281
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(8),
- B(Mov), R(8), R(0),
+ /* 42 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(7),
+ B(Mov), R(7), R(0),
B(LdaZero),
B(Star), R(3),
+ B(Mov), R(context), R(9),
B(Mov), R(context), R(10),
- B(Mov), R(context), R(11),
- /* 77 S> */ B(CreateArrayLiteral), U8(1), U8(3), U8(9),
+ /* 77 S> */ B(CreateArrayLiteral), U8(1), U8(4), U8(17),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(2), U8(5),
B(Star), R(12),
- B(LdaNamedProperty), R(12), U8(2), U8(4),
- B(Star), R(13),
- B(CallProperty0), R(13), R(12), U8(6),
+ B(CallProperty0), R(12), R(11), U8(7),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
B(Star), R(1),
- /* 74 S> */ B(LdaNamedProperty), R(1), U8(3), U8(10),
- B(Star), R(12),
- /* 74 E> */ B(CallProperty0), R(12), R(1), U8(8),
+ /* 74 S> */ B(LdaNamedProperty), R(1), U8(3), U8(11),
+ B(Star), R(11),
+ /* 74 E> */ B(CallProperty0), R(11), R(1), U8(9),
B(Star), R(2),
/* 74 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
- B(LdaNamedProperty), R(2), U8(4), U8(12),
+ B(LdaNamedProperty), R(2), U8(4), U8(13),
B(JumpIfToBooleanTrue), U8(31),
- /* 67 E> */ B(LdaNamedProperty), R(2), U8(5), U8(14),
+ /* 67 E> */ B(LdaNamedProperty), R(2), U8(5), U8(15),
B(Star), R(4),
B(LdaSmi), I8(2),
B(Star), R(3),
B(Ldar), R(4),
- B(StaNamedPropertySloppy), R(0), U8(6), U8(16),
+ B(StaNamedPropertySloppy), R(0), U8(6), U8(17),
/* 62 E> */ B(StackCheck),
/* 88 S> */ B(Nop),
- /* 96 E> */ B(LdaNamedProperty), R(0), U8(6), U8(18),
- B(Star), R(9),
- B(LdaZero),
+ /* 96 E> */ B(LdaNamedProperty), R(0), U8(6), U8(19),
B(Star), R(8),
+ B(LdaZero),
+ B(Star), R(7),
B(Jump), U8(50),
B(Jump), U8(36),
- B(Star), R(12),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(7), U8(8),
B(Star), R(11),
- B(PushContext), R(7),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(11), U8(7), U8(8),
+ B(PushContext), R(11),
+ B(Star), R(10),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(3), U8(20),
+ B(TestEqualStrict), R(3), U8(21),
B(JumpIfFalse), U8(6),
B(LdaSmi), I8(1),
B(Star), R(3),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
- B(PopContext), R(7),
+ B(PopContext), R(11),
B(LdaSmi), I8(-1),
- B(Star), R(8),
+ B(Star), R(7),
B(Jump), U8(8),
- B(Star), R(9),
- B(LdaSmi), I8(1),
B(Star), R(8),
+ B(LdaSmi), I8(1),
+ B(Star), R(7),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(10),
+ B(Star), R(9),
B(LdaZero),
- B(TestEqualStrict), R(3), U8(21),
+ B(TestEqualStrict), R(3), U8(22),
B(JumpIfTrue), U8(104),
- B(LdaNamedProperty), R(1), U8(9), U8(22),
+ B(LdaNamedProperty), R(1), U8(9), U8(23),
B(Star), R(5),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(93),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(3), U8(25),
+ B(TestEqualStrict), R(3), U8(26),
B(JumpIfFalse), U8(61),
B(Ldar), R(5),
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(132),
- B(Star), R(11),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(10),
B(LdaConstant), U8(10),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(10), U8(2),
B(Throw),
- B(Mov), R(context), R(11),
- B(Mov), R(5), R(12),
- B(Mov), R(1), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Mov), R(context), R(10),
+ B(Mov), R(5), R(11),
+ B(Mov), R(1), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
B(Jump), U8(20),
- B(Star), R(12),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(7), U8(11),
B(Star), R(11),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(11), U8(7), U8(11),
+ B(Star), R(10),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(11),
- B(PushContext), R(7),
- B(PopContext), R(7),
+ B(Ldar), R(10),
+ B(PushContext), R(11),
+ B(PopContext), R(11),
B(Jump), U8(27),
- B(Mov), R(5), R(11),
- B(Mov), R(1), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
+ B(Mov), R(5), R(10),
+ B(Mov), R(1), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(10), U8(2),
B(Star), R(6),
B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(7),
B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
- B(Ldar), R(10),
+ B(Ldar), R(9),
B(SetPendingMessage),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(8),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(8),
- B(JumpIfTrue), U8(7),
+ B(Ldar), R(7),
+ B(SwitchOnSmiNoFeedback), U8(12), U8(2), I8(0),
B(Jump), U8(8),
- B(Ldar), R(9),
+ B(Ldar), R(8),
/* 105 S> */ B(Return),
- B(Ldar), R(9),
+ B(Ldar), R(8),
B(ReThrow),
B(LdaUndefined),
/* 105 S> */ B(Return),
]
constant pool: [
FIXED_ARRAY_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
SYMBOL_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
@@ -582,6 +574,8 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
FIXED_ARRAY_TYPE,
+ Smi [6],
+ Smi [9],
]
handlers: [
[15, 138, 144],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
new file mode 100644
index 0000000000..0b57c48309
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ForOfLoop.golden
@@ -0,0 +1,1860 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function f(arr) {
+ for (let x of arr) { let y = x; }
+ }
+ f([1, 2, 3]);
+"
+frame size: 16
+parameter count: 2
+bytecode array length: 260
+bytecodes: [
+ /* 10 E> */ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(6),
+ B(Mov), R(context), R(12),
+ B(Mov), R(context), R(13),
+ /* 34 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
+ B(Star), R(15),
+ B(CallProperty0), R(15), R(arg0), U8(5),
+ B(Mov), R(arg0), R(14),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(Star), R(4),
+ /* 31 S> */ B(LdaNamedProperty), R(4), U8(1), U8(9),
+ B(Star), R(14),
+ /* 31 E> */ B(CallProperty0), R(14), R(4), U8(7),
+ B(Star), R(5),
+ /* 31 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(5), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(5), U8(1),
+ B(LdaNamedProperty), R(5), U8(2), U8(11),
+ B(JumpIfToBooleanTrue), U8(28),
+ B(LdaNamedProperty), R(5), U8(3), U8(13),
+ B(Star), R(7),
+ B(LdaSmi), I8(2),
+ B(Star), R(6),
+ B(Mov), R(7), R(3),
+ /* 20 E> */ B(StackCheck),
+ B(Mov), R(3), R(1),
+ /* 49 S> */ B(Mov), R(1), R(0),
+ B(LdaZero),
+ B(Star), R(6),
+ B(JumpLoop), U8(53), I8(0),
+ B(Jump), U8(36),
+ B(Star), R(14),
+ B(Ldar), R(closure),
+ /* 49 E> */ B(CreateCatchContext), R(14), U8(4), U8(5),
+ B(PushContext), R(14),
+ B(Star), R(13),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrict), R(6), U8(15),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), I8(1),
+ B(Star), R(6),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(15),
+ B(CallRuntime), U16(Runtime::kReThrow), R(15), U8(1),
+ B(PopContext), R(14),
+ B(LdaSmi), I8(-1),
+ B(Star), R(10),
+ B(Jump), U8(7),
+ B(Star), R(11),
+ B(LdaZero),
+ B(Star), R(10),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(12),
+ B(LdaZero),
+ B(TestEqualStrict), R(6), U8(16),
+ B(JumpIfTrue), U8(104),
+ B(LdaNamedProperty), R(4), U8(6), U8(17),
+ B(Star), R(8),
+ B(TestUndetectable),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(93),
+ B(LdaSmi), I8(1),
+ B(TestEqualStrict), R(6), U8(20),
+ B(JumpIfFalse), U8(61),
+ B(Ldar), R(8),
+ B(TestTypeOf), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(13),
+ B(LdaConstant), U8(7),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(13), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(13),
+ B(Mov), R(8), R(14),
+ B(Mov), R(4), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(14), U8(2),
+ B(Jump), U8(20),
+ B(Star), R(14),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(14), U8(4), U8(8),
+ B(Star), R(13),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(13),
+ B(PushContext), R(14),
+ B(PopContext), R(14),
+ B(Jump), U8(27),
+ B(Mov), R(8), R(13),
+ B(Mov), R(4), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
+ B(Star), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
+ B(Ldar), R(12),
+ B(SetPendingMessage),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(10),
+ B(JumpIfFalse), U8(5),
+ B(Ldar), R(11),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 54 S> */ B(Return),
+]
+constant pool: [
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ FIXED_ARRAY_TYPE,
+]
+handlers: [
+ [7, 124, 130],
+ [10, 88, 90],
+ [190, 200, 202],
+]
+
+---
+snippet: "
+ function f(arr) {
+ for (let x of arr) { eval('1'); }
+ }
+ f([1, 2, 3]);
+"
+frame size: 22
+parameter count: 2
+bytecode array length: 345
+bytecodes: [
+ B(CreateFunctionContext), U8(4),
+ B(PushContext), R(7),
+ B(Ldar), R(this),
+ B(StaCurrentContextSlot), U8(5),
+ B(Ldar), R(arg0),
+ B(StaCurrentContextSlot), U8(4),
+ B(CreateMappedArguments),
+ B(StaCurrentContextSlot), U8(7),
+ B(Ldar), R(new_target),
+ B(StaCurrentContextSlot), U8(6),
+ /* 10 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(8),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Mov), R(context), R(11),
+ B(Mov), R(context), R(12),
+ /* 34 S> */ B(LdaContextSlot), R(8), U8(4), U8(0),
+ B(Star), R(13),
+ B(LdaNamedProperty), R(13), U8(1), U8(3),
+ B(Star), R(14),
+ B(CallProperty0), R(14), R(13), U8(5),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(Star), R(1),
+ /* 31 S> */ B(LdaNamedProperty), R(1), U8(2), U8(9),
+ B(Star), R(13),
+ /* 31 E> */ B(CallProperty0), R(13), R(1), U8(7),
+ B(Star), R(2),
+ /* 31 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(2), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(2), U8(1),
+ B(LdaNamedProperty), R(2), U8(3), U8(11),
+ B(JumpIfToBooleanTrue), U8(78),
+ B(LdaNamedProperty), R(2), U8(4), U8(13),
+ B(Star), R(4),
+ B(LdaSmi), I8(2),
+ B(Star), R(3),
+ B(Mov), R(4), R(0),
+ /* 20 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(5),
+ B(PushContext), R(13),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(Ldar), R(4),
+ B(StaCurrentContextSlot), U8(4),
+ /* 41 S> */ B(LdaLookupGlobalSlot), U8(6), U8(17), U8(1),
+ B(Star), R(14),
+ B(LdaConstant), U8(7),
+ B(Star), R(15),
+ B(LdaZero),
+ B(Star), R(19),
+ B(LdaSmi), I8(37),
+ B(Star), R(20),
+ B(LdaSmi), I8(41),
+ B(Star), R(21),
+ B(Mov), R(14), R(16),
+ B(Mov), R(15), R(17),
+ B(Mov), R(closure), R(18),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(16), U8(6),
+ B(Star), R(14),
+ /* 41 E> */ B(CallUndefinedReceiver1), R(14), R(15), U8(15),
+ B(PopContext), R(13),
+ B(LdaZero),
+ B(Star), R(3),
+ B(JumpLoop), U8(103), I8(0),
+ B(Jump), U8(36),
+ B(Star), R(13),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(13), U8(8), U8(9),
+ B(PushContext), R(13),
+ B(Star), R(12),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrict), R(3), U8(19),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(14),
+ B(CallRuntime), U16(Runtime::kReThrow), R(14), U8(1),
+ B(PopContext), R(13),
+ B(LdaSmi), I8(-1),
+ B(Star), R(9),
+ B(Jump), U8(7),
+ B(Star), R(10),
+ B(LdaZero),
+ B(Star), R(9),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(11),
+ B(LdaZero),
+ B(TestEqualStrict), R(3), U8(20),
+ B(JumpIfTrue), U8(104),
+ B(LdaNamedProperty), R(1), U8(10), U8(21),
+ B(Star), R(5),
+ B(TestUndetectable),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(93),
+ B(LdaSmi), I8(1),
+ B(TestEqualStrict), R(3), U8(24),
+ B(JumpIfFalse), U8(61),
+ B(Ldar), R(5),
+ B(TestTypeOf), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(12),
+ B(LdaConstant), U8(11),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(12), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(12),
+ B(Mov), R(5), R(13),
+ B(Mov), R(1), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(13), U8(2),
+ B(Jump), U8(20),
+ B(Star), R(13),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(13), U8(8), U8(12),
+ B(Star), R(12),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(12),
+ B(PushContext), R(13),
+ B(PopContext), R(13),
+ B(Jump), U8(27),
+ B(Mov), R(5), R(12),
+ B(Mov), R(1), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Star), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(6), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(6), U8(1),
+ B(Ldar), R(11),
+ B(SetPendingMessage),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(9),
+ B(JumpIfFalse), U8(7),
+ B(PopContext), R(7),
+ B(Ldar), R(10),
+ B(ReThrow),
+ B(PopContext), R(8),
+ B(LdaUndefined),
+ /* 54 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["1"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ FIXED_ARRAY_TYPE,
+]
+handlers: [
+ [35, 205, 211],
+ [38, 169, 171],
+ [271, 281, 283],
+]
+
+---
+snippet: "
+ function f(arr) {
+ for (let x of arr) { (function() { return x; })(); }
+ }
+ f([1, 2, 3]);
+"
+frame size: 14
+parameter count: 2
+bytecode array length: 278
+bytecodes: [
+ /* 10 E> */ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(4),
+ B(Mov), R(context), R(10),
+ B(Mov), R(context), R(11),
+ /* 34 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(arg0), U8(5),
+ B(Mov), R(arg0), R(12),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(Star), R(2),
+ /* 31 S> */ B(LdaNamedProperty), R(2), U8(1), U8(9),
+ B(Star), R(12),
+ /* 31 E> */ B(CallProperty0), R(12), R(2), U8(7),
+ B(Star), R(3),
+ /* 31 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(3), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(3), U8(1),
+ B(LdaNamedProperty), R(3), U8(2), U8(11),
+ B(JumpIfToBooleanTrue), U8(46),
+ B(LdaNamedProperty), R(3), U8(3), U8(13),
+ B(Star), R(5),
+ B(LdaSmi), I8(2),
+ B(Star), R(4),
+ B(Mov), R(5), R(1),
+ /* 20 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(4),
+ B(PushContext), R(12),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(Ldar), R(5),
+ B(StaCurrentContextSlot), U8(4),
+ /* 41 S> */ B(CreateClosure), U8(5), U8(17), U8(2),
+ B(Star), R(13),
+ /* 67 E> */ B(CallUndefinedReceiver0), R(13), U8(15),
+ B(PopContext), R(12),
+ B(LdaZero),
+ B(Star), R(4),
+ B(JumpLoop), U8(71), I8(0),
+ B(Jump), U8(36),
+ B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(6), U8(7),
+ B(PushContext), R(12),
+ B(Star), R(11),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrict), R(4), U8(18),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), I8(1),
+ B(Star), R(4),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
+ B(PopContext), R(12),
+ B(LdaSmi), I8(-1),
+ B(Star), R(8),
+ B(Jump), U8(7),
+ B(Star), R(9),
+ B(LdaZero),
+ B(Star), R(8),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(10),
+ B(LdaZero),
+ B(TestEqualStrict), R(4), U8(19),
+ B(JumpIfTrue), U8(104),
+ B(LdaNamedProperty), R(2), U8(8), U8(20),
+ B(Star), R(6),
+ B(TestUndetectable),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(93),
+ B(LdaSmi), I8(1),
+ B(TestEqualStrict), R(4), U8(23),
+ B(JumpIfFalse), U8(61),
+ B(Ldar), R(6),
+ B(TestTypeOf), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(11),
+ B(LdaConstant), U8(9),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(11),
+ B(Mov), R(6), R(12),
+ B(Mov), R(2), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Jump), U8(20),
+ B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(6), U8(10),
+ B(Star), R(11),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(11),
+ B(PushContext), R(12),
+ B(PopContext), R(12),
+ B(Jump), U8(27),
+ B(Mov), R(6), R(11),
+ B(Mov), R(2), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(7), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(7), U8(1),
+ B(Ldar), R(10),
+ B(SetPendingMessage),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfFalse), U8(5),
+ B(Ldar), R(9),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 73 S> */ B(Return),
+]
+constant pool: [
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ FIXED_ARRAY_TYPE,
+]
+handlers: [
+ [7, 142, 148],
+ [10, 106, 108],
+ [208, 218, 220],
+]
+
+---
+snippet: "
+ function f(arr) {
+ for (let { x, y } of arr) { let z = x + y; }
+ }
+ f([{ x: 0, y: 3 }, { x: 1, y: 9 }, { x: -12, y: 17 }]);
+"
+frame size: 19
+parameter count: 2
+bytecode array length: 298
+bytecodes: [
+ /* 10 E> */ B(StackCheck),
+ B(LdaZero),
+ B(Star), R(9),
+ B(Mov), R(context), R(15),
+ B(Mov), R(context), R(16),
+ /* 41 S> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
+ B(Star), R(18),
+ B(CallProperty0), R(18), R(arg0), U8(5),
+ B(Mov), R(arg0), R(17),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ B(Star), R(7),
+ /* 38 S> */ B(LdaNamedProperty), R(7), U8(1), U8(9),
+ B(Star), R(17),
+ /* 38 E> */ B(CallProperty0), R(17), R(7), U8(7),
+ B(Star), R(8),
+ /* 38 E> */ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(8), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(8), U8(1),
+ B(LdaNamedProperty), R(8), U8(2), U8(11),
+ B(JumpIfToBooleanTrue), U8(66),
+ B(LdaNamedProperty), R(8), U8(3), U8(13),
+ B(Star), R(10),
+ B(LdaSmi), I8(2),
+ B(Star), R(9),
+ B(Mov), R(10), R(5),
+ /* 20 E> */ B(StackCheck),
+ B(Mov), R(5), R(6),
+ B(Ldar), R(5),
+ B(JumpIfUndefined), U8(6),
+ B(Ldar), R(6),
+ B(JumpIfNotNull), U8(16),
+ B(LdaSmi), I8(61),
+ B(Star), R(17),
+ B(LdaConstant), U8(4),
+ B(Star), R(18),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(17), U8(2),
+ B(Throw),
+ /* 31 S> */ B(LdaNamedProperty), R(6), U8(5), U8(17),
+ B(Star), R(1),
+ /* 34 S> */ B(LdaNamedProperty), R(6), U8(6), U8(19),
+ B(Star), R(2),
+ /* 58 S> */ B(Ldar), R(2),
+ /* 58 E> */ B(Add), R(1), U8(21),
+ B(Star), R(0),
+ B(LdaZero),
+ B(Star), R(9),
+ B(JumpLoop), U8(91), I8(0),
+ B(Jump), U8(36),
+ B(Star), R(17),
+ B(Ldar), R(closure),
+ /* 58 E> */ B(CreateCatchContext), R(17), U8(7), U8(8),
+ B(PushContext), R(17),
+ B(Star), R(16),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrict), R(9), U8(22),
+ B(JumpIfFalse), U8(6),
+ B(LdaSmi), I8(1),
+ B(Star), R(9),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(18),
+ B(CallRuntime), U16(Runtime::kReThrow), R(18), U8(1),
+ B(PopContext), R(17),
+ B(LdaSmi), I8(-1),
+ B(Star), R(13),
+ B(Jump), U8(7),
+ B(Star), R(14),
+ B(LdaZero),
+ B(Star), R(13),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(15),
+ B(LdaZero),
+ B(TestEqualStrict), R(9), U8(23),
+ B(JumpIfTrue), U8(104),
+ B(LdaNamedProperty), R(7), U8(9), U8(24),
+ B(Star), R(11),
+ B(TestUndetectable),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(93),
+ B(LdaSmi), I8(1),
+ B(TestEqualStrict), R(9), U8(27),
+ B(JumpIfFalse), U8(61),
+ B(Ldar), R(11),
+ B(TestTypeOf), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(16),
+ B(LdaConstant), U8(4),
+ B(Star), R(17),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(16), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(16),
+ B(Mov), R(11), R(17),
+ B(Mov), R(7), R(18),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(17), U8(2),
+ B(Jump), U8(20),
+ B(Star), R(17),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(17), U8(7), U8(10),
+ B(Star), R(16),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(16),
+ B(PushContext), R(17),
+ B(PopContext), R(17),
+ B(Jump), U8(27),
+ B(Mov), R(11), R(16),
+ B(Mov), R(7), R(17),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(16), U8(2),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(12), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(Ldar), R(15),
+ B(SetPendingMessage),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(13),
+ B(JumpIfFalse), U8(5),
+ B(Ldar), R(14),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 65 S> */ B(Return),
+]
+constant pool: [
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["y"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ FIXED_ARRAY_TYPE,
+]
+handlers: [
+ [7, 162, 168],
+ [10, 126, 128],
+ [228, 238, 240],
+]
+
+---
+snippet: "
+ function* f(arr) {
+ for (let x of arr) { let y = x; }
+ }
+ f([1, 2, 3]);
+"
+frame size: 13
+parameter count: 2
+bytecode array length: 589
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
+ B(LdaSmi), I8(79),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(CreateFunctionContext), U8(9),
+ B(PushContext), R(2),
+ B(Ldar), R(arg0),
+ B(StaCurrentContextSlot), U8(4),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(StaCurrentContextSlot), U8(5),
+ /* 11 E> */ B(StackCheck),
+ B(Mov), R(context), R(5),
+ B(LdaImmutableCurrentContextSlot), U8(5),
+ B(Star), R(6),
+ B(LdaImmutableCurrentContextSlot), U8(5),
+ B(Star), R(7),
+ B(LdaZero),
+ /* 11 E> */ B(SuspendGenerator), R(6), U8(0),
+ B(Ldar), R(7),
+ /* 55 S> */ B(Return),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(6), U8(1),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
+ B(Star), R(8),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(28),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(19),
+ B(LdaTrue),
+ B(Star), R(10),
+ B(Mov), R(7), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(9), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(JumpConstant), U8(13),
+ B(Ldar), R(7),
+ /* 11 E> */ B(Throw),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(1),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaZero),
+ B(StaContextSlot), R(6), U8(9), U8(0),
+ B(Mov), R(context), R(9),
+ B(Mov), R(context), R(10),
+ /* 35 S> */ B(LdaImmutableContextSlot), R(6), U8(4), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(2), U8(3),
+ B(Star), R(12),
+ B(CallProperty0), R(12), R(11), U8(5),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 35 E> */ B(StaContextSlot), R(6), U8(7), U8(0),
+ /* 32 S> */ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(3), U8(9),
+ B(Star), R(11),
+ /* 32 E> */ B(CallProperty0), R(11), R(12), U8(7),
+ /* 32 E> */ B(StaContextSlot), R(6), U8(8), U8(0),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(13),
+ B(LdaContextSlot), R(6), U8(8), U8(0),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(LdaContextSlot), R(6), U8(8), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(4), U8(11),
+ B(JumpIfToBooleanTrue), U8(73),
+ B(LdaContextSlot), R(6), U8(8), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(5), U8(13),
+ B(StaContextSlot), R(6), U8(10), U8(0),
+ B(LdaSmi), I8(2),
+ B(StaContextSlot), R(6), U8(9), U8(0),
+ B(LdaContextSlot), R(6), U8(10), U8(0),
+ B(StaContextSlot), R(6), U8(6), U8(0),
+ /* 21 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(6),
+ B(PushContext), R(11),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaContextSlot), R(6), U8(6), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(7),
+ B(PushContext), R(12),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ /* 50 S> */ B(LdaImmutableContextSlot), R(12), U8(4), U8(0),
+ /* 50 E> */ B(StaCurrentContextSlot), U8(4),
+ B(PopContext), R(12),
+ B(PopContext), R(11),
+ B(LdaZero),
+ B(StaContextSlot), R(6), U8(9), U8(0),
+ B(JumpLoop), U8(120), I8(0),
+ B(Jump), U8(44),
+ B(Star), R(11),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(11), U8(8), U8(9),
+ B(PushContext), R(11),
+ B(Star), R(10),
+ B(LdaContextSlot), R(6), U8(9), U8(0),
+ B(Star), R(12),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrict), R(12), U8(15),
+ B(JumpIfFalse), U8(8),
+ B(LdaSmi), I8(1),
+ B(StaContextSlot), R(6), U8(9), U8(0),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
+ B(PopContext), R(11),
+ B(LdaSmi), I8(-1),
+ B(Star), R(7),
+ B(Jump), U8(7),
+ B(Star), R(8),
+ B(LdaZero),
+ B(Star), R(7),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(9),
+ B(LdaContextSlot), R(6), U8(9), U8(0),
+ B(Star), R(10),
+ B(LdaZero),
+ B(TestEqualStrict), R(10), U8(16),
+ B(JumpIfTrue), U8(150),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(10),
+ B(LdaNamedProperty), R(10), U8(10), U8(17),
+ B(StaContextSlot), R(6), U8(11), U8(0),
+ B(LdaContextSlot), R(6), U8(11), U8(0),
+ B(TestUndetectable),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(127),
+ B(LdaContextSlot), R(6), U8(9), U8(0),
+ B(Star), R(10),
+ B(LdaSmi), I8(1),
+ B(TestEqualStrict), R(10), U8(20),
+ B(JumpIfFalse), U8(69),
+ B(LdaContextSlot), R(6), U8(11), U8(0),
+ B(TestTypeOf), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(10),
+ B(LdaConstant), U8(11),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(10), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(10),
+ B(LdaContextSlot), R(6), U8(11), U8(0),
+ B(Star), R(11),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
+ B(Jump), U8(20),
+ B(Star), R(11),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(11), U8(8), U8(12),
+ B(Star), R(10),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(10),
+ B(PushContext), R(11),
+ B(PopContext), R(11),
+ B(Jump), U8(47),
+ B(LdaContextSlot), R(6), U8(11), U8(0),
+ B(Star), R(10),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(10), U8(2),
+ B(StaContextSlot), R(6), U8(12), U8(0),
+ B(LdaContextSlot), R(6), U8(12), U8(0),
+ B(Star), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(13),
+ B(LdaContextSlot), R(6), U8(12), U8(0),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(Ldar), R(9),
+ B(SetPendingMessage),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(7),
+ B(JumpIfFalse), U8(15),
+ B(PopContext), R(6),
+ B(PopContext), R(6),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(Mov), R(8), R(4),
+ B(Jump), U8(33),
+ B(PopContext), R(6),
+ B(LdaUndefined),
+ B(Star), R(6),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(14),
+ B(LdaSmi), I8(-1),
+ B(Star), R(3),
+ B(Jump), U8(8),
+ B(Star), R(4),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(5),
+ B(LdaImmutableCurrentContextSlot), U8(5),
+ B(Star), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(6), U8(1),
+ B(Ldar), R(5),
+ B(SetPendingMessage),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(14), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(4),
+ /* 55 S> */ B(Return),
+ B(Ldar), R(4),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 55 S> */ B(Return),
+]
+constant pool: [
+ Smi [56],
+ FIXED_ARRAY_TYPE,
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ FIXED_ARRAY_TYPE,
+ Smi [446],
+ Smi [6],
+ Smi [9],
+]
+handlers: [
+ [55, 546, 552],
+ [134, 331, 337],
+ [137, 287, 289],
+ [423, 439, 441],
+]
+
+---
+snippet: "
+ function* f(arr) {
+ for (let x of arr) yield x;
+ }
+ f([1, 2, 3]);
+"
+frame size: 17
+parameter count: 2
+bytecode array length: 695
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
+ B(LdaSmi), I8(79),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(CreateFunctionContext), U8(9),
+ B(PushContext), R(2),
+ B(Ldar), R(arg0),
+ B(StaCurrentContextSlot), U8(4),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(StaCurrentContextSlot), U8(5),
+ /* 11 E> */ B(StackCheck),
+ B(Mov), R(context), R(5),
+ B(LdaImmutableCurrentContextSlot), U8(5),
+ B(Star), R(6),
+ B(LdaImmutableCurrentContextSlot), U8(5),
+ B(Star), R(7),
+ B(LdaZero),
+ /* 11 E> */ B(SuspendGenerator), R(6), U8(0),
+ B(Ldar), R(7),
+ /* 49 S> */ B(Return),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(6), U8(1),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
+ B(Star), R(8),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(28),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(19),
+ B(LdaTrue),
+ B(Star), R(10),
+ B(Mov), R(7), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(9), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(JumpConstant), U8(16),
+ B(Ldar), R(7),
+ /* 11 E> */ B(Throw),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(2),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaZero),
+ B(StaContextSlot), R(6), U8(9), U8(0),
+ B(Mov), R(context), R(9),
+ B(Mov), R(context), R(10),
+ /* 35 S> */ B(LdaImmutableContextSlot), R(6), U8(4), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(3), U8(3),
+ B(Star), R(12),
+ B(CallProperty0), R(12), R(11), U8(5),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 35 E> */ B(StaContextSlot), R(6), U8(7), U8(0),
+ B(Ldar), R(0),
+ B(SwitchOnSmiNoFeedback), U8(4), U8(1), I8(1),
+ B(LdaSmi), I8(-2),
+ B(TestEqualStrictNoFeedback), R(0),
+ B(JumpIfTrue), U8(11),
+ B(LdaSmi), I8(79),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kAbort), R(11), U8(1),
+ /* 32 S> */ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(5), U8(9),
+ B(Star), R(11),
+ /* 32 E> */ B(CallProperty0), R(11), R(12), U8(7),
+ /* 32 E> */ B(StaContextSlot), R(6), U8(8), U8(0),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(13),
+ B(LdaContextSlot), R(6), U8(8), U8(0),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(LdaContextSlot), R(6), U8(8), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(6), U8(11),
+ B(JumpIfToBooleanTrue), U8(142),
+ B(LdaContextSlot), R(6), U8(8), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(7), U8(13),
+ B(StaContextSlot), R(6), U8(10), U8(0),
+ B(LdaSmi), I8(2),
+ B(StaContextSlot), R(6), U8(9), U8(0),
+ B(LdaContextSlot), R(6), U8(10), U8(0),
+ B(StaContextSlot), R(6), U8(6), U8(0),
+ /* 21 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(8),
+ B(PushContext), R(11),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaContextSlot), R(6), U8(6), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ /* 40 S> */ B(LdaImmutableContextSlot), R(6), U8(5), U8(0),
+ B(Star), R(12),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(13),
+ B(LdaFalse),
+ B(Star), R(14),
+ /* 46 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(13), U8(2),
+ B(Star), R(13),
+ B(LdaSmi), I8(1),
+ B(SuspendGenerator), R(12), U8(0),
+ B(Ldar), R(13),
+ /* 49 S> */ B(Return),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(12), U8(1),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(12), U8(1),
+ B(Star), R(14),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(40),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(31),
+ B(LdaTrue),
+ B(Star), R(16),
+ B(Mov), R(13), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(15), U8(2),
+ B(PopContext), R(11),
+ B(PopContext), R(11),
+ B(PopContext), R(11),
+ B(PopContext), R(11),
+ B(PopContext), R(11),
+ B(PopContext), R(11),
+ B(Star), R(8),
+ B(LdaZero),
+ B(Star), R(7),
+ B(Jump), U8(71),
+ B(Ldar), R(13),
+ /* 40 E> */ B(Throw),
+ B(PopContext), R(11),
+ B(LdaZero),
+ B(StaContextSlot), R(6), U8(9), U8(0),
+ B(JumpLoop), U8(210), I8(0),
+ B(Jump), U8(44),
+ B(Star), R(11),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(11), U8(9), U8(10),
+ B(PushContext), R(11),
+ B(Star), R(10),
+ B(LdaContextSlot), R(6), U8(9), U8(0),
+ B(Star), R(12),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrict), R(12), U8(15),
+ B(JumpIfFalse), U8(8),
+ B(LdaSmi), I8(1),
+ B(StaContextSlot), R(6), U8(9), U8(0),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
+ B(PopContext), R(11),
+ B(LdaSmi), I8(-1),
+ B(Star), R(7),
+ B(Jump), U8(8),
+ B(Star), R(8),
+ B(LdaSmi), I8(1),
+ B(Star), R(7),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(9),
+ B(LdaContextSlot), R(6), U8(9), U8(0),
+ B(Star), R(10),
+ B(LdaZero),
+ B(TestEqualStrict), R(10), U8(16),
+ B(JumpIfTrue), U8(150),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(10),
+ B(LdaNamedProperty), R(10), U8(11), U8(17),
+ B(StaContextSlot), R(6), U8(11), U8(0),
+ B(LdaContextSlot), R(6), U8(11), U8(0),
+ B(TestUndetectable),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(127),
+ B(LdaContextSlot), R(6), U8(9), U8(0),
+ B(Star), R(10),
+ B(LdaSmi), I8(1),
+ B(TestEqualStrict), R(10), U8(20),
+ B(JumpIfFalse), U8(69),
+ B(LdaContextSlot), R(6), U8(11), U8(0),
+ B(TestTypeOf), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(10),
+ B(LdaConstant), U8(12),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(10), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(10),
+ B(LdaContextSlot), R(6), U8(11), U8(0),
+ B(Star), R(11),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
+ B(Jump), U8(20),
+ B(Star), R(11),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(11), U8(9), U8(13),
+ B(Star), R(10),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(10),
+ B(PushContext), R(11),
+ B(PopContext), R(11),
+ B(Jump), U8(47),
+ B(LdaContextSlot), R(6), U8(11), U8(0),
+ B(Star), R(10),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(10), U8(2),
+ B(StaContextSlot), R(6), U8(12), U8(0),
+ B(LdaContextSlot), R(6), U8(12), U8(0),
+ B(Star), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(13),
+ B(LdaContextSlot), R(6), U8(12), U8(0),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(Ldar), R(9),
+ B(SetPendingMessage),
+ B(Ldar), R(7),
+ B(SwitchOnSmiNoFeedback), U8(14), U8(2), I8(0),
+ B(Jump), U8(27),
+ B(PopContext), R(6),
+ B(PopContext), R(6),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Mov), R(8), R(4),
+ B(Jump), U8(46),
+ B(PopContext), R(6),
+ B(PopContext), R(6),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(Mov), R(8), R(4),
+ B(Jump), U8(33),
+ B(PopContext), R(6),
+ B(LdaUndefined),
+ B(Star), R(6),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(14),
+ B(LdaSmi), I8(-1),
+ B(Star), R(3),
+ B(Jump), U8(8),
+ B(Star), R(4),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(5),
+ B(LdaImmutableCurrentContextSlot), U8(5),
+ B(Star), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(6), U8(1),
+ B(Ldar), R(5),
+ B(SetPendingMessage),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(17), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(4),
+ /* 49 S> */ B(Return),
+ B(Ldar), R(4),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 49 S> */ B(Return),
+]
+constant pool: [
+ Smi [56],
+ Smi [150],
+ FIXED_ARRAY_TYPE,
+ SYMBOL_TYPE,
+ Smi [142],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ FIXED_ARRAY_TYPE,
+ Smi [6],
+ Smi [18],
+ Smi [552],
+ Smi [6],
+ Smi [9],
+]
+handlers: [
+ [55, 652, 658],
+ [134, 421, 427],
+ [137, 377, 379],
+ [514, 530, 532],
+]
+
+---
+snippet: "
+ async function f(arr) {
+ for (let x of arr) { let y = x; }
+ }
+ f([1, 2, 3]);
+"
+frame size: 12
+parameter count: 2
+bytecode array length: 556
+bytecodes: [
+ B(CreateFunctionContext), U8(10),
+ B(PushContext), R(0),
+ B(Ldar), R(arg0),
+ B(StaCurrentContextSlot), U8(4),
+ /* 16 E> */ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(1),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(1), U8(1),
+ B(StaCurrentContextSlot), U8(13),
+ B(Mov), R(context), R(3),
+ B(Mov), R(context), R(4),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(5),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaZero),
+ B(StaContextSlot), R(5), U8(9), U8(0),
+ B(Mov), R(context), R(8),
+ B(Mov), R(context), R(9),
+ /* 40 S> */ B(LdaImmutableContextSlot), R(5), U8(4), U8(0),
+ B(Star), R(10),
+ B(LdaNamedProperty), R(10), U8(1), U8(3),
+ B(Star), R(11),
+ B(CallProperty0), R(11), R(10), U8(5),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 40 E> */ B(StaContextSlot), R(5), U8(7), U8(0),
+ /* 37 S> */ B(LdaContextSlot), R(5), U8(7), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(2), U8(9),
+ B(Star), R(10),
+ /* 37 E> */ B(CallProperty0), R(10), R(11), U8(7),
+ /* 37 E> */ B(StaContextSlot), R(5), U8(8), U8(0),
+ B(Star), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(13),
+ B(LdaContextSlot), R(5), U8(8), U8(0),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(LdaContextSlot), R(5), U8(8), U8(0),
+ B(Star), R(10),
+ B(LdaNamedProperty), R(10), U8(3), U8(11),
+ B(JumpIfToBooleanTrue), U8(73),
+ B(LdaContextSlot), R(5), U8(8), U8(0),
+ B(Star), R(10),
+ B(LdaNamedProperty), R(10), U8(4), U8(13),
+ B(StaContextSlot), R(5), U8(10), U8(0),
+ B(LdaSmi), I8(2),
+ B(StaContextSlot), R(5), U8(9), U8(0),
+ B(LdaContextSlot), R(5), U8(10), U8(0),
+ B(StaContextSlot), R(5), U8(6), U8(0),
+ /* 26 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(5),
+ B(PushContext), R(10),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaContextSlot), R(5), U8(6), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(6),
+ B(PushContext), R(11),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ /* 55 S> */ B(LdaImmutableContextSlot), R(11), U8(4), U8(0),
+ /* 55 E> */ B(StaCurrentContextSlot), U8(4),
+ B(PopContext), R(11),
+ B(PopContext), R(10),
+ B(LdaZero),
+ B(StaContextSlot), R(5), U8(9), U8(0),
+ B(JumpLoop), U8(120), I8(0),
+ B(Jump), U8(48),
+ B(Star), R(10),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(10), U8(7), U8(8),
+ B(Star), R(9),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(9),
+ B(PushContext), R(10),
+ B(LdaContextSlot), R(5), U8(9), U8(0),
+ B(Star), R(11),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrict), R(11), U8(15),
+ B(JumpIfFalse), U8(8),
+ B(LdaSmi), I8(1),
+ B(StaContextSlot), R(5), U8(9), U8(0),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kReThrow), R(11), U8(1),
+ B(PopContext), R(10),
+ B(LdaSmi), I8(-1),
+ B(Star), R(6),
+ B(Jump), U8(7),
+ B(Star), R(7),
+ B(LdaZero),
+ B(Star), R(6),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(8),
+ B(LdaContextSlot), R(5), U8(9), U8(0),
+ B(Star), R(9),
+ B(LdaZero),
+ B(TestEqualStrict), R(9), U8(16),
+ B(JumpIfTrue), U8(150),
+ B(LdaContextSlot), R(5), U8(7), U8(0),
+ B(Star), R(9),
+ B(LdaNamedProperty), R(9), U8(9), U8(17),
+ B(StaContextSlot), R(5), U8(11), U8(0),
+ B(LdaContextSlot), R(5), U8(11), U8(0),
+ B(TestUndetectable),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(127),
+ B(LdaContextSlot), R(5), U8(9), U8(0),
+ B(Star), R(9),
+ B(LdaSmi), I8(1),
+ B(TestEqualStrict), R(9), U8(20),
+ B(JumpIfFalse), U8(69),
+ B(LdaContextSlot), R(5), U8(11), U8(0),
+ B(TestTypeOf), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(9),
+ B(LdaConstant), U8(10),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(9), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(9),
+ B(LdaContextSlot), R(5), U8(11), U8(0),
+ B(Star), R(10),
+ B(LdaContextSlot), R(5), U8(7), U8(0),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(10), U8(2),
+ B(Jump), U8(20),
+ B(Star), R(10),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(10), U8(7), U8(11),
+ B(Star), R(9),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(9),
+ B(PushContext), R(10),
+ B(PopContext), R(10),
+ B(Jump), U8(47),
+ B(LdaContextSlot), R(5), U8(11), U8(0),
+ B(Star), R(9),
+ B(LdaContextSlot), R(5), U8(7), U8(0),
+ B(Star), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(9), U8(2),
+ B(StaContextSlot), R(5), U8(12), U8(0),
+ B(LdaContextSlot), R(5), U8(12), U8(0),
+ B(Star), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(9), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(13),
+ B(LdaContextSlot), R(5), U8(12), U8(0),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(9), U8(1),
+ B(Ldar), R(8),
+ B(SetPendingMessage),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(6),
+ B(JumpIfFalse), U8(9),
+ B(PopContext), R(5),
+ B(PopContext), R(5),
+ B(Ldar), R(7),
+ B(ReThrow),
+ B(PopContext), R(5),
+ B(LdaUndefined),
+ B(Star), R(5),
+ B(LdaCurrentContextSlot), U8(13),
+ B(Star), R(6),
+ B(LdaUndefined),
+ B(Star), R(7),
+ B(CallJSRuntime), U8(%promise_resolve), R(5), U8(3),
+ B(LdaCurrentContextSlot), U8(13),
+ B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Jump), U8(67),
+ B(Jump), U8(53),
+ B(Star), R(5),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(5), U8(7), U8(12),
+ B(Star), R(4),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
+ B(LdaUndefined),
+ B(Star), R(6),
+ B(LdaContextSlot), R(5), U8(13), U8(0),
+ B(Star), R(7),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(8),
+ B(LdaFalse),
+ B(Star), R(9),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(6), U8(4),
+ B(LdaContextSlot), R(5), U8(13), U8(0),
+ B(PopContext), R(5),
+ B(PopContext), R(5),
+ B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Jump), U8(14),
+ B(LdaSmi), I8(-1),
+ B(Star), R(1),
+ B(Jump), U8(8),
+ B(Star), R(2),
+ B(LdaSmi), I8(1),
+ B(Star), R(1),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(3),
+ B(LdaUndefined),
+ B(Star), R(4),
+ B(LdaCurrentContextSlot), U8(13),
+ B(Star), R(5),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(4), U8(2),
+ B(Ldar), R(3),
+ B(SetPendingMessage),
+ B(Ldar), R(1),
+ B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(2),
+ /* 60 S> */ B(Return),
+ B(Ldar), R(2),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 60 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ SYMBOL_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ Smi [6],
+ Smi [9],
+]
+handlers: [
+ [21, 510, 516],
+ [24, 457, 459],
+ [41, 242, 248],
+ [44, 194, 196],
+ [334, 350, 352],
+]
+
+---
+snippet: "
+ async function f(arr) {
+ for (let x of arr) await x;
+ }
+ f([1, 2, 3]);
+"
+frame size: 18
+parameter count: 2
+bytecode array length: 732
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
+ B(LdaSmi), I8(79),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(CreateFunctionContext), U8(11),
+ B(PushContext), R(2),
+ B(Ldar), R(arg0),
+ B(StaCurrentContextSlot), U8(4),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(StaCurrentContextSlot), U8(5),
+ /* 16 E> */ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(3),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(3), U8(1),
+ B(StaCurrentContextSlot), U8(7),
+ B(Mov), R(context), R(5),
+ B(Mov), R(context), R(6),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(1),
+ B(PushContext), R(7),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaZero),
+ B(StaContextSlot), R(7), U8(11), U8(0),
+ B(Mov), R(context), R(10),
+ B(Mov), R(context), R(11),
+ /* 40 S> */ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(2), U8(3),
+ B(Star), R(13),
+ B(CallProperty0), R(13), R(12), U8(5),
+ B(JumpIfJSReceiver), U8(7),
+ B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
+ /* 40 E> */ B(StaContextSlot), R(7), U8(9), U8(0),
+ B(Ldar), R(0),
+ B(SwitchOnSmiNoFeedback), U8(3), U8(1), I8(0),
+ B(LdaSmi), I8(-2),
+ B(TestEqualStrictNoFeedback), R(0),
+ B(JumpIfTrue), U8(11),
+ B(LdaSmi), I8(79),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
+ /* 37 S> */ B(LdaContextSlot), R(7), U8(9), U8(0),
+ B(Star), R(13),
+ B(LdaNamedProperty), R(13), U8(4), U8(9),
+ B(Star), R(12),
+ /* 37 E> */ B(CallProperty0), R(12), R(13), U8(7),
+ /* 37 E> */ B(StaContextSlot), R(7), U8(10), U8(0),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(12), U8(1),
+ B(ToBooleanLogicalNot),
+ B(JumpIfFalse), U8(13),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(5), U8(11),
+ B(JumpIfToBooleanTrue), U8(165),
+ B(LdaContextSlot), R(7), U8(10), U8(0),
+ B(Star), R(12),
+ B(LdaNamedProperty), R(12), U8(6), U8(13),
+ B(StaContextSlot), R(7), U8(12), U8(0),
+ B(LdaSmi), I8(2),
+ B(StaContextSlot), R(7), U8(11), U8(0),
+ B(LdaContextSlot), R(7), U8(12), U8(0),
+ B(StaContextSlot), R(7), U8(8), U8(0),
+ /* 26 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(7),
+ B(PushContext), R(12),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaContextSlot), R(7), U8(8), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ /* 45 S> */ B(LdaImmutableContextSlot), R(7), U8(5), U8(0),
+ B(Star), R(13),
+ /* 51 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(StaContextSlot), R(7), U8(6), U8(0),
+ /* 45 S> */ B(LdaUndefined),
+ B(Star), R(14),
+ B(LdaImmutableContextSlot), R(7), U8(5), U8(0),
+ B(Star), R(15),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
+ B(Star), R(16),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(Star), R(17),
+ B(CallJSRuntime), U8(%async_function_await_uncaught), R(14), U8(4),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(Star), R(14),
+ B(LdaZero),
+ B(SuspendGenerator), R(13), U8(2),
+ B(Ldar), R(14),
+ /* 54 S> */ B(Return),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(13), U8(1),
+ B(Star), R(14),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(13), U8(1),
+ B(Star), R(15),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(15),
+ B(JumpIfTrue), U8(40),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrictNoFeedback), R(15),
+ B(JumpIfTrue), U8(31),
+ B(LdaTrue),
+ B(Star), R(17),
+ B(Mov), R(14), R(16),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(16), U8(2),
+ B(PopContext), R(12),
+ B(PopContext), R(12),
+ B(PopContext), R(12),
+ B(PopContext), R(12),
+ B(PopContext), R(12),
+ B(PopContext), R(12),
+ B(Star), R(9),
+ B(LdaZero),
+ B(Star), R(8),
+ B(Jump), U8(75),
+ B(Ldar), R(14),
+ B(ReThrow),
+ B(PopContext), R(12),
+ B(LdaZero),
+ B(StaContextSlot), R(7), U8(11), U8(0),
+ B(JumpLoop), U8(233), I8(0),
+ B(Jump), U8(48),
+ B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(8), U8(9),
+ B(Star), R(11),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(11),
+ B(PushContext), R(12),
+ B(LdaContextSlot), R(7), U8(11), U8(0),
+ B(Star), R(13),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrict), R(13), U8(15),
+ B(JumpIfFalse), U8(8),
+ B(LdaSmi), I8(1),
+ B(StaContextSlot), R(7), U8(11), U8(0),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(13),
+ B(CallRuntime), U16(Runtime::kReThrow), R(13), U8(1),
+ B(PopContext), R(12),
+ B(LdaSmi), I8(-1),
+ B(Star), R(8),
+ B(Jump), U8(8),
+ B(Star), R(9),
+ B(LdaSmi), I8(1),
+ B(Star), R(8),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(10),
+ B(LdaContextSlot), R(7), U8(11), U8(0),
+ B(Star), R(11),
+ B(LdaZero),
+ B(TestEqualStrict), R(11), U8(16),
+ B(JumpIfTrue), U8(150),
+ B(LdaContextSlot), R(7), U8(9), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(10), U8(17),
+ B(StaContextSlot), R(7), U8(13), U8(0),
+ B(LdaContextSlot), R(7), U8(13), U8(0),
+ B(TestUndetectable),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(127),
+ B(LdaContextSlot), R(7), U8(11), U8(0),
+ B(Star), R(11),
+ B(LdaSmi), I8(1),
+ B(TestEqualStrict), R(11), U8(20),
+ B(JumpIfFalse), U8(69),
+ B(LdaContextSlot), R(7), U8(13), U8(0),
+ B(TestTypeOf), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(18),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(11),
+ B(LdaConstant), U8(11),
+ B(Star), R(12),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(Throw),
+ B(Mov), R(context), R(11),
+ B(LdaContextSlot), R(7), U8(13), U8(0),
+ B(Star), R(12),
+ B(LdaContextSlot), R(7), U8(9), U8(0),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(Jump), U8(20),
+ B(Star), R(12),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(12), U8(8), U8(12),
+ B(Star), R(11),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(11),
+ B(PushContext), R(12),
+ B(PopContext), R(12),
+ B(Jump), U8(47),
+ B(LdaContextSlot), R(7), U8(13), U8(0),
+ B(Star), R(11),
+ B(LdaContextSlot), R(7), U8(9), U8(0),
+ B(Star), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
+ B(StaContextSlot), R(7), U8(14), U8(0),
+ B(LdaContextSlot), R(7), U8(14), U8(0),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
+ B(JumpIfToBooleanFalse), U8(4),
+ B(Jump), U8(13),
+ B(LdaContextSlot), R(7), U8(14), U8(0),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(Ldar), R(10),
+ B(SetPendingMessage),
+ B(Ldar), R(8),
+ B(SwitchOnSmiNoFeedback), U8(13), U8(2), I8(0),
+ B(Jump), U8(25),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Mov), R(9), R(4),
+ B(Jump), U8(99),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Ldar), R(9),
+ B(ReThrow),
+ B(PopContext), R(7),
+ B(LdaUndefined),
+ B(Star), R(7),
+ B(LdaCurrentContextSlot), U8(7),
+ B(Star), R(8),
+ B(LdaUndefined),
+ B(Star), R(9),
+ B(CallJSRuntime), U8(%promise_resolve), R(7), U8(3),
+ B(LdaCurrentContextSlot), U8(7),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(67),
+ B(Jump), U8(53),
+ B(Star), R(7),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(7), U8(8), U8(15),
+ B(Star), R(6),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(6),
+ B(PushContext), R(7),
+ B(LdaUndefined),
+ B(Star), R(8),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(Star), R(9),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(10),
+ B(LdaFalse),
+ B(Star), R(11),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(8), U8(4),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(14),
+ B(LdaSmi), I8(-1),
+ B(Star), R(3),
+ B(Jump), U8(8),
+ B(Star), R(4),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(5),
+ B(LdaUndefined),
+ B(Star), R(6),
+ B(LdaCurrentContextSlot), U8(7),
+ B(Star), R(7),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(6), U8(2),
+ B(Ldar), R(5),
+ B(SetPendingMessage),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(16), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(4),
+ /* 54 S> */ B(Return),
+ B(Ldar), R(4),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 54 S> */ B(Return),
+]
+constant pool: [
+ Smi [100],
+ FIXED_ARRAY_TYPE,
+ SYMBOL_TYPE,
+ Smi [165],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ FIXED_ARRAY_TYPE,
+ Smi [6],
+ Smi [22],
+ FIXED_ARRAY_TYPE,
+ Smi [6],
+ Smi [9],
+]
+handlers: [
+ [64, 686, 692],
+ [67, 633, 635],
+ [84, 398, 404],
+ [87, 350, 352],
+ [491, 507, 509],
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
index a9ba5bbf32..8fc6222588 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/FunctionLiterals.golden
@@ -14,7 +14,7 @@ parameter count: 1
bytecode array length: 6
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
+ /* 34 S> */ B(CreateClosure), U8(0), U8(3), U8(2),
/* 55 S> */ B(Return),
]
constant pool: [
@@ -32,9 +32,9 @@ parameter count: 1
bytecode array length: 11
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateClosure), U8(0), U8(4), U8(2),
+ /* 34 S> */ B(CreateClosure), U8(0), U8(5), U8(2),
B(Star), R(0),
- /* 56 E> */ B(CallUndefinedReceiver0), R(0), U8(2),
+ /* 56 E> */ B(CallUndefinedReceiver0), R(0), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -52,11 +52,11 @@ parameter count: 1
bytecode array length: 16
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateClosure), U8(0), U8(4), U8(2),
+ /* 34 S> */ B(CreateClosure), U8(0), U8(5), U8(2),
B(Star), R(0),
B(LdaSmi), I8(1),
B(Star), R(1),
- /* 67 E> */ B(CallUndefinedReceiver1), R(0), R(1), U8(2),
+ /* 67 E> */ B(CallUndefinedReceiver1), R(0), R(1), U8(3),
/* 71 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden
index b8b23b03f9..48f9afaa27 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GenerateTestUndetectable.golden
@@ -17,7 +17,7 @@ parameter count: 1
bytecode array length: 25
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(2),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(2),
B(Mov), R(2), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
@@ -47,7 +47,7 @@ parameter count: 1
bytecode array length: 25
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(2),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(2),
B(Mov), R(2), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
@@ -77,7 +77,7 @@ parameter count: 1
bytecode array length: 25
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(2),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(2),
B(Mov), R(2), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
@@ -107,7 +107,7 @@ parameter count: 1
bytecode array length: 25
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(2),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(2),
B(Mov), R(2), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
@@ -137,7 +137,7 @@ parameter count: 1
bytecode array length: 24
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(2),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(2),
B(Mov), R(2), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
@@ -166,7 +166,7 @@ parameter count: 1
bytecode array length: 24
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(2),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(2),
B(Mov), R(2), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
@@ -195,7 +195,7 @@ parameter count: 1
bytecode array length: 24
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(2),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(2),
B(Mov), R(2), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
@@ -224,7 +224,7 @@ parameter count: 1
bytecode array length: 24
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(2),
+ /* 46 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(2),
B(Mov), R(2), R(0),
/* 63 S> */ B(LdaSmi), I8(10),
B(Star), R(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
index c92cf051ac..125e7b6502 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Generators.golden
@@ -11,64 +11,59 @@ snippet: "
function* f() { }
f();
"
-frame size: 12
+frame size: 11
parameter count: 1
-bytecode array length: 199
+bytecode array length: 173
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(27),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(2),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(1),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(54),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(CreateFunctionContext), U8(2),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
/* 11 E> */ B(StackCheck),
B(Mov), R(context), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(7),
- B(Mov), R(closure), R(6),
- /* 11 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(6), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(Star), R(6),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(7),
B(LdaZero),
- B(SuspendGenerator), R(7), U8(0),
- B(Ldar), R(6),
+ /* 11 E> */ B(SuspendGenerator), R(6), U8(0),
+ B(Ldar), R(7),
/* 16 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(7), U8(1),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(6), U8(1),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
B(Star), R(8),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(7), U8(1),
- B(Star), R(9),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(30),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(28),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(21),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(19),
B(LdaTrue),
- B(Star), R(11),
- B(Mov), R(8), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(10), U8(2),
+ B(Star), R(10),
+ B(Mov), R(7), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(9), U8(2),
B(Star), R(4),
B(LdaZero),
B(Star), R(3),
- B(Jump), U8(35),
- B(Ldar), R(8),
+ B(Jump), U8(34),
+ B(Ldar), R(7),
/* 11 E> */ B(Throw),
B(LdaUndefined),
B(Star), R(6),
@@ -76,35 +71,26 @@ bytecodes: [
B(Star), R(7),
B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
B(Star), R(4),
- B(LdaSmi), I8(1),
+ B(LdaZero),
B(Star), R(3),
B(Jump), U8(14),
B(LdaSmi), I8(-1),
B(Star), R(3),
B(Jump), U8(8),
B(Star), R(4),
- B(LdaSmi), I8(2),
+ B(LdaSmi), I8(1),
B(Star), R(3),
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(5),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(6),
- B(CallRuntime), U16(Runtime::k_GeneratorClose), R(6), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(6), U8(1),
B(Ldar), R(5),
B(SetPendingMessage),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(16),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(13),
- B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(10),
- B(Jump), U8(11),
- B(Ldar), R(4),
- /* 16 S> */ B(Return),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(1), U8(2), I8(0),
+ B(Jump), U8(8),
B(Ldar), R(4),
/* 16 S> */ B(Return),
B(Ldar), R(4),
@@ -113,9 +99,12 @@ bytecodes: [
/* 16 S> */ B(Return),
]
constant pool: [
+ Smi [52],
+ Smi [6],
+ Smi [9],
]
handlers: [
- [45, 141, 147],
+ [51, 130, 136],
]
---
@@ -123,102 +112,93 @@ snippet: "
function* f() { yield 42 }
f();
"
-frame size: 12
+frame size: 11
parameter count: 1
-bytecode array length: 291
+bytecode array length: 245
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(33),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(2),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(1),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(60),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(130),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(CreateFunctionContext), U8(2),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(CreateFunctionContext), U8(1),
+ B(PushContext), R(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
/* 11 E> */ B(StackCheck),
B(Mov), R(context), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(7),
- B(Mov), R(closure), R(6),
- /* 11 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(6), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(Star), R(6),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(7),
B(LdaZero),
- B(SuspendGenerator), R(7), U8(0),
- B(Ldar), R(6),
+ /* 11 E> */ B(SuspendGenerator), R(6), U8(0),
+ B(Ldar), R(7),
/* 25 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(7), U8(1),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(6), U8(1),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
B(Star), R(8),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(7), U8(1),
- B(Star), R(9),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(30),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(28),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(21),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(19),
B(LdaTrue),
- B(Star), R(11),
- B(Mov), R(8), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(10), U8(2),
+ B(Star), R(10),
+ B(Mov), R(7), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(9), U8(2),
B(Star), R(4),
B(LdaZero),
B(Star), R(3),
- B(Jump), U8(112),
- B(Ldar), R(8),
+ B(Jump), U8(106),
+ B(Ldar), R(7),
/* 11 E> */ B(Throw),
- /* 16 S> */ B(LdaSmi), I8(42),
+ /* 16 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(6),
- B(LdaFalse),
+ B(LdaSmi), I8(42),
B(Star), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
- B(Star), R(6),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ B(LdaFalse),
+ B(Star), R(8),
+ /* 16 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(7), U8(2),
B(Star), R(7),
B(LdaSmi), I8(1),
- B(SuspendGenerator), R(7), U8(0),
- B(Ldar), R(6),
+ B(SuspendGenerator), R(6), U8(0),
+ B(Ldar), R(7),
/* 25 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(7), U8(1),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(6), U8(1),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
B(Star), R(8),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(7), U8(1),
- B(Star), R(9),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(31),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(28),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(9),
- B(JumpIfTrue), U8(22),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(19),
B(LdaTrue),
- B(Star), R(11),
- B(Mov), R(8), R(10),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(10), U8(2),
+ B(Star), R(10),
+ B(Mov), R(7), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(9), U8(2),
B(Star), R(4),
- B(LdaSmi), I8(1),
+ B(LdaZero),
B(Star), R(3),
- B(Jump), U8(35),
- B(Ldar), R(8),
+ B(Jump), U8(34),
+ B(Ldar), R(7),
/* 16 E> */ B(Throw),
B(LdaUndefined),
B(Star), R(6),
@@ -226,40 +206,26 @@ bytecodes: [
B(Star), R(7),
B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
B(Star), R(4),
- B(LdaSmi), I8(2),
+ B(LdaZero),
B(Star), R(3),
B(Jump), U8(14),
B(LdaSmi), I8(-1),
B(Star), R(3),
B(Jump), U8(8),
B(Star), R(4),
- B(LdaSmi), I8(3),
+ B(LdaSmi), I8(1),
B(Star), R(3),
B(LdaTheHole),
B(SetPendingMessage),
B(Star), R(5),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(6),
- B(CallRuntime), U16(Runtime::k_GeneratorClose), R(6), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(6), U8(1),
B(Ldar), R(5),
B(SetPendingMessage),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(22),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(19),
- B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(16),
- B(LdaSmi), I8(3),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(13),
- B(Jump), U8(14),
- B(Ldar), R(4),
- /* 25 S> */ B(Return),
- B(Ldar), R(4),
- /* 25 S> */ B(Return),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(2), U8(2), I8(0),
+ B(Jump), U8(8),
B(Ldar), R(4),
/* 25 S> */ B(Return),
B(Ldar), R(4),
@@ -268,9 +234,13 @@ bytecodes: [
/* 25 S> */ B(Return),
]
constant pool: [
+ Smi [52],
+ Smi [124],
+ Smi [6],
+ Smi [9],
]
handlers: [
- [51, 224, 230],
+ [51, 202, 208],
]
---
@@ -278,335 +248,305 @@ snippet: "
function* f() { for (let x of [42]) yield x }
f();
"
-frame size: 18
+frame size: 17
parameter count: 1
-bytecode array length: 751
+bytecode array length: 691
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(33),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(4),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(3),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(60),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(152),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kAbort), R(5), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CreateFunctionContext), U8(9),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(CreateFunctionContext), U8(8),
+ B(PushContext), R(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
/* 11 E> */ B(StackCheck),
- B(Mov), R(context), R(7),
+ B(Mov), R(context), R(5),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(9),
- B(Mov), R(closure), R(8),
- /* 11 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(8), U8(2),
- B(StaCurrentContextSlot), U8(5),
- B(Star), R(8),
- B(LdaImmutableCurrentContextSlot), U8(5),
- B(Star), R(9),
+ B(Star), R(6),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(7),
B(LdaZero),
- B(SuspendGenerator), R(9), U8(0),
- B(Ldar), R(8),
+ /* 11 E> */ B(SuspendGenerator), R(6), U8(0),
+ B(Ldar), R(7),
/* 44 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(9), U8(1),
- B(Star), R(10),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(9), U8(1),
- B(Star), R(11),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(6), U8(1),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
+ B(Star), R(8),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(11),
- B(JumpIfTrue), U8(30),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(28),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(11),
- B(JumpIfTrue), U8(21),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(19),
B(LdaTrue),
- B(Star), R(13),
- B(Mov), R(10), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(12), U8(2),
- B(Star), R(6),
+ B(Star), R(10),
+ B(Mov), R(7), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(9), U8(2),
+ B(Star), R(4),
B(LdaZero),
- B(Star), R(5),
- B(JumpConstant), U8(12),
- B(Ldar), R(10),
+ B(Star), R(3),
+ B(JumpConstant), U8(17),
+ B(Ldar), R(7),
/* 11 E> */ B(Throw),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(0),
- B(PushContext), R(1),
+ B(CreateBlockContext), U8(2),
+ B(PushContext), R(6),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
B(LdaZero),
- B(StaContextSlot), R(1), U8(9), U8(0),
+ B(StaContextSlot), R(6), U8(8), U8(0),
+ B(Mov), R(context), R(9),
B(Mov), R(context), R(10),
- B(Mov), R(context), R(11),
- /* 30 S> */ B(CreateArrayLiteral), U8(1), U8(2), U8(9),
+ /* 30 S> */ B(CreateArrayLiteral), U8(3), U8(3), U8(17),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(4), U8(4),
B(Star), R(12),
- B(LdaNamedProperty), R(12), U8(2), U8(3),
- B(Star), R(13),
- B(CallProperty0), R(13), R(12), U8(5),
+ B(CallProperty0), R(12), R(11), U8(6),
B(JumpIfJSReceiver), U8(7),
B(CallRuntime), U16(Runtime::kThrowSymbolIteratorInvalid), R(0), U8(0),
- /* 30 E> */ B(StaContextSlot), R(1), U8(7), U8(0),
+ /* 30 E> */ B(StaContextSlot), R(6), U8(6), U8(0),
+ B(Ldar), R(0),
+ B(SwitchOnSmiNoFeedback), U8(5), U8(1), I8(1),
B(LdaSmi), I8(-2),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(17),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(3),
- B(JumpIfTrue), U8(134),
+ B(TestEqualStrictNoFeedback), R(0),
+ B(JumpIfTrue), U8(11),
B(LdaSmi), I8(79),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kAbort), R(11), U8(1),
+ /* 27 S> */ B(LdaContextSlot), R(6), U8(6), U8(0),
B(Star), R(12),
- B(CallRuntime), U16(Runtime::kAbort), R(12), U8(1),
- /* 27 S> */ B(LdaContextSlot), R(1), U8(7), U8(0),
- B(Star), R(13),
- B(LdaNamedProperty), R(13), U8(3), U8(9),
- B(Star), R(12),
- /* 27 E> */ B(CallProperty0), R(12), R(13), U8(7),
- /* 27 E> */ B(StaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(12), U8(1),
+ B(LdaNamedProperty), R(12), U8(6), U8(10),
+ B(Star), R(11),
+ /* 27 E> */ B(CallProperty0), R(11), R(12), U8(8),
+ /* 27 E> */ B(StaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
B(ToBooleanLogicalNot),
B(JumpIfFalse), U8(13),
- B(LdaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(12), U8(1),
- B(LdaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(12),
- B(LdaNamedProperty), R(12), U8(4), U8(11),
- B(JumpIfToBooleanTrue), U8(146),
- B(LdaContextSlot), R(1), U8(8), U8(0),
- B(Star), R(12),
- B(LdaNamedProperty), R(12), U8(5), U8(13),
- B(StaContextSlot), R(1), U8(10), U8(0),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(11),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(7), U8(12),
+ B(JumpIfToBooleanTrue), U8(142),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(11),
+ B(LdaNamedProperty), R(11), U8(8), U8(14),
+ B(StaContextSlot), R(6), U8(9), U8(0),
B(LdaSmi), I8(2),
- B(StaContextSlot), R(1), U8(9), U8(0),
- B(LdaContextSlot), R(1), U8(10), U8(0),
- B(StaContextSlot), R(1), U8(6), U8(0),
+ B(StaContextSlot), R(6), U8(8), U8(0),
+ B(LdaContextSlot), R(6), U8(9), U8(0),
+ B(StaContextSlot), R(6), U8(5), U8(0),
/* 16 E> */ B(StackCheck),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(6),
- B(PushContext), R(2),
+ B(CreateBlockContext), U8(9),
+ B(PushContext), R(11),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(LdaContextSlot), R(1), U8(6), U8(0),
+ B(LdaContextSlot), R(6), U8(5), U8(0),
B(StaCurrentContextSlot), U8(4),
- /* 36 S> */ B(LdaImmutableCurrentContextSlot), U8(4),
+ /* 36 S> */ B(LdaImmutableContextSlot), R(6), U8(4), U8(0),
B(Star), R(12),
- B(LdaFalse),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(12), U8(2),
- B(Star), R(12),
- B(LdaImmutableContextSlot), R(1), U8(5), U8(0),
+ B(LdaFalse),
+ B(Star), R(14),
+ /* 42 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(13), U8(2),
B(Star), R(13),
B(LdaSmi), I8(1),
- B(SuspendGenerator), R(13), U8(0),
- B(Ldar), R(12),
+ B(SuspendGenerator), R(12), U8(0),
+ B(Ldar), R(13),
/* 44 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(13), U8(1),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(12), U8(1),
+ B(Star), R(13),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(12), U8(1),
B(Star), R(14),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(13), U8(1),
- B(Star), R(15),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(15),
- B(JumpIfTrue), U8(42),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(40),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(15),
- B(JumpIfTrue), U8(33),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(14),
+ B(JumpIfTrue), U8(31),
B(LdaTrue),
- B(Star), R(17),
- B(Mov), R(14), R(16),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(16), U8(2),
- B(PopContext), R(2),
- B(PopContext), R(2),
- B(PopContext), R(2),
- B(PopContext), R(2),
- B(PopContext), R(2),
- B(PopContext), R(2),
- B(Star), R(9),
- B(LdaZero),
+ B(Star), R(16),
+ B(Mov), R(13), R(15),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(15), U8(2),
+ B(PopContext), R(11),
+ B(PopContext), R(11),
+ B(PopContext), R(11),
+ B(PopContext), R(11),
+ B(PopContext), R(11),
+ B(PopContext), R(11),
B(Star), R(8),
+ B(LdaZero),
+ B(Star), R(7),
B(Jump), U8(71),
- B(Ldar), R(14),
+ B(Ldar), R(13),
/* 36 E> */ B(Throw),
- B(PopContext), R(2),
+ B(PopContext), R(11),
B(LdaZero),
- B(StaContextSlot), R(1), U8(9), U8(0),
- B(JumpLoop), U8(214), I8(0),
+ B(StaContextSlot), R(6), U8(8), U8(0),
+ B(JumpLoop), U8(210), I8(0),
B(Jump), U8(44),
- B(Star), R(12),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(7), U8(8),
B(Star), R(11),
- B(PushContext), R(2),
- B(LdaContextSlot), R(1), U8(9), U8(0),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(11), U8(10), U8(11),
+ B(PushContext), R(11),
+ B(Star), R(10),
+ B(LdaContextSlot), R(6), U8(8), U8(0),
B(Star), R(12),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(12), U8(15),
+ B(TestEqualStrict), R(12), U8(16),
B(JumpIfFalse), U8(8),
B(LdaSmi), I8(1),
- B(StaContextSlot), R(1), U8(9), U8(0),
+ B(StaContextSlot), R(6), U8(8), U8(0),
B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(12),
B(CallRuntime), U16(Runtime::kReThrow), R(12), U8(1),
- B(PopContext), R(2),
+ B(PopContext), R(11),
B(LdaSmi), I8(-1),
- B(Star), R(8),
+ B(Star), R(7),
B(Jump), U8(8),
- B(Star), R(9),
- B(LdaSmi), I8(1),
B(Star), R(8),
+ B(LdaSmi), I8(1),
+ B(Star), R(7),
B(LdaTheHole),
B(SetPendingMessage),
+ B(Star), R(9),
+ B(LdaContextSlot), R(6), U8(8), U8(0),
B(Star), R(10),
- B(LdaContextSlot), R(1), U8(9), U8(0),
- B(Star), R(11),
B(LdaZero),
- B(TestEqualStrict), R(11), U8(16),
+ B(TestEqualStrict), R(10), U8(17),
B(JumpIfTrue), U8(150),
- B(LdaContextSlot), R(1), U8(7), U8(0),
- B(Star), R(11),
- B(LdaNamedProperty), R(11), U8(9), U8(17),
- B(StaContextSlot), R(1), U8(11), U8(0),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(6), U8(6), U8(0),
+ B(Star), R(10),
+ B(LdaNamedProperty), R(10), U8(12), U8(18),
+ B(StaContextSlot), R(6), U8(10), U8(0),
+ B(LdaContextSlot), R(6), U8(10), U8(0),
B(TestUndetectable),
B(JumpIfFalse), U8(4),
B(Jump), U8(127),
- B(LdaContextSlot), R(1), U8(9), U8(0),
- B(Star), R(11),
+ B(LdaContextSlot), R(6), U8(8), U8(0),
+ B(Star), R(10),
B(LdaSmi), I8(1),
- B(TestEqualStrict), R(11), U8(20),
+ B(TestEqualStrict), R(10), U8(21),
B(JumpIfFalse), U8(69),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(LdaContextSlot), R(6), U8(10), U8(0),
B(TestTypeOf), U8(5),
B(JumpIfFalse), U8(4),
B(Jump), U8(18),
- B(Wide), B(LdaSmi), I16(132),
+ B(Wide), B(LdaSmi), I16(130),
+ B(Star), R(10),
+ B(LdaConstant), U8(13),
B(Star), R(11),
- B(LdaConstant), U8(10),
- B(Star), R(12),
- B(CallRuntime), U16(Runtime::kNewTypeError), R(11), U8(2),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(10), U8(2),
B(Throw),
- B(Mov), R(context), R(11),
- B(LdaContextSlot), R(1), U8(11), U8(0),
+ B(Mov), R(context), R(10),
+ B(LdaContextSlot), R(6), U8(10), U8(0),
+ B(Star), R(11),
+ B(LdaContextSlot), R(6), U8(6), U8(0),
B(Star), R(12),
- B(LdaContextSlot), R(1), U8(7), U8(0),
- B(Star), R(13),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(12), U8(2),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
B(Jump), U8(20),
- B(Star), R(12),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(12), U8(7), U8(11),
B(Star), R(11),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(11), U8(10), U8(14),
+ B(Star), R(10),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(11),
- B(PushContext), R(2),
- B(PopContext), R(2),
+ B(Ldar), R(10),
+ B(PushContext), R(11),
+ B(PopContext), R(11),
B(Jump), U8(47),
- B(LdaContextSlot), R(1), U8(11), U8(0),
- B(Star), R(11),
- B(LdaContextSlot), R(1), U8(7), U8(0),
- B(Star), R(12),
- B(InvokeIntrinsic), U8(Runtime::k_Call), R(11), U8(2),
- B(StaContextSlot), R(1), U8(12), U8(0),
- B(LdaContextSlot), R(1), U8(12), U8(0),
+ B(LdaContextSlot), R(6), U8(10), U8(0),
+ B(Star), R(10),
+ B(LdaContextSlot), R(6), U8(6), U8(0),
B(Star), R(11),
- B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(11), U8(1),
+ B(InvokeIntrinsic), U8(Runtime::k_Call), R(10), U8(2),
+ B(StaContextSlot), R(6), U8(11), U8(0),
+ B(LdaContextSlot), R(6), U8(11), U8(0),
+ B(Star), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_IsJSReceiver), R(10), U8(1),
B(JumpIfToBooleanFalse), U8(4),
B(Jump), U8(13),
- B(LdaContextSlot), R(1), U8(12), U8(0),
- B(Star), R(11),
- B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(11), U8(1),
- B(Ldar), R(10),
+ B(LdaContextSlot), R(6), U8(11), U8(0),
+ B(Star), R(10),
+ B(CallRuntime), U16(Runtime::kThrowIteratorResultNotAnObject), R(10), U8(1),
+ B(Ldar), R(9),
B(SetPendingMessage),
+ B(Ldar), R(7),
+ B(SwitchOnSmiNoFeedback), U8(15), U8(2), I8(0),
+ B(Jump), U8(27),
+ B(PopContext), R(6),
+ B(PopContext), R(6),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(8),
- B(JumpIfTrue), U8(10),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(8),
- B(JumpIfTrue), U8(17),
- B(Jump), U8(28),
- B(PopContext), R(1),
- B(PopContext), R(1),
+ B(Star), R(3),
+ B(Mov), R(8), R(4),
+ B(Jump), U8(46),
+ B(PopContext), R(6),
+ B(PopContext), R(6),
B(LdaSmi), I8(1),
- B(Star), R(5),
- B(Mov), R(9), R(6),
- B(Jump), U8(47),
- B(PopContext), R(1),
- B(PopContext), R(1),
- B(LdaSmi), I8(2),
- B(Star), R(5),
- B(Mov), R(9), R(6),
- B(Jump), U8(34),
- B(PopContext), R(1),
+ B(Star), R(3),
+ B(Mov), R(8), R(4),
+ B(Jump), U8(33),
+ B(PopContext), R(6),
B(LdaUndefined),
- B(Star), R(8),
- B(LdaTrue),
- B(Star), R(9),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(8), U8(2),
B(Star), R(6),
- B(LdaSmi), I8(3),
- B(Star), R(5),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
B(Jump), U8(14),
B(LdaSmi), I8(-1),
- B(Star), R(5),
+ B(Star), R(3),
B(Jump), U8(8),
- B(Star), R(6),
- B(LdaSmi), I8(4),
- B(Star), R(5),
+ B(Star), R(4),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
B(LdaTheHole),
B(SetPendingMessage),
- B(Star), R(7),
- B(LdaImmutableCurrentContextSlot), U8(5),
- B(Star), R(8),
- B(CallRuntime), U16(Runtime::k_GeneratorClose), R(8), U8(1),
- B(Ldar), R(7),
+ B(Star), R(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(6), U8(1),
+ B(Ldar), R(5),
B(SetPendingMessage),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(28),
- B(LdaSmi), I8(1),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(25),
- B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(22),
- B(LdaSmi), I8(3),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(19),
- B(LdaSmi), I8(4),
- B(TestEqualStrictNoFeedback), R(5),
- B(JumpIfTrue), U8(16),
- B(Jump), U8(17),
- B(Ldar), R(6),
- /* 44 S> */ B(Return),
- B(Ldar), R(6),
- /* 44 S> */ B(Return),
- B(Ldar), R(6),
- B(ReThrow),
- B(Ldar), R(6),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(18), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(4),
/* 44 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(4),
B(ReThrow),
B(LdaUndefined),
/* 44 S> */ B(Return),
]
constant pool: [
+ Smi [52],
+ Smi [146],
FIXED_ARRAY_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
SYMBOL_TYPE,
+ Smi [142],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["next"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["done"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["value"],
@@ -616,12 +556,16 @@ constant pool: [
ONE_BYTE_INTERNALIZED_STRING_TYPE ["return"],
ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
FIXED_ARRAY_TYPE,
- Smi [563],
+ Smi [6],
+ Smi [18],
+ Smi [552],
+ Smi [6],
+ Smi [9],
]
handlers: [
- [51, 675, 681],
- [146, 437, 443],
- [149, 393, 395],
- [530, 546, 548],
+ [51, 648, 654],
+ [130, 417, 423],
+ [133, 373, 375],
+ [510, 526, 528],
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
index b085a663ea..20be404cac 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
@@ -17,9 +17,9 @@ parameter count: 1
bytecode array length: 11
bytecodes: [
/* 26 E> */ B(StackCheck),
- /* 31 S> */ B(LdaGlobal), U8(0), U8(2),
- B(BitwiseAndSmi), I8(1), U8(4),
- /* 45 E> */ B(StaGlobalSloppy), U8(0), U8(5),
+ /* 31 S> */ B(LdaGlobal), U8(0), U8(3),
+ B(BitwiseAndSmi), I8(1), U8(5),
+ /* 45 E> */ B(StaGlobalSloppy), U8(0), U8(6),
/* 51 S> */ B(Return),
]
constant pool: [
@@ -39,9 +39,9 @@ parameter count: 1
bytecode array length: 11
bytecodes: [
/* 27 E> */ B(StackCheck),
- /* 32 S> */ B(LdaGlobal), U8(0), U8(2),
- B(AddSmi), I8(1), U8(4),
- /* 51 E> */ B(StaGlobalSloppy), U8(0), U8(5),
+ /* 32 S> */ B(LdaGlobal), U8(0), U8(3),
+ B(AddSmi), I8(1), U8(5),
+ /* 51 E> */ B(StaGlobalSloppy), U8(0), U8(6),
/* 57 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
index c40db11faf..e76e402f79 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
@@ -17,9 +17,9 @@ parameter count: 1
bytecode array length: 10
bytecodes: [
/* 26 E> */ B(StackCheck),
- /* 31 S> */ B(LdaGlobal), U8(0), U8(2),
- B(Inc), U8(6),
- /* 40 E> */ B(StaGlobalSloppy), U8(0), U8(4),
+ /* 31 S> */ B(LdaGlobal), U8(0), U8(3),
+ B(Inc), U8(7),
+ /* 40 E> */ B(StaGlobalSloppy), U8(0), U8(5),
/* 48 S> */ B(Return),
]
constant pool: [
@@ -39,11 +39,11 @@ parameter count: 1
bytecode array length: 17
bytecodes: [
/* 26 E> */ B(StackCheck),
- /* 31 S> */ B(LdaGlobal), U8(0), U8(2),
- B(ToNumber), R(0), U8(6),
+ /* 31 S> */ B(LdaGlobal), U8(0), U8(3),
+ B(ToNumber), R(0), U8(7),
B(Ldar), R(0),
- B(Dec), U8(6),
- /* 44 E> */ B(StaGlobalSloppy), U8(0), U8(4),
+ B(Dec), U8(7),
+ /* 44 E> */ B(StaGlobalSloppy), U8(0), U8(5),
B(Ldar), R(0),
/* 48 S> */ B(Return),
]
@@ -64,9 +64,9 @@ parameter count: 1
bytecode array length: 10
bytecodes: [
/* 27 E> */ B(StackCheck),
- /* 46 S> */ B(LdaGlobal), U8(0), U8(2),
- B(Dec), U8(6),
- /* 55 E> */ B(StaGlobalStrict), U8(0), U8(4),
+ /* 46 S> */ B(LdaGlobal), U8(0), U8(3),
+ B(Dec), U8(7),
+ /* 55 E> */ B(StaGlobalStrict), U8(0), U8(5),
/* 68 S> */ B(Return),
]
constant pool: [
@@ -86,11 +86,11 @@ parameter count: 1
bytecode array length: 17
bytecodes: [
/* 27 E> */ B(StackCheck),
- /* 32 S> */ B(LdaGlobal), U8(0), U8(2),
- B(ToNumber), R(0), U8(6),
+ /* 32 S> */ B(LdaGlobal), U8(0), U8(3),
+ B(ToNumber), R(0), U8(7),
B(Ldar), R(0),
- B(Inc), U8(6),
- /* 50 E> */ B(StaGlobalSloppy), U8(0), U8(4),
+ B(Inc), U8(7),
+ /* 50 E> */ B(StaGlobalSloppy), U8(0), U8(5),
B(Ldar), R(0),
/* 54 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
index 9491511c56..5bfea8531b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalDelete.golden
@@ -19,7 +19,7 @@ parameter count: 1
bytecode array length: 11
bytecodes: [
/* 32 E> */ B(StackCheck),
- /* 39 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 39 S> */ B(LdaGlobal), U8(0), U8(3),
B(Star), R(0),
B(LdaConstant), U8(1),
B(DeletePropertySloppy), R(0),
@@ -46,7 +46,7 @@ parameter count: 1
bytecode array length: 11
bytecodes: [
/* 28 E> */ B(StackCheck),
- /* 51 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 51 S> */ B(LdaGlobal), U8(0), U8(3),
B(Star), R(0),
B(LdaSmi), I8(1),
B(DeletePropertyStrict), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
index 77e010d831..452a36cab4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/IfConditions.golden
@@ -121,7 +121,7 @@ bytecodes: [
B(Star), R(0),
/* 30 S> */ B(JumpIfToBooleanFalse), U8(11),
/* 43 S> */ B(Ldar), R(0),
- B(AddSmi), I8(1), U8(2),
+ B(AddSmi), I8(1), U8(3),
B(Star), R(0),
B(Jump), U8(5),
/* 66 S> */ B(LdaSmi), I8(2),
@@ -151,7 +151,7 @@ bytecode array length: 19
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 18 S> */ B(LdaZero),
- /* 24 E> */ B(TestLessThanOrEqual), R(arg0), U8(2),
+ /* 24 E> */ B(TestLessThanOrEqual), R(arg0), U8(3),
B(JumpIfFalse), U8(7),
/* 36 S> */ B(Wide), B(LdaSmi), I16(200),
/* 80 S> */ B(Return),
@@ -258,7 +258,7 @@ snippet: "
"
frame size: 2
parameter count: 2
-bytecode array length: 410
+bytecode array length: 156
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 24 S> */ B(LdaZero),
@@ -266,136 +266,136 @@ bytecodes: [
/* 35 S> */ B(LdaZero),
B(Star), R(1),
/* 38 S> */ B(LdaConstant), U8(0),
- /* 44 E> */ B(TestEqualStrict), R(0), U8(2),
- B(JumpIfFalseConstant), U8(1),
+ /* 44 E> */ B(TestEqualStrict), R(0), U8(3),
+ B(JumpIfFalse), U8(137),
/* 58 S> */ B(Mov), R(0), R(1),
- /* 65 S> */ B(Mov), R(1), R(0),
- /* 74 S> */ B(Mov), R(0), R(1),
- /* 81 S> */ B(Mov), R(1), R(0),
- /* 90 S> */ B(Mov), R(0), R(1),
- /* 97 S> */ B(Mov), R(1), R(0),
- /* 106 S> */ B(Mov), R(0), R(1),
- /* 113 S> */ B(Mov), R(1), R(0),
- /* 122 S> */ B(Mov), R(0), R(1),
- /* 129 S> */ B(Mov), R(1), R(0),
- /* 138 S> */ B(Mov), R(0), R(1),
- /* 145 S> */ B(Mov), R(1), R(0),
- /* 154 S> */ B(Mov), R(0), R(1),
- /* 161 S> */ B(Mov), R(1), R(0),
- /* 170 S> */ B(Mov), R(0), R(1),
- /* 177 S> */ B(Mov), R(1), R(0),
- /* 186 S> */ B(Mov), R(0), R(1),
- /* 193 S> */ B(Mov), R(1), R(0),
- /* 202 S> */ B(Mov), R(0), R(1),
- /* 209 S> */ B(Mov), R(1), R(0),
- /* 218 S> */ B(Mov), R(0), R(1),
- /* 225 S> */ B(Mov), R(1), R(0),
- /* 234 S> */ B(Mov), R(0), R(1),
- /* 241 S> */ B(Mov), R(1), R(0),
- /* 250 S> */ B(Mov), R(0), R(1),
- /* 257 S> */ B(Mov), R(1), R(0),
- /* 266 S> */ B(Mov), R(0), R(1),
- /* 273 S> */ B(Mov), R(1), R(0),
- /* 282 S> */ B(Mov), R(0), R(1),
- /* 289 S> */ B(Mov), R(1), R(0),
- /* 298 S> */ B(Mov), R(0), R(1),
- /* 305 S> */ B(Mov), R(1), R(0),
- /* 314 S> */ B(Mov), R(0), R(1),
- /* 321 S> */ B(Mov), R(1), R(0),
- /* 330 S> */ B(Mov), R(0), R(1),
- /* 337 S> */ B(Mov), R(1), R(0),
- /* 346 S> */ B(Mov), R(0), R(1),
- /* 353 S> */ B(Mov), R(1), R(0),
- /* 362 S> */ B(Mov), R(0), R(1),
- /* 369 S> */ B(Mov), R(1), R(0),
- /* 378 S> */ B(Mov), R(0), R(1),
- /* 385 S> */ B(Mov), R(1), R(0),
- /* 394 S> */ B(Mov), R(0), R(1),
- /* 401 S> */ B(Mov), R(1), R(0),
- /* 410 S> */ B(Mov), R(0), R(1),
- /* 417 S> */ B(Mov), R(1), R(0),
- /* 426 S> */ B(Mov), R(0), R(1),
- /* 433 S> */ B(Mov), R(1), R(0),
- /* 442 S> */ B(Mov), R(0), R(1),
- /* 449 S> */ B(Mov), R(1), R(0),
- /* 458 S> */ B(Mov), R(0), R(1),
- /* 465 S> */ B(Mov), R(1), R(0),
- /* 474 S> */ B(Mov), R(0), R(1),
- /* 481 S> */ B(Mov), R(1), R(0),
- /* 490 S> */ B(Mov), R(0), R(1),
- /* 497 S> */ B(Mov), R(1), R(0),
- /* 506 S> */ B(Mov), R(0), R(1),
- /* 513 S> */ B(Mov), R(1), R(0),
- /* 522 S> */ B(Mov), R(0), R(1),
- /* 529 S> */ B(Mov), R(1), R(0),
- /* 538 S> */ B(Mov), R(0), R(1),
- /* 545 S> */ B(Mov), R(1), R(0),
- /* 554 S> */ B(Mov), R(0), R(1),
- /* 561 S> */ B(Mov), R(1), R(0),
- /* 570 S> */ B(Mov), R(0), R(1),
- /* 577 S> */ B(Mov), R(1), R(0),
- /* 586 S> */ B(Mov), R(0), R(1),
- /* 593 S> */ B(Mov), R(1), R(0),
- /* 602 S> */ B(Mov), R(0), R(1),
- /* 609 S> */ B(Mov), R(1), R(0),
- /* 618 S> */ B(Mov), R(0), R(1),
- /* 625 S> */ B(Mov), R(1), R(0),
- /* 634 S> */ B(Mov), R(0), R(1),
- /* 641 S> */ B(Mov), R(1), R(0),
- /* 650 S> */ B(Mov), R(0), R(1),
- /* 657 S> */ B(Mov), R(1), R(0),
- /* 666 S> */ B(Mov), R(0), R(1),
- /* 673 S> */ B(Mov), R(1), R(0),
- /* 682 S> */ B(Mov), R(0), R(1),
- /* 689 S> */ B(Mov), R(1), R(0),
- /* 698 S> */ B(Mov), R(0), R(1),
- /* 705 S> */ B(Mov), R(1), R(0),
- /* 714 S> */ B(Mov), R(0), R(1),
- /* 721 S> */ B(Mov), R(1), R(0),
- /* 730 S> */ B(Mov), R(0), R(1),
- /* 737 S> */ B(Mov), R(1), R(0),
- /* 746 S> */ B(Mov), R(0), R(1),
- /* 753 S> */ B(Mov), R(1), R(0),
- /* 762 S> */ B(Mov), R(0), R(1),
- /* 769 S> */ B(Mov), R(1), R(0),
- /* 778 S> */ B(Mov), R(0), R(1),
- /* 785 S> */ B(Mov), R(1), R(0),
- /* 794 S> */ B(Mov), R(0), R(1),
- /* 801 S> */ B(Mov), R(1), R(0),
- /* 810 S> */ B(Mov), R(0), R(1),
- /* 817 S> */ B(Mov), R(1), R(0),
- /* 826 S> */ B(Mov), R(0), R(1),
- /* 833 S> */ B(Mov), R(1), R(0),
- /* 842 S> */ B(Mov), R(0), R(1),
- /* 849 S> */ B(Mov), R(1), R(0),
- /* 858 S> */ B(Mov), R(0), R(1),
- /* 865 S> */ B(Mov), R(1), R(0),
- /* 874 S> */ B(Mov), R(0), R(1),
- /* 881 S> */ B(Mov), R(1), R(0),
- /* 890 S> */ B(Mov), R(0), R(1),
- /* 897 S> */ B(Mov), R(1), R(0),
- /* 906 S> */ B(Mov), R(0), R(1),
- /* 913 S> */ B(Mov), R(1), R(0),
- /* 922 S> */ B(Mov), R(0), R(1),
- /* 929 S> */ B(Mov), R(1), R(0),
- /* 938 S> */ B(Mov), R(0), R(1),
- /* 945 S> */ B(Mov), R(1), R(0),
- /* 954 S> */ B(Mov), R(0), R(1),
- /* 961 S> */ B(Mov), R(1), R(0),
- /* 970 S> */ B(Mov), R(0), R(1),
- /* 977 S> */ B(Mov), R(1), R(0),
- /* 986 S> */ B(Mov), R(0), R(1),
- /* 993 S> */ B(Mov), R(1), R(0),
- /* 1002 S> */ B(Mov), R(0), R(1),
- /* 1009 S> */ B(Mov), R(1), R(0),
- /* 1018 S> */ B(Mov), R(0), R(1),
- /* 1025 S> */ B(Mov), R(1), R(0),
- /* 1034 S> */ B(Mov), R(0), R(1),
- /* 1041 S> */ B(Mov), R(1), R(0),
- /* 1050 S> */ B(Mov), R(0), R(1),
- /* 1057 S> */ B(Mov), R(1), R(0),
- /* 1066 S> */ B(Mov), R(0), R(1),
- /* 1073 S> */ B(Mov), R(1), R(0),
+ /* 65 S> */ B(Nop),
+ /* 74 S> */ B(Nop),
+ /* 81 S> */ B(Nop),
+ /* 90 S> */ B(Nop),
+ /* 97 S> */ B(Nop),
+ /* 106 S> */ B(Nop),
+ /* 113 S> */ B(Nop),
+ /* 122 S> */ B(Nop),
+ /* 129 S> */ B(Nop),
+ /* 138 S> */ B(Nop),
+ /* 145 S> */ B(Nop),
+ /* 154 S> */ B(Nop),
+ /* 161 S> */ B(Nop),
+ /* 170 S> */ B(Nop),
+ /* 177 S> */ B(Nop),
+ /* 186 S> */ B(Nop),
+ /* 193 S> */ B(Nop),
+ /* 202 S> */ B(Nop),
+ /* 209 S> */ B(Nop),
+ /* 218 S> */ B(Nop),
+ /* 225 S> */ B(Nop),
+ /* 234 S> */ B(Nop),
+ /* 241 S> */ B(Nop),
+ /* 250 S> */ B(Nop),
+ /* 257 S> */ B(Nop),
+ /* 266 S> */ B(Nop),
+ /* 273 S> */ B(Nop),
+ /* 282 S> */ B(Nop),
+ /* 289 S> */ B(Nop),
+ /* 298 S> */ B(Nop),
+ /* 305 S> */ B(Nop),
+ /* 314 S> */ B(Nop),
+ /* 321 S> */ B(Nop),
+ /* 330 S> */ B(Nop),
+ /* 337 S> */ B(Nop),
+ /* 346 S> */ B(Nop),
+ /* 353 S> */ B(Nop),
+ /* 362 S> */ B(Nop),
+ /* 369 S> */ B(Nop),
+ /* 378 S> */ B(Nop),
+ /* 385 S> */ B(Nop),
+ /* 394 S> */ B(Nop),
+ /* 401 S> */ B(Nop),
+ /* 410 S> */ B(Nop),
+ /* 417 S> */ B(Nop),
+ /* 426 S> */ B(Nop),
+ /* 433 S> */ B(Nop),
+ /* 442 S> */ B(Nop),
+ /* 449 S> */ B(Nop),
+ /* 458 S> */ B(Nop),
+ /* 465 S> */ B(Nop),
+ /* 474 S> */ B(Nop),
+ /* 481 S> */ B(Nop),
+ /* 490 S> */ B(Nop),
+ /* 497 S> */ B(Nop),
+ /* 506 S> */ B(Nop),
+ /* 513 S> */ B(Nop),
+ /* 522 S> */ B(Nop),
+ /* 529 S> */ B(Nop),
+ /* 538 S> */ B(Nop),
+ /* 545 S> */ B(Nop),
+ /* 554 S> */ B(Nop),
+ /* 561 S> */ B(Nop),
+ /* 570 S> */ B(Nop),
+ /* 577 S> */ B(Nop),
+ /* 586 S> */ B(Nop),
+ /* 593 S> */ B(Nop),
+ /* 602 S> */ B(Nop),
+ /* 609 S> */ B(Nop),
+ /* 618 S> */ B(Nop),
+ /* 625 S> */ B(Nop),
+ /* 634 S> */ B(Nop),
+ /* 641 S> */ B(Nop),
+ /* 650 S> */ B(Nop),
+ /* 657 S> */ B(Nop),
+ /* 666 S> */ B(Nop),
+ /* 673 S> */ B(Nop),
+ /* 682 S> */ B(Nop),
+ /* 689 S> */ B(Nop),
+ /* 698 S> */ B(Nop),
+ /* 705 S> */ B(Nop),
+ /* 714 S> */ B(Nop),
+ /* 721 S> */ B(Nop),
+ /* 730 S> */ B(Nop),
+ /* 737 S> */ B(Nop),
+ /* 746 S> */ B(Nop),
+ /* 753 S> */ B(Nop),
+ /* 762 S> */ B(Nop),
+ /* 769 S> */ B(Nop),
+ /* 778 S> */ B(Nop),
+ /* 785 S> */ B(Nop),
+ /* 794 S> */ B(Nop),
+ /* 801 S> */ B(Nop),
+ /* 810 S> */ B(Nop),
+ /* 817 S> */ B(Nop),
+ /* 826 S> */ B(Nop),
+ /* 833 S> */ B(Nop),
+ /* 842 S> */ B(Nop),
+ /* 849 S> */ B(Nop),
+ /* 858 S> */ B(Nop),
+ /* 865 S> */ B(Nop),
+ /* 874 S> */ B(Nop),
+ /* 881 S> */ B(Nop),
+ /* 890 S> */ B(Nop),
+ /* 897 S> */ B(Nop),
+ /* 906 S> */ B(Nop),
+ /* 913 S> */ B(Nop),
+ /* 922 S> */ B(Nop),
+ /* 929 S> */ B(Nop),
+ /* 938 S> */ B(Nop),
+ /* 945 S> */ B(Nop),
+ /* 954 S> */ B(Nop),
+ /* 961 S> */ B(Nop),
+ /* 970 S> */ B(Nop),
+ /* 977 S> */ B(Nop),
+ /* 986 S> */ B(Nop),
+ /* 993 S> */ B(Nop),
+ /* 1002 S> */ B(Nop),
+ /* 1009 S> */ B(Nop),
+ /* 1018 S> */ B(Nop),
+ /* 1025 S> */ B(Nop),
+ /* 1034 S> */ B(Nop),
+ /* 1041 S> */ B(Nop),
+ /* 1050 S> */ B(Nop),
+ /* 1057 S> */ B(Nop),
+ /* 1066 S> */ B(Nop),
+ /* 1073 S> */ B(Nop),
/* 1081 S> */ B(Wide), B(LdaSmi), I16(200),
/* 1117 S> */ B(Return),
/* 1102 S> */ B(Wide), B(LdaSmi), I16(-200),
@@ -405,7 +405,6 @@ bytecodes: [
]
constant pool: [
HEAP_NUMBER_TYPE [0.01],
- Smi [391],
]
handlers: [
]
@@ -485,7 +484,7 @@ snippet: "
"
frame size: 2
parameter count: 1
-bytecode array length: 407
+bytecode array length: 153
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 25 S> */ B(LdaZero),
@@ -493,135 +492,135 @@ bytecodes: [
/* 36 S> */ B(LdaZero),
B(Star), R(1),
/* 41 S> */ B(Ldar), R(0),
- B(JumpIfToBooleanFalseConstant), U8(0),
+ B(JumpIfToBooleanFalse), U8(137),
/* 52 S> */ B(Mov), R(0), R(1),
- /* 59 S> */ B(Mov), R(1), R(0),
- /* 68 S> */ B(Mov), R(0), R(1),
- /* 75 S> */ B(Mov), R(1), R(0),
- /* 84 S> */ B(Mov), R(0), R(1),
- /* 91 S> */ B(Mov), R(1), R(0),
- /* 100 S> */ B(Mov), R(0), R(1),
- /* 107 S> */ B(Mov), R(1), R(0),
- /* 116 S> */ B(Mov), R(0), R(1),
- /* 123 S> */ B(Mov), R(1), R(0),
- /* 132 S> */ B(Mov), R(0), R(1),
- /* 139 S> */ B(Mov), R(1), R(0),
- /* 148 S> */ B(Mov), R(0), R(1),
- /* 155 S> */ B(Mov), R(1), R(0),
- /* 164 S> */ B(Mov), R(0), R(1),
- /* 171 S> */ B(Mov), R(1), R(0),
- /* 180 S> */ B(Mov), R(0), R(1),
- /* 187 S> */ B(Mov), R(1), R(0),
- /* 196 S> */ B(Mov), R(0), R(1),
- /* 203 S> */ B(Mov), R(1), R(0),
- /* 212 S> */ B(Mov), R(0), R(1),
- /* 219 S> */ B(Mov), R(1), R(0),
- /* 228 S> */ B(Mov), R(0), R(1),
- /* 235 S> */ B(Mov), R(1), R(0),
- /* 244 S> */ B(Mov), R(0), R(1),
- /* 251 S> */ B(Mov), R(1), R(0),
- /* 260 S> */ B(Mov), R(0), R(1),
- /* 267 S> */ B(Mov), R(1), R(0),
- /* 276 S> */ B(Mov), R(0), R(1),
- /* 283 S> */ B(Mov), R(1), R(0),
- /* 292 S> */ B(Mov), R(0), R(1),
- /* 299 S> */ B(Mov), R(1), R(0),
- /* 308 S> */ B(Mov), R(0), R(1),
- /* 315 S> */ B(Mov), R(1), R(0),
- /* 324 S> */ B(Mov), R(0), R(1),
- /* 331 S> */ B(Mov), R(1), R(0),
- /* 340 S> */ B(Mov), R(0), R(1),
- /* 347 S> */ B(Mov), R(1), R(0),
- /* 356 S> */ B(Mov), R(0), R(1),
- /* 363 S> */ B(Mov), R(1), R(0),
- /* 372 S> */ B(Mov), R(0), R(1),
- /* 379 S> */ B(Mov), R(1), R(0),
- /* 388 S> */ B(Mov), R(0), R(1),
- /* 395 S> */ B(Mov), R(1), R(0),
- /* 404 S> */ B(Mov), R(0), R(1),
- /* 411 S> */ B(Mov), R(1), R(0),
- /* 420 S> */ B(Mov), R(0), R(1),
- /* 427 S> */ B(Mov), R(1), R(0),
- /* 436 S> */ B(Mov), R(0), R(1),
- /* 443 S> */ B(Mov), R(1), R(0),
- /* 452 S> */ B(Mov), R(0), R(1),
- /* 459 S> */ B(Mov), R(1), R(0),
- /* 468 S> */ B(Mov), R(0), R(1),
- /* 475 S> */ B(Mov), R(1), R(0),
- /* 484 S> */ B(Mov), R(0), R(1),
- /* 491 S> */ B(Mov), R(1), R(0),
- /* 500 S> */ B(Mov), R(0), R(1),
- /* 507 S> */ B(Mov), R(1), R(0),
- /* 516 S> */ B(Mov), R(0), R(1),
- /* 523 S> */ B(Mov), R(1), R(0),
- /* 532 S> */ B(Mov), R(0), R(1),
- /* 539 S> */ B(Mov), R(1), R(0),
- /* 548 S> */ B(Mov), R(0), R(1),
- /* 555 S> */ B(Mov), R(1), R(0),
- /* 564 S> */ B(Mov), R(0), R(1),
- /* 571 S> */ B(Mov), R(1), R(0),
- /* 580 S> */ B(Mov), R(0), R(1),
- /* 587 S> */ B(Mov), R(1), R(0),
- /* 596 S> */ B(Mov), R(0), R(1),
- /* 603 S> */ B(Mov), R(1), R(0),
- /* 612 S> */ B(Mov), R(0), R(1),
- /* 619 S> */ B(Mov), R(1), R(0),
- /* 628 S> */ B(Mov), R(0), R(1),
- /* 635 S> */ B(Mov), R(1), R(0),
- /* 644 S> */ B(Mov), R(0), R(1),
- /* 651 S> */ B(Mov), R(1), R(0),
- /* 660 S> */ B(Mov), R(0), R(1),
- /* 667 S> */ B(Mov), R(1), R(0),
- /* 676 S> */ B(Mov), R(0), R(1),
- /* 683 S> */ B(Mov), R(1), R(0),
- /* 692 S> */ B(Mov), R(0), R(1),
- /* 699 S> */ B(Mov), R(1), R(0),
- /* 708 S> */ B(Mov), R(0), R(1),
- /* 715 S> */ B(Mov), R(1), R(0),
- /* 724 S> */ B(Mov), R(0), R(1),
- /* 731 S> */ B(Mov), R(1), R(0),
- /* 740 S> */ B(Mov), R(0), R(1),
- /* 747 S> */ B(Mov), R(1), R(0),
- /* 756 S> */ B(Mov), R(0), R(1),
- /* 763 S> */ B(Mov), R(1), R(0),
- /* 772 S> */ B(Mov), R(0), R(1),
- /* 779 S> */ B(Mov), R(1), R(0),
- /* 788 S> */ B(Mov), R(0), R(1),
- /* 795 S> */ B(Mov), R(1), R(0),
- /* 804 S> */ B(Mov), R(0), R(1),
- /* 811 S> */ B(Mov), R(1), R(0),
- /* 820 S> */ B(Mov), R(0), R(1),
- /* 827 S> */ B(Mov), R(1), R(0),
- /* 836 S> */ B(Mov), R(0), R(1),
- /* 843 S> */ B(Mov), R(1), R(0),
- /* 852 S> */ B(Mov), R(0), R(1),
- /* 859 S> */ B(Mov), R(1), R(0),
- /* 868 S> */ B(Mov), R(0), R(1),
- /* 875 S> */ B(Mov), R(1), R(0),
- /* 884 S> */ B(Mov), R(0), R(1),
- /* 891 S> */ B(Mov), R(1), R(0),
- /* 900 S> */ B(Mov), R(0), R(1),
- /* 907 S> */ B(Mov), R(1), R(0),
- /* 916 S> */ B(Mov), R(0), R(1),
- /* 923 S> */ B(Mov), R(1), R(0),
- /* 932 S> */ B(Mov), R(0), R(1),
- /* 939 S> */ B(Mov), R(1), R(0),
- /* 948 S> */ B(Mov), R(0), R(1),
- /* 955 S> */ B(Mov), R(1), R(0),
- /* 964 S> */ B(Mov), R(0), R(1),
- /* 971 S> */ B(Mov), R(1), R(0),
- /* 980 S> */ B(Mov), R(0), R(1),
- /* 987 S> */ B(Mov), R(1), R(0),
- /* 996 S> */ B(Mov), R(0), R(1),
- /* 1003 S> */ B(Mov), R(1), R(0),
- /* 1012 S> */ B(Mov), R(0), R(1),
- /* 1019 S> */ B(Mov), R(1), R(0),
- /* 1028 S> */ B(Mov), R(0), R(1),
- /* 1035 S> */ B(Mov), R(1), R(0),
- /* 1044 S> */ B(Mov), R(0), R(1),
- /* 1051 S> */ B(Mov), R(1), R(0),
- /* 1060 S> */ B(Mov), R(0), R(1),
- /* 1067 S> */ B(Mov), R(1), R(0),
+ /* 59 S> */ B(Nop),
+ /* 68 S> */ B(Nop),
+ /* 75 S> */ B(Nop),
+ /* 84 S> */ B(Nop),
+ /* 91 S> */ B(Nop),
+ /* 100 S> */ B(Nop),
+ /* 107 S> */ B(Nop),
+ /* 116 S> */ B(Nop),
+ /* 123 S> */ B(Nop),
+ /* 132 S> */ B(Nop),
+ /* 139 S> */ B(Nop),
+ /* 148 S> */ B(Nop),
+ /* 155 S> */ B(Nop),
+ /* 164 S> */ B(Nop),
+ /* 171 S> */ B(Nop),
+ /* 180 S> */ B(Nop),
+ /* 187 S> */ B(Nop),
+ /* 196 S> */ B(Nop),
+ /* 203 S> */ B(Nop),
+ /* 212 S> */ B(Nop),
+ /* 219 S> */ B(Nop),
+ /* 228 S> */ B(Nop),
+ /* 235 S> */ B(Nop),
+ /* 244 S> */ B(Nop),
+ /* 251 S> */ B(Nop),
+ /* 260 S> */ B(Nop),
+ /* 267 S> */ B(Nop),
+ /* 276 S> */ B(Nop),
+ /* 283 S> */ B(Nop),
+ /* 292 S> */ B(Nop),
+ /* 299 S> */ B(Nop),
+ /* 308 S> */ B(Nop),
+ /* 315 S> */ B(Nop),
+ /* 324 S> */ B(Nop),
+ /* 331 S> */ B(Nop),
+ /* 340 S> */ B(Nop),
+ /* 347 S> */ B(Nop),
+ /* 356 S> */ B(Nop),
+ /* 363 S> */ B(Nop),
+ /* 372 S> */ B(Nop),
+ /* 379 S> */ B(Nop),
+ /* 388 S> */ B(Nop),
+ /* 395 S> */ B(Nop),
+ /* 404 S> */ B(Nop),
+ /* 411 S> */ B(Nop),
+ /* 420 S> */ B(Nop),
+ /* 427 S> */ B(Nop),
+ /* 436 S> */ B(Nop),
+ /* 443 S> */ B(Nop),
+ /* 452 S> */ B(Nop),
+ /* 459 S> */ B(Nop),
+ /* 468 S> */ B(Nop),
+ /* 475 S> */ B(Nop),
+ /* 484 S> */ B(Nop),
+ /* 491 S> */ B(Nop),
+ /* 500 S> */ B(Nop),
+ /* 507 S> */ B(Nop),
+ /* 516 S> */ B(Nop),
+ /* 523 S> */ B(Nop),
+ /* 532 S> */ B(Nop),
+ /* 539 S> */ B(Nop),
+ /* 548 S> */ B(Nop),
+ /* 555 S> */ B(Nop),
+ /* 564 S> */ B(Nop),
+ /* 571 S> */ B(Nop),
+ /* 580 S> */ B(Nop),
+ /* 587 S> */ B(Nop),
+ /* 596 S> */ B(Nop),
+ /* 603 S> */ B(Nop),
+ /* 612 S> */ B(Nop),
+ /* 619 S> */ B(Nop),
+ /* 628 S> */ B(Nop),
+ /* 635 S> */ B(Nop),
+ /* 644 S> */ B(Nop),
+ /* 651 S> */ B(Nop),
+ /* 660 S> */ B(Nop),
+ /* 667 S> */ B(Nop),
+ /* 676 S> */ B(Nop),
+ /* 683 S> */ B(Nop),
+ /* 692 S> */ B(Nop),
+ /* 699 S> */ B(Nop),
+ /* 708 S> */ B(Nop),
+ /* 715 S> */ B(Nop),
+ /* 724 S> */ B(Nop),
+ /* 731 S> */ B(Nop),
+ /* 740 S> */ B(Nop),
+ /* 747 S> */ B(Nop),
+ /* 756 S> */ B(Nop),
+ /* 763 S> */ B(Nop),
+ /* 772 S> */ B(Nop),
+ /* 779 S> */ B(Nop),
+ /* 788 S> */ B(Nop),
+ /* 795 S> */ B(Nop),
+ /* 804 S> */ B(Nop),
+ /* 811 S> */ B(Nop),
+ /* 820 S> */ B(Nop),
+ /* 827 S> */ B(Nop),
+ /* 836 S> */ B(Nop),
+ /* 843 S> */ B(Nop),
+ /* 852 S> */ B(Nop),
+ /* 859 S> */ B(Nop),
+ /* 868 S> */ B(Nop),
+ /* 875 S> */ B(Nop),
+ /* 884 S> */ B(Nop),
+ /* 891 S> */ B(Nop),
+ /* 900 S> */ B(Nop),
+ /* 907 S> */ B(Nop),
+ /* 916 S> */ B(Nop),
+ /* 923 S> */ B(Nop),
+ /* 932 S> */ B(Nop),
+ /* 939 S> */ B(Nop),
+ /* 948 S> */ B(Nop),
+ /* 955 S> */ B(Nop),
+ /* 964 S> */ B(Nop),
+ /* 971 S> */ B(Nop),
+ /* 980 S> */ B(Nop),
+ /* 987 S> */ B(Nop),
+ /* 996 S> */ B(Nop),
+ /* 1003 S> */ B(Nop),
+ /* 1012 S> */ B(Nop),
+ /* 1019 S> */ B(Nop),
+ /* 1028 S> */ B(Nop),
+ /* 1035 S> */ B(Nop),
+ /* 1044 S> */ B(Nop),
+ /* 1051 S> */ B(Nop),
+ /* 1060 S> */ B(Nop),
+ /* 1067 S> */ B(Nop),
/* 1076 S> */ B(Wide), B(LdaSmi), I16(200),
/* 1112 S> */ B(Return),
/* 1097 S> */ B(Wide), B(LdaSmi), I16(-200),
@@ -630,7 +629,6 @@ bytecodes: [
/* 1112 S> */ B(Return),
]
constant pool: [
- Smi [391],
]
handlers: [
]
@@ -656,32 +654,32 @@ bytecode array length: 81
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 21 S> */ B(Ldar), R(arg1),
- /* 27 E> */ B(TestEqual), R(arg0), U8(2),
+ /* 27 E> */ B(TestEqual), R(arg0), U8(3),
B(JumpIfFalse), U8(5),
/* 35 S> */ B(LdaSmi), I8(1),
/* 262 S> */ B(Return),
/* 49 S> */ B(Ldar), R(arg1),
- /* 55 E> */ B(TestEqualStrict), R(arg0), U8(3),
+ /* 55 E> */ B(TestEqualStrict), R(arg0), U8(4),
B(JumpIfFalse), U8(5),
/* 64 S> */ B(LdaSmi), I8(1),
/* 262 S> */ B(Return),
/* 78 S> */ B(Ldar), R(arg1),
- /* 84 E> */ B(TestLessThan), R(arg0), U8(4),
+ /* 84 E> */ B(TestLessThan), R(arg0), U8(5),
B(JumpIfFalse), U8(5),
/* 91 S> */ B(LdaSmi), I8(1),
/* 262 S> */ B(Return),
/* 105 S> */ B(Ldar), R(arg1),
- /* 111 E> */ B(TestGreaterThan), R(arg0), U8(5),
+ /* 111 E> */ B(TestGreaterThan), R(arg0), U8(6),
B(JumpIfFalse), U8(5),
/* 118 S> */ B(LdaSmi), I8(1),
/* 262 S> */ B(Return),
/* 132 S> */ B(Ldar), R(arg1),
- /* 138 E> */ B(TestLessThanOrEqual), R(arg0), U8(6),
+ /* 138 E> */ B(TestLessThanOrEqual), R(arg0), U8(7),
B(JumpIfFalse), U8(5),
/* 146 S> */ B(LdaSmi), I8(1),
/* 262 S> */ B(Return),
/* 160 S> */ B(Ldar), R(arg1),
- /* 166 E> */ B(TestGreaterThanOrEqual), R(arg0), U8(7),
+ /* 166 E> */ B(TestGreaterThanOrEqual), R(arg0), U8(8),
B(JumpIfFalse), U8(5),
/* 174 S> */ B(LdaSmi), I8(1),
/* 262 S> */ B(Return),
@@ -754,18 +752,18 @@ bytecode array length: 36
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 21 S> */ B(Ldar), R(arg1),
- /* 27 E> */ B(TestEqual), R(arg0), U8(2),
+ /* 27 E> */ B(TestEqual), R(arg0), U8(3),
B(JumpIfTrue), U8(8),
B(LdaZero),
- /* 37 E> */ B(TestLessThan), R(arg0), U8(3),
+ /* 37 E> */ B(TestLessThan), R(arg0), U8(4),
B(JumpIfFalse), U8(5),
/* 48 S> */ B(LdaSmi), I8(1),
/* 133 S> */ B(Return),
/* 67 S> */ B(LdaZero),
- /* 73 E> */ B(TestGreaterThan), R(arg0), U8(4),
+ /* 73 E> */ B(TestGreaterThan), R(arg0), U8(5),
B(JumpIfFalse), U8(10),
B(LdaZero),
- /* 82 E> */ B(TestGreaterThan), R(arg1), U8(5),
+ /* 82 E> */ B(TestGreaterThan), R(arg1), U8(6),
B(JumpIfFalse), U8(4),
/* 93 S> */ B(LdaZero),
/* 133 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
index 292247b425..0086de7bca 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/JumpsRequiringConstantWideOperands.golden
@@ -957,19 +957,19 @@ bytecodes: [
/* 4103 S> */ B(LdaZero),
B(Star), R(1),
/* 4108 S> */ B(LdaSmi), I8(3),
- /* 4108 E> */ B(TestLessThan), R(1), U8(2),
+ /* 4108 E> */ B(TestLessThan), R(1), U8(3),
B(Wide), B(JumpIfFalse), U16(39),
/* 4090 E> */ B(StackCheck),
/* 4122 S> */ B(LdaSmi), I8(1),
- /* 4128 E> */ B(TestEqual), R(1), U8(4),
+ /* 4128 E> */ B(TestEqual), R(1), U8(5),
B(Wide), B(JumpIfFalse), U16(7),
/* 4134 S> */ B(Wide), B(Jump), U16(16),
/* 4146 S> */ B(LdaSmi), I8(2),
- /* 4152 E> */ B(TestEqual), R(1), U8(5),
+ /* 4152 E> */ B(TestEqual), R(1), U8(6),
B(Wide), B(JumpIfFalse), U16(7),
/* 4158 S> */ B(Wide), B(Jump), U16(12),
/* 4114 S> */ B(Ldar), R(1),
- B(Inc), U8(3),
+ B(Inc), U8(4),
B(Star), R(1),
B(JumpLoop), U8(42), I8(0),
/* 4167 S> */ B(LdaSmi), I8(3),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
index 60f236f17a..9ac2838412 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariable.golden
@@ -11,10 +11,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 7
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
@@ -32,10 +30,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 7
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
@@ -53,7 +49,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 29
+bytecode array length: 26
bytecodes: [
B(LdaTheHole),
B(Star), R(0),
@@ -66,7 +62,6 @@ bytecodes: [
B(Star), R(2),
/* 45 E> */ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(2), U8(1),
B(Mov), R(1), R(0),
- B(Mov), R(1), R(0),
B(LdaUndefined),
/* 52 S> */ B(Return),
]
@@ -82,10 +77,8 @@ snippet: "
"
frame size: 1
parameter count: 1
-bytecode array length: 14
+bytecode array length: 11
bytecodes: [
- B(LdaTheHole),
- B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
index 5d10939959..d471754930 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LetVariableContextSlot.golden
@@ -17,7 +17,7 @@ bytecodes: [
B(PushContext), R(1),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(0), U8(2), U8(2),
+ B(CreateClosure), U8(0), U8(3), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
@@ -43,7 +43,7 @@ bytecodes: [
B(PushContext), R(1),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(0), U8(2), U8(2),
+ B(CreateClosure), U8(0), U8(3), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
@@ -69,7 +69,7 @@ bytecodes: [
B(PushContext), R(1),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(0), U8(2), U8(2),
+ B(CreateClosure), U8(0), U8(3), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 45 S> */ B(LdaSmi), I8(20),
@@ -104,7 +104,7 @@ bytecodes: [
B(PushContext), R(1),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
- B(CreateClosure), U8(0), U8(2), U8(2),
+ B(CreateClosure), U8(0), U8(3), U8(2),
B(Star), R(0),
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(10),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
index addfa78a98..537f38b956 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LoadGlobal.golden
@@ -17,7 +17,7 @@ parameter count: 1
bytecode array length: 5
bytecodes: [
/* 21 E> */ B(StackCheck),
- /* 26 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 26 S> */ B(LdaGlobal), U8(0), U8(3),
/* 36 S> */ B(Return),
]
constant pool: [
@@ -37,7 +37,7 @@ parameter count: 1
bytecode array length: 5
bytecodes: [
/* 27 E> */ B(StackCheck),
- /* 32 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 32 S> */ B(LdaGlobal), U8(0), U8(3),
/* 42 S> */ B(Return),
]
constant pool: [
@@ -57,7 +57,7 @@ parameter count: 1
bytecode array length: 5
bytecodes: [
/* 17 E> */ B(StackCheck),
- /* 22 S> */ B(LdaGlobal), U8(0), U8(2),
+ /* 22 S> */ B(LdaGlobal), U8(0), U8(3),
/* 32 S> */ B(Return),
]
constant pool: [
@@ -208,262 +208,262 @@ bytecode array length: 652
bytecodes: [
/* 17 E> */ B(StackCheck),
/* 25 S> */ B(Nop),
- /* 26 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ /* 26 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
/* 35 S> */ B(Nop),
- /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
+ /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
/* 45 S> */ B(Nop),
- /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(6),
+ /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
/* 55 S> */ B(Nop),
- /* 56 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(8),
+ /* 56 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
/* 65 S> */ B(Nop),
- /* 66 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(10),
+ /* 66 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
/* 75 S> */ B(Nop),
- /* 76 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(12),
+ /* 76 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
/* 85 S> */ B(Nop),
- /* 86 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(14),
+ /* 86 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
/* 95 S> */ B(Nop),
- /* 96 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(16),
+ /* 96 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
/* 105 S> */ B(Nop),
- /* 106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(18),
+ /* 106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
/* 115 S> */ B(Nop),
- /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(20),
+ /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
/* 125 S> */ B(Nop),
- /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(22),
+ /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
/* 135 S> */ B(Nop),
- /* 136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(24),
+ /* 136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
/* 145 S> */ B(Nop),
- /* 146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(26),
+ /* 146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
/* 155 S> */ B(Nop),
- /* 156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(28),
+ /* 156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
/* 165 S> */ B(Nop),
- /* 166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(30),
+ /* 166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
/* 175 S> */ B(Nop),
- /* 176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(32),
+ /* 176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
/* 185 S> */ B(Nop),
- /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(34),
+ /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
/* 195 S> */ B(Nop),
- /* 196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(36),
+ /* 196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
/* 205 S> */ B(Nop),
- /* 206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(38),
+ /* 206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
/* 215 S> */ B(Nop),
- /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(40),
+ /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
/* 225 S> */ B(Nop),
- /* 226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(42),
+ /* 226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
/* 235 S> */ B(Nop),
- /* 236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(44),
+ /* 236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
/* 245 S> */ B(Nop),
- /* 246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(46),
+ /* 246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
/* 255 S> */ B(Nop),
- /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(48),
+ /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
/* 265 S> */ B(Nop),
- /* 266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(50),
+ /* 266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
/* 275 S> */ B(Nop),
- /* 276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(52),
+ /* 276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
/* 285 S> */ B(Nop),
- /* 286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(54),
+ /* 286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
/* 295 S> */ B(Nop),
- /* 296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(56),
+ /* 296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
/* 305 S> */ B(Nop),
- /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(58),
+ /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
/* 315 S> */ B(Nop),
- /* 316 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(60),
+ /* 316 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
/* 325 S> */ B(Nop),
- /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(62),
+ /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
/* 335 S> */ B(Nop),
- /* 336 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(64),
+ /* 336 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
/* 345 S> */ B(Nop),
- /* 346 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(66),
+ /* 346 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
/* 355 S> */ B(Nop),
- /* 356 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(68),
+ /* 356 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
/* 365 S> */ B(Nop),
- /* 366 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(70),
+ /* 366 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
/* 375 S> */ B(Nop),
- /* 376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(72),
+ /* 376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
/* 385 S> */ B(Nop),
- /* 386 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(74),
+ /* 386 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
/* 395 S> */ B(Nop),
- /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(76),
+ /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
/* 405 S> */ B(Nop),
- /* 406 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(78),
+ /* 406 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
/* 415 S> */ B(Nop),
- /* 416 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(80),
+ /* 416 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
/* 425 S> */ B(Nop),
- /* 426 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(82),
+ /* 426 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
/* 435 S> */ B(Nop),
- /* 436 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(84),
+ /* 436 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
/* 445 S> */ B(Nop),
- /* 446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(86),
+ /* 446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
/* 455 S> */ B(Nop),
- /* 456 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(88),
+ /* 456 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
/* 465 S> */ B(Nop),
- /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(90),
+ /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
/* 475 S> */ B(Nop),
- /* 476 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(92),
+ /* 476 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
/* 485 S> */ B(Nop),
- /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(94),
+ /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
/* 495 S> */ B(Nop),
- /* 496 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(96),
+ /* 496 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
/* 505 S> */ B(Nop),
- /* 506 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(98),
+ /* 506 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
/* 515 S> */ B(Nop),
- /* 516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(100),
+ /* 516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
/* 525 S> */ B(Nop),
- /* 526 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(102),
+ /* 526 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
/* 535 S> */ B(Nop),
- /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(104),
+ /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
/* 545 S> */ B(Nop),
- /* 546 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(106),
+ /* 546 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
/* 555 S> */ B(Nop),
- /* 556 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(108),
+ /* 556 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
/* 565 S> */ B(Nop),
- /* 566 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(110),
+ /* 566 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
/* 575 S> */ B(Nop),
- /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(112),
+ /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
/* 585 S> */ B(Nop),
- /* 586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(114),
+ /* 586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
/* 595 S> */ B(Nop),
- /* 596 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(116),
+ /* 596 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
/* 605 S> */ B(Nop),
- /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(118),
+ /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
/* 615 S> */ B(Nop),
- /* 616 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(120),
+ /* 616 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
/* 625 S> */ B(Nop),
- /* 626 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(122),
+ /* 626 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
/* 635 S> */ B(Nop),
- /* 636 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(124),
+ /* 636 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
/* 645 S> */ B(Nop),
- /* 646 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(126),
+ /* 646 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
/* 655 S> */ B(Nop),
- /* 656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(128),
+ /* 656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
/* 665 S> */ B(Nop),
- /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(130),
+ /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
/* 675 S> */ B(Nop),
- /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(132),
+ /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
/* 685 S> */ B(Nop),
- /* 686 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(134),
+ /* 686 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
/* 695 S> */ B(Nop),
- /* 696 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(136),
+ /* 696 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
/* 705 S> */ B(Nop),
- /* 706 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(138),
+ /* 706 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
/* 715 S> */ B(Nop),
- /* 716 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(140),
+ /* 716 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
/* 725 S> */ B(Nop),
- /* 726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(142),
+ /* 726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
/* 735 S> */ B(Nop),
- /* 736 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(144),
+ /* 736 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
/* 745 S> */ B(Nop),
- /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(146),
+ /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
/* 755 S> */ B(Nop),
- /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(148),
+ /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
/* 765 S> */ B(Nop),
- /* 766 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(150),
+ /* 766 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
/* 775 S> */ B(Nop),
- /* 776 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(152),
+ /* 776 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
/* 785 S> */ B(Nop),
- /* 786 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(154),
+ /* 786 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
/* 795 S> */ B(Nop),
- /* 796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(156),
+ /* 796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
/* 805 S> */ B(Nop),
- /* 806 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(158),
+ /* 806 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
/* 815 S> */ B(Nop),
- /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(160),
+ /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
/* 825 S> */ B(Nop),
- /* 826 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(162),
+ /* 826 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
/* 835 S> */ B(Nop),
- /* 836 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(164),
+ /* 836 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
/* 845 S> */ B(Nop),
- /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(166),
+ /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
/* 855 S> */ B(Nop),
- /* 856 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(168),
+ /* 856 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
/* 865 S> */ B(Nop),
- /* 866 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(170),
+ /* 866 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
/* 875 S> */ B(Nop),
- /* 876 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(172),
+ /* 876 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
/* 885 S> */ B(Nop),
- /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(174),
+ /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
/* 895 S> */ B(Nop),
- /* 896 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(176),
+ /* 896 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
/* 905 S> */ B(Nop),
- /* 906 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(178),
+ /* 906 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
/* 915 S> */ B(Nop),
- /* 916 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(180),
+ /* 916 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
/* 925 S> */ B(Nop),
- /* 926 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(182),
+ /* 926 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
/* 935 S> */ B(Nop),
- /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(184),
+ /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
/* 945 S> */ B(Nop),
- /* 946 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(186),
+ /* 946 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
/* 955 S> */ B(Nop),
- /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(188),
+ /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
/* 965 S> */ B(Nop),
- /* 966 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(190),
+ /* 966 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
/* 975 S> */ B(Nop),
- /* 976 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(192),
+ /* 976 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
/* 985 S> */ B(Nop),
- /* 986 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(194),
+ /* 986 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
/* 995 S> */ B(Nop),
- /* 996 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(196),
+ /* 996 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
/* 1005 S> */ B(Nop),
- /* 1006 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(198),
+ /* 1006 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
/* 1015 S> */ B(Nop),
- /* 1016 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(200),
+ /* 1016 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
/* 1025 S> */ B(Nop),
- /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(202),
+ /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
/* 1035 S> */ B(Nop),
- /* 1036 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(204),
+ /* 1036 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
/* 1045 S> */ B(Nop),
- /* 1046 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(206),
+ /* 1046 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
/* 1055 S> */ B(Nop),
- /* 1056 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(208),
+ /* 1056 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
/* 1065 S> */ B(Nop),
- /* 1066 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(210),
+ /* 1066 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
/* 1075 S> */ B(Nop),
- /* 1076 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(212),
+ /* 1076 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
/* 1085 S> */ B(Nop),
- /* 1086 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(214),
+ /* 1086 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
/* 1095 S> */ B(Nop),
- /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(216),
+ /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
/* 1105 S> */ B(Nop),
- /* 1106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(218),
+ /* 1106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
/* 1115 S> */ B(Nop),
- /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(220),
+ /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
/* 1125 S> */ B(Nop),
- /* 1126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(222),
+ /* 1126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
/* 1135 S> */ B(Nop),
- /* 1136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(224),
+ /* 1136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
/* 1145 S> */ B(Nop),
- /* 1146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(226),
+ /* 1146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
/* 1155 S> */ B(Nop),
- /* 1156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(228),
+ /* 1156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
/* 1165 S> */ B(Nop),
- /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(230),
+ /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
/* 1175 S> */ B(Nop),
- /* 1176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(232),
+ /* 1176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
/* 1185 S> */ B(Nop),
- /* 1186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(234),
+ /* 1186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
/* 1195 S> */ B(Nop),
- /* 1196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(236),
+ /* 1196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
/* 1205 S> */ B(Nop),
- /* 1206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(238),
+ /* 1206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
/* 1215 S> */ B(Nop),
- /* 1216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(240),
+ /* 1216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
/* 1225 S> */ B(Nop),
- /* 1226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(242),
+ /* 1226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
/* 1235 S> */ B(Nop),
- /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(244),
+ /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
/* 1245 S> */ B(Nop),
- /* 1246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(246),
+ /* 1246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
/* 1255 S> */ B(Nop),
- /* 1256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(248),
+ /* 1256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
/* 1265 S> */ B(Nop),
- /* 1266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(250),
+ /* 1266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
/* 1275 S> */ B(Nop),
- /* 1276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
+ /* 1276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
/* 1285 S> */ B(Nop),
- /* 1286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
+ /* 1286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
/* 1295 S> */ B(Nop),
- /* 1296 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(256),
- /* 1305 S> */ B(Wide), B(LdaGlobal), U16(1), U16(258),
+ /* 1296 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(257),
+ /* 1305 S> */ B(Wide), B(LdaGlobal), U16(1), U16(259),
/* 1315 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
index 8cfa8474f9..2f109764a4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LogicalExpressions.golden
@@ -37,7 +37,7 @@ bytecodes: [
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(LdaSmi), I8(1),
- /* 55 E> */ B(TestEqual), R(0), U8(2),
+ /* 55 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfTrue), U8(4),
B(LdaSmi), I8(3),
/* 67 S> */ B(Return),
@@ -79,7 +79,7 @@ bytecodes: [
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(LdaZero),
- /* 55 E> */ B(TestEqual), R(0), U8(2),
+ /* 55 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(4),
B(LdaSmi), I8(3),
/* 67 S> */ B(Return),
@@ -556,7 +556,7 @@ bytecodes: [
/* 60 S> */ B(LdaSmi), I8(3),
B(Star), R(2),
/* 63 S> */ B(LdaSmi), I8(3),
- /* 73 E> */ B(TestGreaterThan), R(0), U8(2),
+ /* 73 E> */ B(TestGreaterThan), R(0), U8(3),
B(JumpIfTrueConstant), U8(0),
B(LdaSmi), I8(1),
B(Star), R(1),
@@ -743,7 +743,7 @@ bytecodes: [
/* 60 S> */ B(LdaSmi), I8(3),
B(Star), R(2),
/* 63 S> */ B(LdaSmi), I8(5),
- /* 73 E> */ B(TestLessThan), R(0), U8(2),
+ /* 73 E> */ B(TestLessThan), R(0), U8(3),
B(JumpIfFalseConstant), U8(0),
B(LdaSmi), I8(1),
B(Star), R(1),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
index 8ecf2c316d..d5501dc513 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlot.golden
@@ -23,7 +23,7 @@ bytecodes: [
B(Ldar), R(new_target),
B(StaCurrentContextSlot), U8(5),
/* 10 E> */ B(StackCheck),
- /* 14 S> */ B(LdaLookupGlobalSlot), U8(0), U8(4), U8(1),
+ /* 14 S> */ B(LdaLookupGlobalSlot), U8(0), U8(5), U8(1),
B(Star), R(1),
B(LdaConstant), U8(1),
B(Star), R(2),
@@ -38,8 +38,8 @@ bytecodes: [
B(Mov), R(closure), R(5),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(3), U8(6),
B(Star), R(1),
- /* 14 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(2),
- /* 35 S> */ B(LdaLookupGlobalSlot), U8(2), U8(6), U8(1),
+ /* 14 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(3),
+ /* 35 S> */ B(LdaLookupGlobalSlot), U8(2), U8(7), U8(1),
/* 45 S> */ B(Return),
]
constant pool: [
@@ -67,7 +67,7 @@ bytecodes: [
B(Ldar), R(new_target),
B(StaCurrentContextSlot), U8(5),
/* 10 E> */ B(StackCheck),
- /* 14 S> */ B(LdaLookupGlobalSlot), U8(0), U8(4), U8(1),
+ /* 14 S> */ B(LdaLookupGlobalSlot), U8(0), U8(5), U8(1),
B(Star), R(1),
B(LdaConstant), U8(1),
B(Star), R(2),
@@ -82,8 +82,8 @@ bytecodes: [
B(Mov), R(closure), R(5),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(3), U8(6),
B(Star), R(1),
- /* 14 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(2),
- /* 35 S> */ B(LdaLookupGlobalSlotInsideTypeof), U8(2), U8(6), U8(1),
+ /* 14 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(3),
+ /* 35 S> */ B(LdaLookupGlobalSlotInsideTypeof), U8(2), U8(7), U8(1),
B(TypeOf),
/* 52 S> */ B(Return),
]
@@ -114,7 +114,7 @@ bytecodes: [
/* 10 E> */ B(StackCheck),
/* 14 S> */ B(LdaSmi), I8(20),
/* 16 E> */ B(StaLookupSlotSloppy), U8(0),
- /* 22 S> */ B(LdaLookupGlobalSlot), U8(1), U8(4), U8(1),
+ /* 22 S> */ B(LdaLookupGlobalSlot), U8(1), U8(5), U8(1),
B(Star), R(1),
B(LdaConstant), U8(2),
B(Star), R(2),
@@ -129,7 +129,7 @@ bytecodes: [
B(Mov), R(closure), R(5),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(3), U8(6),
B(Star), R(1),
- /* 29 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(2),
+ /* 29 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(3),
/* 39 S> */ B(Return),
]
constant pool: [
@@ -162,7 +162,7 @@ bytecodes: [
B(Ldar), R(new_target),
B(StaCurrentContextSlot), U8(5),
/* 38 E> */ B(StackCheck),
- /* 44 S> */ B(LdaLookupGlobalSlot), U8(0), U8(4), U8(1),
+ /* 44 S> */ B(LdaLookupGlobalSlot), U8(0), U8(5), U8(1),
B(Star), R(1),
B(LdaConstant), U8(1),
B(Star), R(2),
@@ -177,7 +177,7 @@ bytecodes: [
B(Mov), R(closure), R(5),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(3), U8(6),
B(Star), R(1),
- /* 44 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(2),
+ /* 44 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(3),
/* 66 S> */ B(LdaLookupContextSlot), U8(2), U8(6), U8(1),
/* 76 S> */ B(Return),
]
@@ -211,7 +211,7 @@ bytecodes: [
B(Ldar), R(new_target),
B(StaCurrentContextSlot), U8(5),
/* 34 E> */ B(StackCheck),
- /* 40 S> */ B(LdaLookupGlobalSlot), U8(0), U8(4), U8(1),
+ /* 40 S> */ B(LdaLookupGlobalSlot), U8(0), U8(5), U8(1),
B(Star), R(1),
B(LdaConstant), U8(1),
B(Star), R(2),
@@ -226,8 +226,8 @@ bytecodes: [
B(Mov), R(closure), R(5),
B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(3), U8(6),
B(Star), R(1),
- /* 40 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(2),
- /* 62 S> */ B(LdaLookupGlobalSlot), U8(2), U8(6), U8(1),
+ /* 40 E> */ B(CallUndefinedReceiver1), R(1), R(2), U8(3),
+ /* 62 S> */ B(LdaLookupGlobalSlot), U8(2), U8(7), U8(1),
/* 72 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
index 9a40416535..77e2438dc0 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotInEval.golden
@@ -20,7 +20,7 @@ parameter count: 1
bytecode array length: 6
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 15 S> */ B(LdaLookupGlobalSlot), U8(0), U8(2), U8(1),
+ /* 15 S> */ B(LdaLookupGlobalSlot), U8(0), U8(3), U8(1),
/* 25 S> */ B(Return),
]
constant pool: [
@@ -93,7 +93,7 @@ parameter count: 1
bytecode array length: 7
bytecodes: [
/* 10 E> */ B(StackCheck),
- /* 15 S> */ B(LdaLookupGlobalSlotInsideTypeof), U8(0), U8(2), U8(1),
+ /* 15 S> */ B(LdaLookupGlobalSlotInsideTypeof), U8(0), U8(3), U8(1),
B(TypeOf),
/* 32 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
index 2dcd93b231..f90c3668b7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/LookupSlotWideInEval.golden
@@ -792,7 +792,7 @@ bytecodes: [
B(Star), R(0),
/* 3082 S> */ B(LdaConstant), U8(255),
B(Star), R(0),
- /* 3086 S> */ B(Wide), B(LdaLookupGlobalSlot), U16(256), U16(2), U16(1),
+ /* 3086 S> */ B(Wide), B(LdaLookupGlobalSlot), U16(256), U16(3), U16(1),
/* 3095 S> */ B(Return),
]
constant pool: [
@@ -1843,7 +1843,7 @@ bytecodes: [
B(Star), R(0),
/* 3082 S> */ B(LdaConstant), U8(255),
B(Star), R(0),
- /* 3086 S> */ B(Wide), B(LdaLookupGlobalSlotInsideTypeof), U16(256), U16(2), U16(1),
+ /* 3086 S> */ B(Wide), B(LdaLookupGlobalSlotInsideTypeof), U16(256), U16(3), U16(1),
B(TypeOf),
/* 3102 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
index 510e573f54..2a44009043 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Modules.golden
@@ -11,68 +11,62 @@ top level: yes
snippet: "
import \"bar\";
"
-frame size: 9
+frame size: 8
parameter count: 2
-bytecode array length: 143
+bytecode array length: 130
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(27),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(2),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(1),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(64),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(LdaConstant), U8(0),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
/* 0 E> */ B(StackCheck),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(4),
- B(Mov), R(closure), R(3),
- /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(Star), R(3),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(4),
B(LdaZero),
- B(SuspendGenerator), R(4), U8(0),
- B(Ldar), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(3), U8(0),
+ B(Ldar), R(4),
/* 13 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(4), U8(1),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(5),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(4), U8(1),
- B(Star), R(6),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(24),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(22),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(15),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(13),
B(LdaTrue),
- B(Star), R(8),
- B(Mov), R(5), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(7), U8(2),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
/* 13 S> */ B(Return),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(5),
- B(StaCurrentContextSlot), U8(6),
- B(LdaCurrentContextSlot), U8(6),
+ B(Ldar), R(4),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaCurrentContextSlot), U8(5),
B(Star), R(3),
B(LdaTrue),
B(Star), R(4),
@@ -80,6 +74,7 @@ bytecodes: [
/* 13 S> */ B(Return),
]
constant pool: [
+ Smi [59],
FIXED_ARRAY_TYPE,
]
handlers: [
@@ -89,68 +84,62 @@ handlers: [
snippet: "
import {foo} from \"bar\";
"
-frame size: 9
+frame size: 8
parameter count: 2
-bytecode array length: 143
+bytecode array length: 130
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(27),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(2),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(1),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(64),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(LdaConstant), U8(0),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
/* 0 E> */ B(StackCheck),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(4),
- B(Mov), R(closure), R(3),
- /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(Star), R(3),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(4),
B(LdaZero),
- B(SuspendGenerator), R(4), U8(0),
- B(Ldar), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(3), U8(0),
+ B(Ldar), R(4),
/* 24 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(4), U8(1),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(5),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(4), U8(1),
- B(Star), R(6),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(24),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(22),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(15),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(13),
B(LdaTrue),
- B(Star), R(8),
- B(Mov), R(5), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(7), U8(2),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
/* 24 S> */ B(Return),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(5),
- B(StaCurrentContextSlot), U8(6),
- B(LdaCurrentContextSlot), U8(6),
+ B(Ldar), R(4),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaCurrentContextSlot), U8(5),
B(Star), R(3),
B(LdaTrue),
B(Star), R(4),
@@ -158,6 +147,7 @@ bytecodes: [
/* 24 S> */ B(Return),
]
constant pool: [
+ Smi [59],
FIXED_ARRAY_TYPE,
]
handlers: [
@@ -169,100 +159,95 @@ snippet: "
goo(42);
{ let x; { goo(42) } };
"
-frame size: 10
+frame size: 8
parameter count: 2
-bytecode array length: 205
+bytecode array length: 192
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(27),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(3),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(2),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(2),
- B(JumpIfTrue), U8(64),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kAbort), R(4), U8(1),
- B(LdaSmi), I8(-2),
B(Star), R(2),
- B(LdaConstant), U8(0),
- B(Star), R(6),
- B(Mov), R(arg0), R(4),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
/* 0 E> */ B(StackCheck),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(5),
- B(Mov), R(closure), R(4),
- /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(4), U8(2),
- B(StaCurrentContextSlot), U8(5),
+ B(Star), R(3),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(4),
- B(LdaImmutableCurrentContextSlot), U8(5),
- B(Star), R(5),
B(LdaZero),
- B(SuspendGenerator), R(5), U8(0),
+ /* 0 E> */ B(SuspendGenerator), R(3), U8(0),
B(Ldar), R(4),
/* 64 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(5), U8(1),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(5), U8(1),
- B(Star), R(7),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Star), R(5),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(7),
- B(JumpIfTrue), U8(24),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(22),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(7),
- B(JumpIfTrue), U8(15),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(13),
B(LdaTrue),
- B(Star), R(9),
- B(Mov), R(6), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(8), U8(2),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
/* 64 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
/* 32 S> */ B(LdaModuleVariable), I8(-1), U8(0),
B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
- B(Star), R(5),
- B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
+ B(LdaConstant), U8(2),
B(Star), R(4),
+ B(CallRuntime), U16(Runtime::kThrowReferenceError), R(4), U8(1),
+ B(Star), R(3),
B(LdaSmi), I8(42),
- B(Star), R(5),
- /* 32 E> */ B(CallUndefinedReceiver1), R(4), R(5), U8(2),
+ B(Star), R(4),
+ /* 32 E> */ B(CallUndefinedReceiver1), R(3), R(4), U8(3),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(2),
- B(PushContext), R(1),
+ B(CreateBlockContext), U8(3),
+ B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
/* 47 S> */ B(LdaUndefined),
/* 47 E> */ B(StaCurrentContextSlot), U8(4),
/* 52 S> */ B(LdaModuleVariable), I8(-1), U8(1),
B(JumpIfNotHole), U8(11),
- B(LdaConstant), U8(1),
+ B(LdaConstant), U8(2),
B(Star), R(5),
B(CallRuntime), U16(Runtime::kThrowReferenceError), R(5), U8(1),
B(Star), R(4),
B(LdaSmi), I8(42),
B(Star), R(5),
- /* 52 E> */ B(CallUndefinedReceiver1), R(4), R(5), U8(4),
- B(StaContextSlot), R(1), U8(6), U8(0),
- B(PopContext), R(1),
- B(LdaCurrentContextSlot), U8(6),
- B(Star), R(4),
+ /* 52 E> */ B(CallUndefinedReceiver1), R(4), R(5), U8(5),
+ B(StaContextSlot), R(3), U8(5), U8(0),
+ B(PopContext), R(3),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(3),
B(LdaTrue),
- B(Star), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(4), U8(2),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(3), U8(2),
/* 64 S> */ B(Return),
]
constant pool: [
+ Smi [59],
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["goo"],
FIXED_ARRAY_TYPE,
@@ -276,93 +261,88 @@ snippet: "
foo++;
{ let x; { foo++ } };
"
-frame size: 10
+frame size: 8
parameter count: 2
-bytecode array length: 185
+bytecode array length: 172
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(27),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(3),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(2),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(2),
- B(JumpIfTrue), U8(64),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kAbort), R(4), U8(1),
- B(LdaSmi), I8(-2),
B(Star), R(2),
- B(LdaConstant), U8(0),
- B(Star), R(6),
- B(Mov), R(arg0), R(4),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
/* 0 E> */ B(StackCheck),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(5),
- B(Mov), R(closure), R(4),
- /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(4), U8(2),
- B(StaCurrentContextSlot), U8(5),
+ B(Star), R(3),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(4),
- B(LdaImmutableCurrentContextSlot), U8(5),
- B(Star), R(5),
B(LdaZero),
- B(SuspendGenerator), R(5), U8(0),
+ /* 0 E> */ B(SuspendGenerator), R(3), U8(0),
B(Ldar), R(4),
/* 49 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(5), U8(1),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(5), U8(1),
- B(Star), R(7),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Star), R(5),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(7),
- B(JumpIfTrue), U8(24),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(22),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(7),
- B(JumpIfTrue), U8(15),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(13),
B(LdaTrue),
- B(Star), R(9),
- B(Mov), R(6), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(8), U8(2),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
/* 49 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
/* 17 S> */ B(LdaSmi), I8(42),
/* 17 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 21 S> */ B(LdaModuleVariable), I8(1), U8(0),
- B(Inc), U8(2),
+ B(Inc), U8(3),
/* 24 E> */ B(StaModuleVariable), I8(1), U8(0),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(1),
- B(PushContext), R(1),
+ B(CreateBlockContext), U8(2),
+ B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
/* 34 S> */ B(LdaUndefined),
/* 34 E> */ B(StaCurrentContextSlot), U8(4),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(1),
- B(ToNumber), R(4), U8(3),
+ B(ToNumber), R(4), U8(4),
B(Ldar), R(4),
- B(Inc), U8(3),
+ B(Inc), U8(4),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(1),
B(Ldar), R(4),
- B(StaContextSlot), R(1), U8(6), U8(0),
- B(PopContext), R(1),
- B(LdaCurrentContextSlot), U8(6),
- B(Star), R(4),
+ B(StaContextSlot), R(3), U8(5), U8(0),
+ B(PopContext), R(3),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(3),
B(LdaTrue),
- B(Star), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(4), U8(2),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(3), U8(2),
/* 49 S> */ B(Return),
]
constant pool: [
+ Smi [59],
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
]
@@ -375,95 +355,90 @@ snippet: "
foo++;
{ let x; { foo++ } };
"
-frame size: 10
+frame size: 8
parameter count: 2
-bytecode array length: 189
+bytecode array length: 176
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(27),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(3),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(2),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(2),
- B(JumpIfTrue), U8(68),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kAbort), R(4), U8(1),
- B(LdaSmi), I8(-2),
B(Star), R(2),
- B(LdaConstant), U8(0),
- B(Star), R(6),
- B(Mov), R(arg0), R(4),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(5),
- B(Mov), R(closure), R(4),
- /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(4), U8(2),
- B(StaCurrentContextSlot), U8(5),
+ B(Star), R(3),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(4),
- B(LdaImmutableCurrentContextSlot), U8(5),
- B(Star), R(5),
B(LdaZero),
- B(SuspendGenerator), R(5), U8(0),
+ /* 0 E> */ B(SuspendGenerator), R(3), U8(0),
B(Ldar), R(4),
/* 49 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(5), U8(1),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(5), U8(1),
- B(Star), R(7),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Star), R(5),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(7),
- B(JumpIfTrue), U8(24),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(22),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(7),
- B(JumpIfTrue), U8(15),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(13),
B(LdaTrue),
- B(Star), R(9),
- B(Mov), R(6), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(8), U8(2),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
/* 49 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
/* 17 S> */ B(LdaSmi), I8(42),
/* 17 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 21 S> */ B(LdaModuleVariable), I8(1), U8(0),
- B(Inc), U8(2),
+ B(Inc), U8(3),
/* 24 E> */ B(StaModuleVariable), I8(1), U8(0),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(1),
- B(PushContext), R(1),
+ B(CreateBlockContext), U8(2),
+ B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
/* 34 S> */ B(LdaUndefined),
/* 34 E> */ B(StaCurrentContextSlot), U8(4),
/* 39 S> */ B(LdaModuleVariable), I8(1), U8(1),
- B(ToNumber), R(4), U8(3),
+ B(ToNumber), R(4), U8(4),
B(Ldar), R(4),
- B(Inc), U8(3),
+ B(Inc), U8(4),
/* 42 E> */ B(StaModuleVariable), I8(1), U8(1),
B(Ldar), R(4),
- B(StaContextSlot), R(1), U8(6), U8(0),
- B(PopContext), R(1),
- B(LdaCurrentContextSlot), U8(6),
- B(Star), R(4),
+ B(StaContextSlot), R(3), U8(5), U8(0),
+ B(PopContext), R(3),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(3),
B(LdaTrue),
- B(Star), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(4), U8(2),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(3), U8(2),
/* 49 S> */ B(Return),
]
constant pool: [
+ Smi [63],
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
]
@@ -476,95 +451,90 @@ snippet: "
foo++;
{ let x; { foo++ } };
"
-frame size: 10
+frame size: 8
parameter count: 2
-bytecode array length: 193
+bytecode array length: 180
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(27),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(3),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(2),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(2),
- B(JumpIfTrue), U8(68),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kAbort), R(4), U8(1),
- B(LdaSmi), I8(-2),
B(Star), R(2),
- B(LdaConstant), U8(0),
- B(Star), R(6),
- B(Mov), R(arg0), R(4),
- B(Mov), R(closure), R(5),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(4), U8(3),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(5),
- B(Mov), R(closure), R(4),
- /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(4), U8(2),
- B(StaCurrentContextSlot), U8(5),
+ B(Star), R(3),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(4),
- B(LdaImmutableCurrentContextSlot), U8(5),
- B(Star), R(5),
B(LdaZero),
- B(SuspendGenerator), R(5), U8(0),
+ /* 0 E> */ B(SuspendGenerator), R(3), U8(0),
B(Ldar), R(4),
/* 51 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(2),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(5), U8(1),
- B(Star), R(6),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(5), U8(1),
- B(Star), R(7),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
+ B(Star), R(5),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(7),
- B(JumpIfTrue), U8(24),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(22),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(7),
- B(JumpIfTrue), U8(15),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(13),
B(LdaTrue),
- B(Star), R(9),
- B(Mov), R(6), R(8),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(8), U8(2),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
/* 51 S> */ B(Return),
- B(Ldar), R(6),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
/* 19 S> */ B(LdaSmi), I8(42),
/* 19 E> */ B(StaModuleVariable), I8(1), U8(0),
/* 23 S> */ B(LdaModuleVariable), I8(1), U8(0),
- B(Inc), U8(2),
+ B(Inc), U8(3),
/* 26 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
B(Ldar), R(closure),
- B(CreateBlockContext), U8(1),
- B(PushContext), R(1),
+ B(CreateBlockContext), U8(2),
+ B(PushContext), R(3),
B(LdaTheHole),
B(StaCurrentContextSlot), U8(4),
/* 36 S> */ B(LdaUndefined),
/* 36 E> */ B(StaCurrentContextSlot), U8(4),
/* 41 S> */ B(LdaModuleVariable), I8(1), U8(1),
- B(ToNumber), R(4), U8(3),
+ B(ToNumber), R(4), U8(4),
B(Ldar), R(4),
- B(Inc), U8(3),
+ B(Inc), U8(4),
/* 44 E> */ B(CallRuntime), U16(Runtime::kThrowConstAssignError), R(0), U8(0),
B(Ldar), R(4),
- B(StaContextSlot), R(1), U8(6), U8(0),
- B(PopContext), R(1),
- B(LdaCurrentContextSlot), U8(6),
- B(Star), R(4),
+ B(StaContextSlot), R(3), U8(5), U8(0),
+ B(PopContext), R(3),
+ B(LdaCurrentContextSlot), U8(5),
+ B(Star), R(3),
B(LdaTrue),
- B(Star), R(5),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(4), U8(2),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(3), U8(2),
/* 51 S> */ B(Return),
]
constant pool: [
+ Smi [63],
FIXED_ARRAY_TYPE,
FIXED_ARRAY_TYPE,
]
@@ -575,72 +545,66 @@ handlers: [
snippet: "
export default (function () {});
"
-frame size: 9
+frame size: 8
parameter count: 2
-bytecode array length: 154
+bytecode array length: 141
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(27),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(2),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(1),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(68),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(LdaConstant), U8(0),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(4),
- B(Mov), R(closure), R(3),
- /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(Star), R(3),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(4),
B(LdaZero),
- B(SuspendGenerator), R(4), U8(0),
- B(Ldar), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(3), U8(0),
+ B(Ldar), R(4),
/* 32 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(4), U8(1),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(5),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(4), U8(1),
- B(Star), R(6),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(24),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(22),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(15),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(13),
B(LdaTrue),
- B(Star), R(8),
- B(Mov), R(5), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(7), U8(2),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
/* 32 S> */ B(Return),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(5),
- B(StaCurrentContextSlot), U8(6),
- B(CreateClosure), U8(1), U8(2), U8(0),
+ B(Ldar), R(4),
+ B(StaCurrentContextSlot), U8(5),
+ B(CreateClosure), U8(2), U8(3), U8(0),
B(StaModuleVariable), I8(1), U8(0),
- B(LdaCurrentContextSlot), U8(6),
+ B(LdaCurrentContextSlot), U8(5),
B(Star), R(3),
B(LdaTrue),
B(Star), R(4),
@@ -648,6 +612,7 @@ bytecodes: [
/* 32 S> */ B(Return),
]
constant pool: [
+ Smi [63],
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
]
@@ -658,70 +623,64 @@ handlers: [
snippet: "
export default (class {});
"
-frame size: 9
+frame size: 8
parameter count: 2
-bytecode array length: 191
+bytecode array length: 174
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(27),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(2),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(1),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(68),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(LdaConstant), U8(0),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
B(LdaTheHole),
B(StaModuleVariable), I8(1), U8(0),
/* 0 E> */ B(StackCheck),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(4),
- B(Mov), R(closure), R(3),
- /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(Star), R(3),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(4),
B(LdaZero),
- B(SuspendGenerator), R(4), U8(0),
- B(Ldar), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(3), U8(0),
+ B(Ldar), R(4),
/* 26 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(4), U8(1),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(5),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(4), U8(1),
- B(Star), R(6),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(24),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(22),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(15),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(13),
B(LdaTrue),
- B(Star), R(8),
- B(Mov), R(5), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(7), U8(2),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
/* 26 S> */ B(Return),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(5),
- B(StaCurrentContextSlot), U8(7),
- /* 16 S> */ B(CreateClosure), U8(1), U8(2), U8(0),
+ B(Ldar), R(4),
+ B(StaCurrentContextSlot), U8(5),
+ B(CreateClosure), U8(2), U8(3), U8(0),
B(Star), R(3),
B(LdaTheHole),
B(Star), R(4),
@@ -734,10 +693,8 @@ bytecodes: [
B(Star), R(4),
B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(3), U8(1),
B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
- B(StaCurrentContextSlot), U8(6),
- B(LdaCurrentContextSlot), U8(6),
- /* 16 E> */ B(StaModuleVariable), I8(1), U8(0),
- B(LdaCurrentContextSlot), U8(7),
+ B(StaModuleVariable), I8(1), U8(0),
+ B(LdaCurrentContextSlot), U8(5),
B(Star), R(3),
B(LdaTrue),
B(Star), R(4),
@@ -745,6 +702,7 @@ bytecodes: [
/* 26 S> */ B(Return),
]
constant pool: [
+ Smi [63],
FIXED_ARRAY_TYPE,
SHARED_FUNCTION_INFO_TYPE,
]
@@ -755,68 +713,62 @@ handlers: [
snippet: "
export {foo as goo} from \"bar\"
"
-frame size: 9
+frame size: 8
parameter count: 2
-bytecode array length: 143
+bytecode array length: 130
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(27),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(2),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(1),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(64),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(LdaConstant), U8(0),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
/* 0 E> */ B(StackCheck),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(4),
- B(Mov), R(closure), R(3),
- /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(Star), R(3),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(4),
B(LdaZero),
- B(SuspendGenerator), R(4), U8(0),
- B(Ldar), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(3), U8(0),
+ B(Ldar), R(4),
/* 30 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(4), U8(1),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(5),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(4), U8(1),
- B(Star), R(6),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(24),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(22),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(15),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(13),
B(LdaTrue),
- B(Star), R(8),
- B(Mov), R(5), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(7), U8(2),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
/* 30 S> */ B(Return),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(5),
- B(StaCurrentContextSlot), U8(6),
- B(LdaCurrentContextSlot), U8(6),
+ B(Ldar), R(4),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaCurrentContextSlot), U8(5),
B(Star), R(3),
B(LdaTrue),
B(Star), R(4),
@@ -824,6 +776,7 @@ bytecodes: [
/* 30 S> */ B(Return),
]
constant pool: [
+ Smi [59],
FIXED_ARRAY_TYPE,
]
handlers: [
@@ -833,68 +786,62 @@ handlers: [
snippet: "
export * from \"bar\"
"
-frame size: 9
+frame size: 8
parameter count: 2
-bytecode array length: 143
+bytecode array length: 130
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(27),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(2),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(1),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(64),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(LdaConstant), U8(0),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
/* 0 E> */ B(StackCheck),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(4),
- B(Mov), R(closure), R(3),
- /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(Star), R(3),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(4),
B(LdaZero),
- B(SuspendGenerator), R(4), U8(0),
- B(Ldar), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(3), U8(0),
+ B(Ldar), R(4),
/* 19 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(4), U8(1),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(5),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(4), U8(1),
- B(Star), R(6),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(24),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(22),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(15),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(13),
B(LdaTrue),
- B(Star), R(8),
- B(Mov), R(5), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(7), U8(2),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
/* 19 S> */ B(Return),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- B(Ldar), R(5),
- B(StaCurrentContextSlot), U8(6),
- B(LdaCurrentContextSlot), U8(6),
+ B(Ldar), R(4),
+ B(StaCurrentContextSlot), U8(5),
+ B(LdaCurrentContextSlot), U8(5),
B(Star), R(3),
B(LdaTrue),
B(Star), R(4),
@@ -902,6 +849,7 @@ bytecodes: [
/* 19 S> */ B(Return),
]
constant pool: [
+ Smi [59],
FIXED_ARRAY_TYPE,
]
handlers: [
@@ -912,82 +860,76 @@ snippet: "
import * as foo from \"bar\"
foo.f(foo, foo.x);
"
-frame size: 9
+frame size: 8
parameter count: 2
-bytecode array length: 181
+bytecode array length: 168
bytecodes: [
B(Ldar), R(new_target),
- B(JumpIfUndefined), U8(27),
- B(CallRuntime), U16(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
- B(PushContext), R(2),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
B(ResumeGenerator), R(new_target),
- B(Star), R(1),
- B(LdaZero),
- B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(74),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
B(LdaSmi), I8(79),
- B(Star), R(3),
- B(CallRuntime), U16(Runtime::kAbort), R(3), U8(1),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(LdaConstant), U8(0),
- B(Star), R(5),
- B(Mov), R(arg0), R(3),
- B(Mov), R(closure), R(4),
- B(CallRuntime), U16(Runtime::kPushModuleContext), R(3), U8(3),
- B(PushContext), R(0),
- B(Ldar), R(this),
+ B(Star), R(0),
+ B(LdaConstant), U8(1),
+ B(Star), R(4),
+ B(Mov), R(arg0), R(2),
+ B(Mov), R(closure), R(3),
+ B(CallRuntime), U16(Runtime::kPushModuleContext), R(2), U8(3),
+ B(PushContext), R(2),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
B(StaCurrentContextSlot), U8(4),
B(LdaZero),
B(Star), R(3),
B(CallRuntime), U16(Runtime::kGetModuleNamespace), R(3), U8(1),
- B(StaCurrentContextSlot), U8(6),
+ B(StaCurrentContextSlot), U8(5),
/* 0 E> */ B(StackCheck),
B(LdaImmutableCurrentContextSlot), U8(4),
- B(Star), R(4),
- B(Mov), R(closure), R(3),
- /* 0 E> */ B(CallRuntime), U16(Runtime::kCreateJSGeneratorObject), R(3), U8(2),
- B(StaCurrentContextSlot), U8(5),
B(Star), R(3),
- B(LdaImmutableCurrentContextSlot), U8(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
B(Star), R(4),
B(LdaZero),
- B(SuspendGenerator), R(4), U8(0),
- B(Ldar), R(3),
+ /* 0 E> */ B(SuspendGenerator), R(3), U8(0),
+ B(Ldar), R(4),
/* 45 S> */ B(Return),
B(LdaSmi), I8(-2),
- B(Star), R(1),
- B(CallRuntime), U16(Runtime::k_GeneratorGetInputOrDebugPos), R(4), U8(1),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(3), U8(1),
+ B(Star), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(3), U8(1),
B(Star), R(5),
- B(CallRuntime), U16(Runtime::k_GeneratorGetResumeMode), R(4), U8(1),
- B(Star), R(6),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(24),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(22),
B(LdaSmi), I8(2),
- B(TestEqualStrictNoFeedback), R(6),
- B(JumpIfTrue), U8(15),
- B(Jump), U8(2),
+ B(TestEqualStrictNoFeedback), R(5),
+ B(JumpIfTrue), U8(13),
B(LdaTrue),
- B(Star), R(8),
- B(Mov), R(5), R(7),
- B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(7), U8(2),
+ B(Star), R(7),
+ B(Mov), R(4), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
/* 45 S> */ B(Return),
- B(Ldar), R(5),
+ B(Ldar), R(4),
/* 0 E> */ B(Throw),
- /* 27 S> */ B(LdaImmutableCurrentContextSlot), U8(6),
+ /* 27 S> */ B(LdaImmutableCurrentContextSlot), U8(5),
B(Star), R(4),
- /* 30 E> */ B(LdaNamedProperty), R(4), U8(1), U8(4),
+ /* 30 E> */ B(LdaNamedProperty), R(4), U8(2), U8(5),
B(Star), R(3),
- B(LdaImmutableCurrentContextSlot), U8(6),
+ B(LdaImmutableCurrentContextSlot), U8(5),
B(Star), R(5),
- B(LdaImmutableCurrentContextSlot), U8(6),
+ B(LdaImmutableCurrentContextSlot), U8(5),
B(Star), R(6),
- /* 41 E> */ B(LdaNamedProperty), R(6), U8(2), U8(6),
+ /* 41 E> */ B(LdaNamedProperty), R(6), U8(3), U8(7),
B(Star), R(6),
- /* 31 E> */ B(CallProperty2), R(3), R(4), R(5), R(6), U8(2),
- B(StaCurrentContextSlot), U8(7),
- B(LdaCurrentContextSlot), U8(7),
+ /* 31 E> */ B(CallProperty2), R(3), R(4), R(5), R(6), U8(3),
+ B(StaCurrentContextSlot), U8(6),
+ B(LdaCurrentContextSlot), U8(6),
B(Star), R(3),
B(LdaTrue),
B(Star), R(4),
@@ -995,6 +937,7 @@ bytecodes: [
/* 45 S> */ B(Return),
]
constant pool: [
+ Smi [69],
FIXED_ARRAY_TYPE,
ONE_BYTE_INTERNALIZED_STRING_TYPE ["f"],
ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
index e77314533c..9c5a9f0158 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/NewAndSpread.golden
@@ -10,41 +10,36 @@ snippet: "
class A { constructor(...args) { this.args = args; } }
new A(...[1, 2, 3]);
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 64
+bytecode array length: 56
bytecodes: [
- B(LdaTheHole),
- B(Star), R(2),
/* 30 E> */ B(StackCheck),
+ B(CreateClosure), U8(0), U8(3), U8(2),
+ B(Star), R(2),
B(LdaTheHole),
- B(Star), R(0),
- /* 34 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
B(Star), R(3),
- B(LdaTheHole),
- B(Star), R(4),
B(LdaSmi), I8(34),
- B(Star), R(6),
+ B(Star), R(5),
B(LdaSmi), I8(88),
- B(Star), R(7),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(3), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
+ B(Star), R(6),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(2), U8(1),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
B(Star), R(0),
B(Star), R(1),
- B(Star), R(2),
- /* 89 S> */ B(CreateArrayLiteral), U8(1), U8(5), U8(9),
- B(Star), R(4),
- B(Ldar), R(2),
- /* 89 E> */ B(ConstructWithSpread), R(2), R(4), U8(1),
+ /* 89 S> */ B(CreateArrayLiteral), U8(1), U8(6), U8(17),
+ B(Star), R(3),
+ B(Ldar), R(1),
+ /* 89 E> */ B(ConstructWithSpread), R(1), R(3), U8(1),
B(LdaUndefined),
/* 110 S> */ B(Return),
]
constant pool: [
SHARED_FUNCTION_INFO_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
@@ -54,43 +49,38 @@ snippet: "
class A { constructor(...args) { this.args = args; } }
new A(0, ...[1, 2, 3]);
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 67
+bytecode array length: 59
bytecodes: [
- B(LdaTheHole),
- B(Star), R(2),
/* 30 E> */ B(StackCheck),
+ B(CreateClosure), U8(0), U8(3), U8(2),
+ B(Star), R(2),
B(LdaTheHole),
- B(Star), R(0),
- /* 34 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
B(Star), R(3),
- B(LdaTheHole),
- B(Star), R(4),
B(LdaSmi), I8(34),
- B(Star), R(6),
+ B(Star), R(5),
B(LdaSmi), I8(88),
- B(Star), R(7),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(3), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
+ B(Star), R(6),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(2), U8(1),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
B(Star), R(0),
B(Star), R(1),
- B(Star), R(2),
/* 89 S> */ B(LdaZero),
+ B(Star), R(3),
+ B(CreateArrayLiteral), U8(1), U8(6), U8(17),
B(Star), R(4),
- B(CreateArrayLiteral), U8(1), U8(5), U8(9),
- B(Star), R(5),
- B(Ldar), R(2),
- /* 89 E> */ B(ConstructWithSpread), R(2), R(4), U8(2),
+ B(Ldar), R(1),
+ /* 89 E> */ B(ConstructWithSpread), R(1), R(3), U8(2),
B(LdaUndefined),
/* 113 S> */ B(Return),
]
constant pool: [
SHARED_FUNCTION_INFO_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
@@ -100,57 +90,52 @@ snippet: "
class A { constructor(...args) { this.args = args; } }
new A(0, ...[1, 2, 3], 4);
"
-frame size: 9
+frame size: 8
parameter count: 1
-bytecode array length: 98
+bytecode array length: 90
bytecodes: [
- B(LdaTheHole),
- B(Star), R(2),
/* 30 E> */ B(StackCheck),
+ B(CreateClosure), U8(0), U8(3), U8(2),
+ B(Star), R(2),
B(LdaTheHole),
- B(Star), R(0),
- /* 34 S> */ B(CreateClosure), U8(0), U8(2), U8(2),
B(Star), R(3),
- B(LdaTheHole),
- B(Star), R(4),
B(LdaSmi), I8(34),
- B(Star), R(6),
+ B(Star), R(5),
B(LdaSmi), I8(88),
- B(Star), R(7),
- B(Mov), R(3), R(5),
- B(CallRuntime), U16(Runtime::kDefineClass), R(4), U8(4),
- B(Star), R(4),
- B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(3), U8(1),
- B(CallRuntime), U16(Runtime::kToFastProperties), R(3), U8(1),
+ B(Star), R(6),
+ B(Mov), R(2), R(4),
+ B(CallRuntime), U16(Runtime::kDefineClass), R(3), U8(4),
+ B(Star), R(3),
+ B(CallRuntime), U16(Runtime::kInstallClassNameAccessor), R(2), U8(1),
+ B(CallRuntime), U16(Runtime::kToFastProperties), R(2), U8(1),
B(Star), R(0),
B(Star), R(1),
- B(Star), R(2),
/* 89 S> */ B(LdaUndefined),
- B(Star), R(3),
+ B(Star), R(2),
B(LdaUndefined),
+ B(Star), R(4),
+ /* 93 E> */ B(CreateArrayLiteral), U8(1), U8(4), U8(17),
B(Star), R(5),
- /* 93 E> */ B(CreateArrayLiteral), U8(1), U8(3), U8(9),
- B(Star), R(6),
B(LdaUndefined),
+ B(Star), R(6),
+ B(CreateArrayLiteral), U8(2), U8(5), U8(17),
B(Star), R(7),
- B(CreateArrayLiteral), U8(2), U8(4), U8(9),
- B(Star), R(8),
- B(CallJSRuntime), U8(%spread_iterable), R(7), U8(2),
+ B(CallJSRuntime), U8(%spread_iterable), R(6), U8(2),
+ B(Star), R(6),
+ B(CreateArrayLiteral), U8(3), U8(6), U8(17),
B(Star), R(7),
- B(CreateArrayLiteral), U8(3), U8(5), U8(9),
- B(Star), R(8),
- B(CallJSRuntime), U8(%spread_arguments), R(5), U8(4),
- B(Star), R(5),
- B(Mov), R(1), R(4),
- B(CallJSRuntime), U8(%reflect_construct), R(3), U8(3),
+ B(CallJSRuntime), U8(%spread_arguments), R(4), U8(4),
+ B(Star), R(4),
+ B(Mov), R(0), R(3),
+ B(CallJSRuntime), U8(%reflect_construct), R(2), U8(3),
B(LdaUndefined),
/* 116 S> */ B(Return),
]
constant pool: [
SHARED_FUNCTION_INFO_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
+ TUPLE2_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
index 7a31554ac7..5b1046054b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiterals.golden
@@ -14,7 +14,7 @@ parameter count: 1
bytecode array length: 9
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(35), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(19), R(0),
B(Ldar), R(0),
/* 46 S> */ B(Return),
]
@@ -33,7 +33,7 @@ parameter count: 1
bytecode array length: 9
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(0),
B(Ldar), R(0),
/* 71 S> */ B(Return),
]
@@ -54,8 +54,8 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(1),
- /* 75 E> */ B(StaNamedOwnProperty), R(1), U8(1), U8(3),
+ /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(1),
+ /* 75 E> */ B(StaNamedOwnProperty), R(1), U8(1), U8(4),
B(Ldar), R(1),
/* 80 S> */ B(Return),
]
@@ -77,9 +77,9 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(1),
- /* 69 E> */ B(AddSmi), I8(1), U8(2),
- B(StaNamedOwnProperty), R(1), U8(1), U8(4),
+ /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(4), U8(1), R(1),
+ /* 69 E> */ B(AddSmi), I8(1), U8(3),
+ B(StaNamedOwnProperty), R(1), U8(1), U8(5),
B(Ldar), R(1),
/* 76 S> */ B(Return),
]
@@ -99,9 +99,9 @@ parameter count: 1
bytecode array length: 17
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(0),
- B(CreateClosure), U8(1), U8(2), U8(2),
- B(StaNamedOwnProperty), R(0), U8(2), U8(4),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(4), U8(1), R(0),
+ B(CreateClosure), U8(1), U8(3), U8(2),
+ B(StaNamedOwnProperty), R(0), U8(2), U8(5),
B(Ldar), R(0),
/* 67 S> */ B(Return),
]
@@ -122,9 +122,9 @@ parameter count: 1
bytecode array length: 17
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(0),
- B(CreateClosure), U8(1), U8(2), U8(2),
- B(StaNamedOwnProperty), R(0), U8(2), U8(4),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(4), U8(1), R(0),
+ B(CreateClosure), U8(1), U8(3), U8(2),
+ B(StaNamedOwnProperty), R(0), U8(2), U8(5),
B(Ldar), R(0),
/* 68 S> */ B(Return),
]
@@ -145,10 +145,10 @@ parameter count: 1
bytecode array length: 33
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(4), U8(1), R(0),
B(LdaConstant), U8(1),
B(Star), R(2),
- B(CreateClosure), U8(2), U8(2), U8(2),
+ B(CreateClosure), U8(2), U8(3), U8(2),
B(Star), R(3),
B(LdaNull),
B(Star), R(4),
@@ -176,12 +176,12 @@ parameter count: 1
bytecode array length: 36
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(4), U8(1), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(5), U8(1), R(0),
B(LdaConstant), U8(1),
B(Star), R(2),
- B(CreateClosure), U8(2), U8(2), U8(2),
+ B(CreateClosure), U8(2), U8(3), U8(2),
B(Star), R(3),
- B(CreateClosure), U8(3), U8(3), U8(2),
+ B(CreateClosure), U8(3), U8(4), U8(2),
B(Star), R(4),
B(LdaZero),
B(Star), R(5),
@@ -208,12 +208,12 @@ parameter count: 1
bytecode array length: 33
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(0),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(4), U8(1), R(0),
B(LdaConstant), U8(1),
B(Star), R(2),
B(LdaNull),
B(Star), R(3),
- B(CreateClosure), U8(2), U8(2), U8(2),
+ B(CreateClosure), U8(2), U8(3), U8(2),
B(Star), R(4),
B(LdaZero),
B(Star), R(5),
@@ -241,7 +241,7 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
- /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(1),
+ /* 45 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(1),
B(LdaSmi), I8(1),
B(Star), R(3),
B(LdaZero),
@@ -262,17 +262,13 @@ handlers: [
snippet: "
return { __proto__: null };
"
-frame size: 3
+frame size: 1
parameter count: 1
-bytecode array length: 20
+bytecode array length: 9
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(35), R(0),
- B(LdaNull),
- B(Star), R(2),
- B(Mov), R(0), R(1),
- B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(1), U8(2),
- B(Ldar), R(1),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(27), R(0),
+ B(Ldar), R(0),
/* 62 S> */ B(Return),
]
constant pool: [
@@ -285,18 +281,17 @@ handlers: [
snippet: "
var a = 'test'; return { [a]: 1 };
"
-frame size: 4
+frame size: 3
parameter count: 1
-bytecode array length: 24
+bytecode array length: 22
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(2), U8(35), R(1),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(3), U8(19), R(1),
/* 60 E> */ B(ToName), R(2),
B(LdaSmi), I8(1),
- B(Star), R(3),
- B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(3),
+ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(4),
B(Ldar), R(1),
/* 69 S> */ B(Return),
]
@@ -311,19 +306,18 @@ handlers: [
snippet: "
var a = 'test'; return { val: a, [a]: 1 };
"
-frame size: 4
+frame size: 3
parameter count: 1
-bytecode array length: 28
+bytecode array length: 26
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(2), U8(1), R(1),
- /* 64 E> */ B(StaNamedOwnProperty), R(1), U8(2), U8(3),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(3), U8(1), R(1),
+ /* 64 E> */ B(StaNamedOwnProperty), R(1), U8(2), U8(4),
/* 68 E> */ B(ToName), R(2),
B(LdaSmi), I8(1),
- B(Star), R(3),
- B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(5),
+ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(6),
B(Ldar), R(1),
/* 77 S> */ B(Return),
]
@@ -346,12 +340,12 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(3), U8(35), R(1),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(4), U8(19), R(1),
/* 60 E> */ B(ToName), R(2),
B(LdaSmi), I8(1),
+ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(5),
+ B(CreateObjectLiteral), U8(1), U8(3), U8(19), R(4),
B(Star), R(3),
- B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(4),
- B(CreateObjectLiteral), U8(1), U8(2), U8(35), R(4),
B(Mov), R(1), R(2),
B(Mov), R(4), R(3),
B(CallRuntime), U16(Runtime::kInternalSetPrototype), R(2), U8(2),
@@ -376,14 +370,14 @@ bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaConstant), U8(0),
B(Star), R(0),
- /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(4), U8(35), R(1),
+ /* 50 S> */ B(CreateObjectLiteral), U8(1), U8(5), U8(19), R(1),
/* 60 E> */ B(ToName), R(2),
B(LdaConstant), U8(2),
+ B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(6),
B(Star), R(3),
- B(StaDataPropertyInLiteral), R(1), R(2), U8(0), U8(5),
B(LdaConstant), U8(3),
B(Star), R(3),
- B(CreateClosure), U8(4), U8(2), U8(2),
+ B(CreateClosure), U8(4), U8(3), U8(2),
B(Star), R(4),
B(LdaZero),
B(Star), R(5),
@@ -391,7 +385,7 @@ bytecodes: [
B(CallRuntime), U16(Runtime::kDefineGetterPropertyUnchecked), R(2), U8(4),
B(LdaConstant), U8(3),
B(Star), R(3),
- B(CreateClosure), U8(5), U8(3), U8(2),
+ B(CreateClosure), U8(5), U8(4), U8(2),
B(Star), R(4),
B(LdaZero),
B(Star), R(5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
index e1f320e397..54f00f400d 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/ObjectLiteralsWide.golden
@@ -783,7 +783,7 @@ bytecodes: [
B(Star), R(0),
/* 2591 S> */ B(LdaConstant), U8(255),
B(Star), R(0),
- /* 2601 S> */ B(Wide), B(CreateObjectLiteral), U16(256), U16(2), U8(1), R16(1),
+ /* 2601 S> */ B(Wide), B(CreateObjectLiteral), U16(256), U16(3), U8(1), R16(1),
B(Ldar), R(1),
/* 2638 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
index d72a32aef4..474a5c8c92 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/OuterContextVariables.golden
@@ -26,7 +26,7 @@ bytecodes: [
/* 102 S> */ B(LdaImmutableContextSlot), R(context), U8(4), U8(1),
B(Star), R(0),
B(LdaImmutableCurrentContextSlot), U8(4),
- /* 118 E> */ B(Mul), R(0), U8(2),
+ /* 118 E> */ B(Mul), R(0), U8(3),
/* 130 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
index 62c7c14efd..7784d86192 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrimitiveExpressions.golden
@@ -36,7 +36,7 @@ bytecodes: [
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(Nop),
- /* 54 E> */ B(AddSmi), I8(3), U8(2),
+ /* 54 E> */ B(AddSmi), I8(3), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -58,7 +58,7 @@ bytecodes: [
/* 45 S> */ B(LdaSmi), I8(3),
B(Star), R(1),
B(Ldar), R(0),
- /* 54 E> */ B(Add), R(1), U8(2),
+ /* 54 E> */ B(Add), R(1), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -78,7 +78,7 @@ bytecodes: [
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 45 S> */ B(Nop),
- /* 54 E> */ B(SubSmi), I8(3), U8(2),
+ /* 54 E> */ B(SubSmi), I8(3), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -100,7 +100,7 @@ bytecodes: [
/* 45 S> */ B(LdaSmi), I8(3),
B(Star), R(1),
B(Ldar), R(0),
- /* 54 E> */ B(Sub), R(1), U8(2),
+ /* 54 E> */ B(Sub), R(1), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -120,7 +120,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(4),
B(Star), R(0),
/* 45 S> */ B(Nop),
- /* 54 E> */ B(MulSmi), I8(3), U8(2),
+ /* 54 E> */ B(MulSmi), I8(3), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -140,7 +140,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(4),
B(Star), R(0),
/* 45 S> */ B(Nop),
- /* 54 E> */ B(MulSmi), I8(3), U8(2),
+ /* 54 E> */ B(MulSmi), I8(3), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -160,7 +160,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(4),
B(Star), R(0),
/* 45 S> */ B(Nop),
- /* 54 E> */ B(DivSmi), I8(3), U8(2),
+ /* 54 E> */ B(DivSmi), I8(3), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -182,7 +182,7 @@ bytecodes: [
/* 45 S> */ B(LdaSmi), I8(3),
B(Star), R(1),
B(Ldar), R(0),
- /* 54 E> */ B(Div), R(1), U8(2),
+ /* 54 E> */ B(Div), R(1), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -202,7 +202,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(4),
B(Star), R(0),
/* 45 S> */ B(Nop),
- /* 54 E> */ B(ModSmi), I8(3), U8(2),
+ /* 54 E> */ B(ModSmi), I8(3), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -224,7 +224,7 @@ bytecodes: [
/* 45 S> */ B(LdaSmi), I8(3),
B(Star), R(1),
B(Ldar), R(0),
- /* 54 E> */ B(Mod), R(1), U8(2),
+ /* 54 E> */ B(Mod), R(1), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -244,7 +244,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(Nop),
- /* 54 E> */ B(BitwiseOrSmi), I8(2), U8(2),
+ /* 54 E> */ B(BitwiseOrSmi), I8(2), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -264,7 +264,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(Nop),
- /* 54 E> */ B(BitwiseOrSmi), I8(2), U8(2),
+ /* 54 E> */ B(BitwiseOrSmi), I8(2), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -284,7 +284,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(Nop),
- /* 54 E> */ B(BitwiseXorSmi), I8(2), U8(2),
+ /* 54 E> */ B(BitwiseXorSmi), I8(2), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -304,7 +304,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(Nop),
- /* 54 E> */ B(BitwiseXorSmi), I8(2), U8(2),
+ /* 54 E> */ B(BitwiseXorSmi), I8(2), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -324,7 +324,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(Nop),
- /* 54 E> */ B(BitwiseAndSmi), I8(2), U8(2),
+ /* 54 E> */ B(BitwiseAndSmi), I8(2), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -344,7 +344,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 45 S> */ B(Nop),
- /* 54 E> */ B(BitwiseAndSmi), I8(2), U8(2),
+ /* 54 E> */ B(BitwiseAndSmi), I8(2), U8(3),
/* 59 S> */ B(Return),
]
constant pool: [
@@ -364,7 +364,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 46 S> */ B(Nop),
- /* 55 E> */ B(ShiftLeftSmi), I8(3), U8(2),
+ /* 55 E> */ B(ShiftLeftSmi), I8(3), U8(3),
/* 61 S> */ B(Return),
]
constant pool: [
@@ -386,7 +386,7 @@ bytecodes: [
/* 46 S> */ B(LdaSmi), I8(3),
B(Star), R(1),
B(Ldar), R(0),
- /* 55 E> */ B(ShiftLeft), R(1), U8(2),
+ /* 55 E> */ B(ShiftLeft), R(1), U8(3),
/* 61 S> */ B(Return),
]
constant pool: [
@@ -406,7 +406,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 46 S> */ B(Nop),
- /* 55 E> */ B(ShiftRightSmi), I8(3), U8(2),
+ /* 55 E> */ B(ShiftRightSmi), I8(3), U8(3),
/* 61 S> */ B(Return),
]
constant pool: [
@@ -428,7 +428,7 @@ bytecodes: [
/* 46 S> */ B(LdaSmi), I8(3),
B(Star), R(1),
B(Ldar), R(0),
- /* 55 E> */ B(ShiftRight), R(1), U8(2),
+ /* 55 E> */ B(ShiftRight), R(1), U8(3),
/* 61 S> */ B(Return),
]
constant pool: [
@@ -448,7 +448,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(10),
B(Star), R(0),
/* 46 S> */ B(Nop),
- /* 55 E> */ B(ShiftRightLogicalSmi), I8(3), U8(2),
+ /* 55 E> */ B(ShiftRightLogicalSmi), I8(3), U8(3),
/* 62 S> */ B(Return),
]
constant pool: [
@@ -470,7 +470,7 @@ bytecodes: [
/* 46 S> */ B(LdaSmi), I8(3),
B(Star), R(1),
B(Ldar), R(0),
- /* 55 E> */ B(ShiftRightLogical), R(1), U8(2),
+ /* 55 E> */ B(ShiftRightLogical), R(1), U8(3),
/* 62 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
index 2feef5ed76..8bf592611c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyCall.golden
@@ -17,9 +17,9 @@ bytecode array length: 13
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(Nop),
- /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
+ /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
B(Star), R(0),
- /* 25 E> */ B(CallProperty0), R(0), R(arg0), U8(2),
+ /* 25 E> */ B(CallProperty0), R(0), R(arg0), U8(3),
/* 33 S> */ B(Return),
]
constant pool: [
@@ -39,9 +39,9 @@ bytecode array length: 15
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 22 S> */ B(Nop),
- /* 30 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
+ /* 30 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
B(Star), R(0),
- /* 31 E> */ B(CallProperty2), R(0), R(arg0), R(arg1), R(arg2), U8(2),
+ /* 31 E> */ B(CallProperty2), R(0), R(arg0), R(arg1), R(arg2), U8(3),
/* 43 S> */ B(Return),
]
constant pool: [
@@ -61,12 +61,12 @@ bytecode array length: 22
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 19 S> */ B(Nop),
- /* 27 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
+ /* 27 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
B(Star), R(0),
B(Ldar), R(arg1),
- /* 35 E> */ B(Add), R(arg1), U8(6),
+ /* 35 E> */ B(Add), R(arg1), U8(7),
B(Star), R(2),
- /* 28 E> */ B(CallProperty2), R(0), R(arg0), R(2), R(arg1), U8(2),
+ /* 28 E> */ B(CallProperty2), R(0), R(arg0), R(2), R(arg1), U8(3),
/* 44 S> */ B(Return),
]
constant pool: [
@@ -215,265 +215,265 @@ bytecode array length: 665
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 17 S> */ B(Nop),
- /* 18 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ /* 18 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
/* 26 S> */ B(Nop),
- /* 27 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
+ /* 27 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
/* 35 S> */ B(Nop),
- /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(6),
+ /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
/* 44 S> */ B(Nop),
- /* 45 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(8),
+ /* 45 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
/* 53 S> */ B(Nop),
- /* 54 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(10),
+ /* 54 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
/* 62 S> */ B(Nop),
- /* 63 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(12),
+ /* 63 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
/* 71 S> */ B(Nop),
- /* 72 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(14),
+ /* 72 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
/* 80 S> */ B(Nop),
- /* 81 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(16),
+ /* 81 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
/* 89 S> */ B(Nop),
- /* 90 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(18),
+ /* 90 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
/* 98 S> */ B(Nop),
- /* 99 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(20),
+ /* 99 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
/* 107 S> */ B(Nop),
- /* 108 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(22),
+ /* 108 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
/* 116 S> */ B(Nop),
- /* 117 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(24),
+ /* 117 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
/* 125 S> */ B(Nop),
- /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(26),
+ /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
/* 134 S> */ B(Nop),
- /* 135 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(28),
+ /* 135 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
/* 143 S> */ B(Nop),
- /* 144 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(30),
+ /* 144 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
/* 152 S> */ B(Nop),
- /* 153 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(32),
+ /* 153 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
/* 161 S> */ B(Nop),
- /* 162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(34),
+ /* 162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
/* 170 S> */ B(Nop),
- /* 171 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(36),
+ /* 171 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
/* 179 S> */ B(Nop),
- /* 180 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(38),
+ /* 180 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
/* 188 S> */ B(Nop),
- /* 189 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(40),
+ /* 189 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
/* 197 S> */ B(Nop),
- /* 198 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(42),
+ /* 198 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
/* 206 S> */ B(Nop),
- /* 207 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(44),
+ /* 207 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
/* 215 S> */ B(Nop),
- /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(46),
+ /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
/* 224 S> */ B(Nop),
- /* 225 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(48),
+ /* 225 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
/* 233 S> */ B(Nop),
- /* 234 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(50),
+ /* 234 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
/* 242 S> */ B(Nop),
- /* 243 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(52),
+ /* 243 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
/* 251 S> */ B(Nop),
- /* 252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(54),
+ /* 252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
/* 260 S> */ B(Nop),
- /* 261 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(56),
+ /* 261 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
/* 269 S> */ B(Nop),
- /* 270 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(58),
+ /* 270 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
/* 278 S> */ B(Nop),
- /* 279 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(60),
+ /* 279 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
/* 287 S> */ B(Nop),
- /* 288 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(62),
+ /* 288 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
/* 296 S> */ B(Nop),
- /* 297 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(64),
+ /* 297 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
/* 305 S> */ B(Nop),
- /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(66),
+ /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
/* 314 S> */ B(Nop),
- /* 315 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(68),
+ /* 315 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
/* 323 S> */ B(Nop),
- /* 324 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(70),
+ /* 324 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
/* 332 S> */ B(Nop),
- /* 333 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(72),
+ /* 333 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
/* 341 S> */ B(Nop),
- /* 342 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(74),
+ /* 342 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
/* 350 S> */ B(Nop),
- /* 351 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(76),
+ /* 351 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
/* 359 S> */ B(Nop),
- /* 360 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(78),
+ /* 360 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
/* 368 S> */ B(Nop),
- /* 369 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(80),
+ /* 369 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
/* 377 S> */ B(Nop),
- /* 378 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(82),
+ /* 378 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
/* 386 S> */ B(Nop),
- /* 387 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(84),
+ /* 387 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
/* 395 S> */ B(Nop),
- /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(86),
+ /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
/* 404 S> */ B(Nop),
- /* 405 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(88),
+ /* 405 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
/* 413 S> */ B(Nop),
- /* 414 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(90),
+ /* 414 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
/* 422 S> */ B(Nop),
- /* 423 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(92),
+ /* 423 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
/* 431 S> */ B(Nop),
- /* 432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(94),
+ /* 432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
/* 440 S> */ B(Nop),
- /* 441 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(96),
+ /* 441 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
/* 449 S> */ B(Nop),
- /* 450 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(98),
+ /* 450 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
/* 458 S> */ B(Nop),
- /* 459 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(100),
+ /* 459 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
/* 467 S> */ B(Nop),
- /* 468 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(102),
+ /* 468 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
/* 476 S> */ B(Nop),
- /* 477 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(104),
+ /* 477 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
/* 485 S> */ B(Nop),
- /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(106),
+ /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
/* 494 S> */ B(Nop),
- /* 495 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(108),
+ /* 495 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
/* 503 S> */ B(Nop),
- /* 504 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(110),
+ /* 504 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
/* 512 S> */ B(Nop),
- /* 513 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(112),
+ /* 513 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
/* 521 S> */ B(Nop),
- /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(114),
+ /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
/* 530 S> */ B(Nop),
- /* 531 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(116),
+ /* 531 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
/* 539 S> */ B(Nop),
- /* 540 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(118),
+ /* 540 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
/* 548 S> */ B(Nop),
- /* 549 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(120),
+ /* 549 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
/* 557 S> */ B(Nop),
- /* 558 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(122),
+ /* 558 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
/* 566 S> */ B(Nop),
- /* 567 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(124),
+ /* 567 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
/* 575 S> */ B(Nop),
- /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(126),
+ /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
/* 584 S> */ B(Nop),
- /* 585 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(128),
+ /* 585 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
/* 593 S> */ B(Nop),
- /* 594 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(130),
+ /* 594 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
/* 602 S> */ B(Nop),
- /* 603 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(132),
+ /* 603 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
/* 611 S> */ B(Nop),
- /* 612 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(134),
+ /* 612 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
/* 620 S> */ B(Nop),
- /* 621 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(136),
+ /* 621 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
/* 629 S> */ B(Nop),
- /* 630 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(138),
+ /* 630 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
/* 638 S> */ B(Nop),
- /* 639 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(140),
+ /* 639 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
/* 647 S> */ B(Nop),
- /* 648 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(142),
+ /* 648 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
/* 656 S> */ B(Nop),
- /* 657 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(144),
+ /* 657 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
/* 665 S> */ B(Nop),
- /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(146),
+ /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
/* 674 S> */ B(Nop),
- /* 675 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(148),
+ /* 675 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
/* 683 S> */ B(Nop),
- /* 684 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(150),
+ /* 684 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
/* 692 S> */ B(Nop),
- /* 693 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(152),
+ /* 693 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
/* 701 S> */ B(Nop),
- /* 702 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(154),
+ /* 702 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
/* 710 S> */ B(Nop),
- /* 711 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(156),
+ /* 711 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
/* 719 S> */ B(Nop),
- /* 720 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(158),
+ /* 720 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
/* 728 S> */ B(Nop),
- /* 729 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(160),
+ /* 729 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
/* 737 S> */ B(Nop),
- /* 738 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(162),
+ /* 738 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
/* 746 S> */ B(Nop),
- /* 747 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(164),
+ /* 747 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
/* 755 S> */ B(Nop),
- /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(166),
+ /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
/* 764 S> */ B(Nop),
- /* 765 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(168),
+ /* 765 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
/* 773 S> */ B(Nop),
- /* 774 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(170),
+ /* 774 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
/* 782 S> */ B(Nop),
- /* 783 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(172),
+ /* 783 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
/* 791 S> */ B(Nop),
- /* 792 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(174),
+ /* 792 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
/* 800 S> */ B(Nop),
- /* 801 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(176),
+ /* 801 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
/* 809 S> */ B(Nop),
- /* 810 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(178),
+ /* 810 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
/* 818 S> */ B(Nop),
- /* 819 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(180),
+ /* 819 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
/* 827 S> */ B(Nop),
- /* 828 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(182),
+ /* 828 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
/* 836 S> */ B(Nop),
- /* 837 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(184),
+ /* 837 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
/* 845 S> */ B(Nop),
- /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(186),
+ /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
/* 854 S> */ B(Nop),
- /* 855 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(188),
+ /* 855 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
/* 863 S> */ B(Nop),
- /* 864 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(190),
+ /* 864 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
/* 872 S> */ B(Nop),
- /* 873 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(192),
+ /* 873 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
/* 881 S> */ B(Nop),
- /* 882 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(194),
+ /* 882 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
/* 890 S> */ B(Nop),
- /* 891 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(196),
+ /* 891 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
/* 899 S> */ B(Nop),
- /* 900 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(198),
+ /* 900 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
/* 908 S> */ B(Nop),
- /* 909 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(200),
+ /* 909 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
/* 917 S> */ B(Nop),
- /* 918 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(202),
+ /* 918 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
/* 926 S> */ B(Nop),
- /* 927 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(204),
+ /* 927 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
/* 935 S> */ B(Nop),
- /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(206),
+ /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
/* 944 S> */ B(Nop),
- /* 945 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(208),
+ /* 945 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
/* 953 S> */ B(Nop),
- /* 954 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(210),
+ /* 954 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
/* 962 S> */ B(Nop),
- /* 963 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(212),
+ /* 963 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
/* 971 S> */ B(Nop),
- /* 972 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(214),
+ /* 972 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
/* 980 S> */ B(Nop),
- /* 981 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(216),
+ /* 981 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
/* 989 S> */ B(Nop),
- /* 990 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(218),
+ /* 990 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
/* 998 S> */ B(Nop),
- /* 999 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(220),
+ /* 999 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
/* 1007 S> */ B(Nop),
- /* 1008 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(222),
+ /* 1008 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
/* 1016 S> */ B(Nop),
- /* 1017 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(224),
+ /* 1017 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
/* 1025 S> */ B(Nop),
- /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(226),
+ /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
/* 1034 S> */ B(Nop),
- /* 1035 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(228),
+ /* 1035 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
/* 1043 S> */ B(Nop),
- /* 1044 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(230),
+ /* 1044 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
/* 1052 S> */ B(Nop),
- /* 1053 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(232),
+ /* 1053 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
/* 1061 S> */ B(Nop),
- /* 1062 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(234),
+ /* 1062 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
/* 1070 S> */ B(Nop),
- /* 1071 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(236),
+ /* 1071 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
/* 1079 S> */ B(Nop),
- /* 1080 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(238),
+ /* 1080 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
/* 1088 S> */ B(Nop),
- /* 1089 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(240),
+ /* 1089 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
/* 1097 S> */ B(Nop),
- /* 1098 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(242),
+ /* 1098 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
/* 1106 S> */ B(Nop),
- /* 1107 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(244),
+ /* 1107 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
/* 1115 S> */ B(Nop),
- /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(246),
+ /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
/* 1124 S> */ B(Nop),
- /* 1125 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(248),
+ /* 1125 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
/* 1133 S> */ B(Nop),
- /* 1134 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(250),
+ /* 1134 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
/* 1142 S> */ B(Nop),
- /* 1143 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
+ /* 1143 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
/* 1151 S> */ B(Nop),
- /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
+ /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
/* 1160 S> */ B(Nop),
- /* 1161 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(256),
+ /* 1161 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(257),
/* 1169 S> */ B(Nop),
- /* 1177 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(260),
+ /* 1177 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(261),
B(Star), R(0),
- /* 1178 E> */ B(Wide), B(CallProperty0), R16(0), R16(arg0), U16(258),
+ /* 1178 E> */ B(Wide), B(CallProperty0), R16(0), R16(arg0), U16(259),
/* 1186 S> */ B(Return),
]
constant pool: [
@@ -493,23 +493,23 @@ bytecode array length: 52
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(Nop),
- /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(8),
+ /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
B(Star), R(2),
B(LdaSmi), I8(1),
B(Star), R(4),
- /* 25 E> */ B(CallProperty1), R(2), R(arg0), R(4), U8(6),
+ /* 25 E> */ B(CallProperty1), R(2), R(arg0), R(4), U8(7),
B(Star), R(2),
- /* 32 E> */ B(LdaNamedProperty), R(2), U8(0), U8(10),
+ /* 32 E> */ B(LdaNamedProperty), R(2), U8(0), U8(11),
B(Star), R(1),
B(LdaSmi), I8(2),
B(Star), R(3),
- /* 33 E> */ B(CallProperty1), R(1), R(2), R(3), U8(4),
+ /* 33 E> */ B(CallProperty1), R(1), R(2), R(3), U8(5),
B(Star), R(1),
- /* 40 E> */ B(LdaNamedProperty), R(1), U8(0), U8(12),
+ /* 40 E> */ B(LdaNamedProperty), R(1), U8(0), U8(13),
B(Star), R(0),
B(LdaSmi), I8(3),
B(Star), R(2),
- /* 41 E> */ B(CallProperty1), R(0), R(1), R(2), U8(2),
+ /* 41 E> */ B(CallProperty1), R(0), R(1), R(2), U8(3),
/* 50 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
index bee525cf44..33ec14c396 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyLoads.golden
@@ -17,7 +17,7 @@ bytecode array length: 7
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(Nop),
- /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
/* 31 S> */ B(Return),
]
constant pool: [
@@ -37,7 +37,7 @@ bytecode array length: 7
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(Nop),
- /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ /* 24 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
/* 33 S> */ B(Return),
]
constant pool: [
@@ -57,7 +57,7 @@ bytecode array length: 7
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaSmi), I8(100),
- /* 24 E> */ B(LdaKeyedProperty), R(arg0), U8(2),
+ /* 24 E> */ B(LdaKeyedProperty), R(arg0), U8(3),
/* 31 S> */ B(Return),
]
constant pool: [
@@ -76,7 +76,7 @@ bytecode array length: 7
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 19 S> */ B(Ldar), R(arg1),
- /* 28 E> */ B(LdaKeyedProperty), R(arg0), U8(2),
+ /* 28 E> */ B(LdaKeyedProperty), R(arg0), U8(3),
/* 32 S> */ B(Return),
]
constant pool: [
@@ -95,10 +95,10 @@ bytecode array length: 14
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 25 S> */ B(Nop),
- /* 25 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ /* 25 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
B(Star), R(0),
/* 32 S> */ B(LdaSmi), I8(-124),
- /* 40 E> */ B(LdaKeyedProperty), R(arg0), U8(4),
+ /* 40 E> */ B(LdaKeyedProperty), R(arg0), U8(5),
/* 48 S> */ B(Return),
]
constant pool: [
@@ -249,391 +249,391 @@ bytecode array length: 911
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 27 S> */ B(Nop),
- /* 32 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ /* 32 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
B(Star), R(0),
/* 41 S> */ B(Nop),
- /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
+ /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
B(Star), R(0),
/* 55 S> */ B(Nop),
- /* 60 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(6),
+ /* 60 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
B(Star), R(0),
/* 69 S> */ B(Nop),
- /* 74 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(8),
+ /* 74 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
B(Star), R(0),
/* 83 S> */ B(Nop),
- /* 88 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(10),
+ /* 88 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
B(Star), R(0),
/* 97 S> */ B(Nop),
- /* 102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(12),
+ /* 102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
B(Star), R(0),
/* 111 S> */ B(Nop),
- /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(14),
+ /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
B(Star), R(0),
/* 125 S> */ B(Nop),
- /* 130 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(16),
+ /* 130 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
B(Star), R(0),
/* 139 S> */ B(Nop),
- /* 144 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(18),
+ /* 144 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
B(Star), R(0),
/* 153 S> */ B(Nop),
- /* 158 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(20),
+ /* 158 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
B(Star), R(0),
/* 167 S> */ B(Nop),
- /* 172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(22),
+ /* 172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
B(Star), R(0),
/* 181 S> */ B(Nop),
- /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(24),
+ /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
B(Star), R(0),
/* 195 S> */ B(Nop),
- /* 200 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(26),
+ /* 200 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
B(Star), R(0),
/* 209 S> */ B(Nop),
- /* 214 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(28),
+ /* 214 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
B(Star), R(0),
/* 223 S> */ B(Nop),
- /* 228 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(30),
+ /* 228 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
B(Star), R(0),
/* 237 S> */ B(Nop),
- /* 242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(32),
+ /* 242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
B(Star), R(0),
/* 251 S> */ B(Nop),
- /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(34),
+ /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
B(Star), R(0),
/* 265 S> */ B(Nop),
- /* 270 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(36),
+ /* 270 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
B(Star), R(0),
/* 279 S> */ B(Nop),
- /* 284 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(38),
+ /* 284 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
B(Star), R(0),
/* 293 S> */ B(Nop),
- /* 298 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(40),
+ /* 298 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
B(Star), R(0),
/* 307 S> */ B(Nop),
- /* 312 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(42),
+ /* 312 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
B(Star), R(0),
/* 321 S> */ B(Nop),
- /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(44),
+ /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
B(Star), R(0),
/* 335 S> */ B(Nop),
- /* 340 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(46),
+ /* 340 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
B(Star), R(0),
/* 349 S> */ B(Nop),
- /* 354 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(48),
+ /* 354 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
B(Star), R(0),
/* 363 S> */ B(Nop),
- /* 368 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(50),
+ /* 368 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
B(Star), R(0),
/* 377 S> */ B(Nop),
- /* 382 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(52),
+ /* 382 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
B(Star), R(0),
/* 391 S> */ B(Nop),
- /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(54),
+ /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
B(Star), R(0),
/* 405 S> */ B(Nop),
- /* 410 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(56),
+ /* 410 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
B(Star), R(0),
/* 419 S> */ B(Nop),
- /* 424 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(58),
+ /* 424 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
B(Star), R(0),
/* 433 S> */ B(Nop),
- /* 438 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(60),
+ /* 438 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
B(Star), R(0),
/* 447 S> */ B(Nop),
- /* 452 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(62),
+ /* 452 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
B(Star), R(0),
/* 461 S> */ B(Nop),
- /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(64),
+ /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
B(Star), R(0),
/* 475 S> */ B(Nop),
- /* 480 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(66),
+ /* 480 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
B(Star), R(0),
/* 489 S> */ B(Nop),
- /* 494 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(68),
+ /* 494 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
B(Star), R(0),
/* 503 S> */ B(Nop),
- /* 508 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(70),
+ /* 508 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
B(Star), R(0),
/* 517 S> */ B(Nop),
- /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(72),
+ /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
B(Star), R(0),
/* 531 S> */ B(Nop),
- /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(74),
+ /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
B(Star), R(0),
/* 545 S> */ B(Nop),
- /* 550 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(76),
+ /* 550 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
B(Star), R(0),
/* 559 S> */ B(Nop),
- /* 564 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(78),
+ /* 564 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
B(Star), R(0),
/* 573 S> */ B(Nop),
- /* 578 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(80),
+ /* 578 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
B(Star), R(0),
/* 587 S> */ B(Nop),
- /* 592 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(82),
+ /* 592 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
B(Star), R(0),
/* 601 S> */ B(Nop),
- /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(84),
+ /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
B(Star), R(0),
/* 615 S> */ B(Nop),
- /* 620 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(86),
+ /* 620 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
B(Star), R(0),
/* 629 S> */ B(Nop),
- /* 634 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(88),
+ /* 634 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
B(Star), R(0),
/* 643 S> */ B(Nop),
- /* 648 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(90),
+ /* 648 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
B(Star), R(0),
/* 657 S> */ B(Nop),
- /* 662 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(92),
+ /* 662 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
B(Star), R(0),
/* 671 S> */ B(Nop),
- /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(94),
+ /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
B(Star), R(0),
/* 685 S> */ B(Nop),
- /* 690 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(96),
+ /* 690 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
B(Star), R(0),
/* 699 S> */ B(Nop),
- /* 704 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(98),
+ /* 704 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
B(Star), R(0),
/* 713 S> */ B(Nop),
- /* 718 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(100),
+ /* 718 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
B(Star), R(0),
/* 727 S> */ B(Nop),
- /* 732 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(102),
+ /* 732 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
B(Star), R(0),
/* 741 S> */ B(Nop),
- /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(104),
+ /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
B(Star), R(0),
/* 755 S> */ B(Nop),
- /* 760 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(106),
+ /* 760 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
B(Star), R(0),
/* 769 S> */ B(Nop),
- /* 774 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(108),
+ /* 774 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
B(Star), R(0),
/* 783 S> */ B(Nop),
- /* 788 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(110),
+ /* 788 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
B(Star), R(0),
/* 797 S> */ B(Nop),
- /* 802 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(112),
+ /* 802 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
B(Star), R(0),
/* 811 S> */ B(Nop),
- /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(114),
+ /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
B(Star), R(0),
/* 825 S> */ B(Nop),
- /* 830 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(116),
+ /* 830 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
B(Star), R(0),
/* 839 S> */ B(Nop),
- /* 844 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(118),
+ /* 844 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
B(Star), R(0),
/* 853 S> */ B(Nop),
- /* 858 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(120),
+ /* 858 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
B(Star), R(0),
/* 867 S> */ B(Nop),
- /* 872 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(122),
+ /* 872 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
B(Star), R(0),
/* 881 S> */ B(Nop),
- /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(124),
+ /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
B(Star), R(0),
/* 895 S> */ B(Nop),
- /* 900 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(126),
+ /* 900 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
B(Star), R(0),
/* 909 S> */ B(Nop),
- /* 914 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(128),
+ /* 914 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
B(Star), R(0),
/* 923 S> */ B(Nop),
- /* 928 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(130),
+ /* 928 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
B(Star), R(0),
/* 937 S> */ B(Nop),
- /* 942 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(132),
+ /* 942 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
B(Star), R(0),
/* 951 S> */ B(Nop),
- /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(134),
+ /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
B(Star), R(0),
/* 965 S> */ B(Nop),
- /* 970 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(136),
+ /* 970 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
B(Star), R(0),
/* 979 S> */ B(Nop),
- /* 984 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(138),
+ /* 984 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
B(Star), R(0),
/* 993 S> */ B(Nop),
- /* 998 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(140),
+ /* 998 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
B(Star), R(0),
/* 1007 S> */ B(Nop),
- /* 1012 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(142),
+ /* 1012 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
B(Star), R(0),
/* 1021 S> */ B(Nop),
- /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(144),
+ /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
B(Star), R(0),
/* 1035 S> */ B(Nop),
- /* 1040 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(146),
+ /* 1040 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
B(Star), R(0),
/* 1049 S> */ B(Nop),
- /* 1054 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(148),
+ /* 1054 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
B(Star), R(0),
/* 1063 S> */ B(Nop),
- /* 1068 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(150),
+ /* 1068 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
B(Star), R(0),
/* 1077 S> */ B(Nop),
- /* 1082 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(152),
+ /* 1082 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
B(Star), R(0),
/* 1091 S> */ B(Nop),
- /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(154),
+ /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
B(Star), R(0),
/* 1105 S> */ B(Nop),
- /* 1110 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(156),
+ /* 1110 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
B(Star), R(0),
/* 1119 S> */ B(Nop),
- /* 1124 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(158),
+ /* 1124 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
B(Star), R(0),
/* 1133 S> */ B(Nop),
- /* 1138 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(160),
+ /* 1138 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
B(Star), R(0),
/* 1147 S> */ B(Nop),
- /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(162),
+ /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
B(Star), R(0),
/* 1161 S> */ B(Nop),
- /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(164),
+ /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
B(Star), R(0),
/* 1175 S> */ B(Nop),
- /* 1180 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(166),
+ /* 1180 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
B(Star), R(0),
/* 1189 S> */ B(Nop),
- /* 1194 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(168),
+ /* 1194 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
B(Star), R(0),
/* 1203 S> */ B(Nop),
- /* 1208 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(170),
+ /* 1208 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
B(Star), R(0),
/* 1217 S> */ B(Nop),
- /* 1222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(172),
+ /* 1222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
B(Star), R(0),
/* 1231 S> */ B(Nop),
- /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(174),
+ /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
B(Star), R(0),
/* 1245 S> */ B(Nop),
- /* 1250 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(176),
+ /* 1250 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
B(Star), R(0),
/* 1259 S> */ B(Nop),
- /* 1264 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(178),
+ /* 1264 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
B(Star), R(0),
/* 1273 S> */ B(Nop),
- /* 1278 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(180),
+ /* 1278 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
B(Star), R(0),
/* 1287 S> */ B(Nop),
- /* 1292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(182),
+ /* 1292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
B(Star), R(0),
/* 1301 S> */ B(Nop),
- /* 1306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(184),
+ /* 1306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
B(Star), R(0),
/* 1315 S> */ B(Nop),
- /* 1320 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(186),
+ /* 1320 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
B(Star), R(0),
/* 1329 S> */ B(Nop),
- /* 1334 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(188),
+ /* 1334 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
B(Star), R(0),
/* 1343 S> */ B(Nop),
- /* 1348 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(190),
+ /* 1348 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
B(Star), R(0),
/* 1357 S> */ B(Nop),
- /* 1362 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(192),
+ /* 1362 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
B(Star), R(0),
/* 1371 S> */ B(Nop),
- /* 1376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(194),
+ /* 1376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
B(Star), R(0),
/* 1385 S> */ B(Nop),
- /* 1390 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(196),
+ /* 1390 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
B(Star), R(0),
/* 1399 S> */ B(Nop),
- /* 1404 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(198),
+ /* 1404 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
B(Star), R(0),
/* 1413 S> */ B(Nop),
- /* 1418 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(200),
+ /* 1418 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
B(Star), R(0),
/* 1427 S> */ B(Nop),
- /* 1432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(202),
+ /* 1432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
B(Star), R(0),
/* 1441 S> */ B(Nop),
- /* 1446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(204),
+ /* 1446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
B(Star), R(0),
/* 1455 S> */ B(Nop),
- /* 1460 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(206),
+ /* 1460 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
B(Star), R(0),
/* 1469 S> */ B(Nop),
- /* 1474 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(208),
+ /* 1474 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
B(Star), R(0),
/* 1483 S> */ B(Nop),
- /* 1488 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(210),
+ /* 1488 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
B(Star), R(0),
/* 1497 S> */ B(Nop),
- /* 1502 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(212),
+ /* 1502 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
B(Star), R(0),
/* 1511 S> */ B(Nop),
- /* 1516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(214),
+ /* 1516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
B(Star), R(0),
/* 1525 S> */ B(Nop),
- /* 1530 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(216),
+ /* 1530 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
B(Star), R(0),
/* 1539 S> */ B(Nop),
- /* 1544 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(218),
+ /* 1544 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
B(Star), R(0),
/* 1553 S> */ B(Nop),
- /* 1558 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(220),
+ /* 1558 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
B(Star), R(0),
/* 1567 S> */ B(Nop),
- /* 1572 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(222),
+ /* 1572 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
B(Star), R(0),
/* 1581 S> */ B(Nop),
- /* 1586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(224),
+ /* 1586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
B(Star), R(0),
/* 1595 S> */ B(Nop),
- /* 1600 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(226),
+ /* 1600 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
B(Star), R(0),
/* 1609 S> */ B(Nop),
- /* 1614 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(228),
+ /* 1614 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
B(Star), R(0),
/* 1623 S> */ B(Nop),
- /* 1628 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(230),
+ /* 1628 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
B(Star), R(0),
/* 1637 S> */ B(Nop),
- /* 1642 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(232),
+ /* 1642 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
B(Star), R(0),
/* 1651 S> */ B(Nop),
- /* 1656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(234),
+ /* 1656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
B(Star), R(0),
/* 1665 S> */ B(Nop),
- /* 1670 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(236),
+ /* 1670 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
B(Star), R(0),
/* 1679 S> */ B(Nop),
- /* 1684 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(238),
+ /* 1684 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
B(Star), R(0),
/* 1693 S> */ B(Nop),
- /* 1698 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(240),
+ /* 1698 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
B(Star), R(0),
/* 1707 S> */ B(Nop),
- /* 1712 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(242),
+ /* 1712 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
B(Star), R(0),
/* 1721 S> */ B(Nop),
- /* 1726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(244),
+ /* 1726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
B(Star), R(0),
/* 1735 S> */ B(Nop),
- /* 1740 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(246),
+ /* 1740 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
B(Star), R(0),
/* 1749 S> */ B(Nop),
- /* 1754 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(248),
+ /* 1754 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
B(Star), R(0),
/* 1763 S> */ B(Nop),
- /* 1768 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(250),
+ /* 1768 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
B(Star), R(0),
/* 1777 S> */ B(Nop),
- /* 1782 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
+ /* 1782 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
B(Star), R(0),
/* 1791 S> */ B(Nop),
- /* 1796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
+ /* 1796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
B(Star), R(0),
/* 1805 S> */ B(Nop),
- /* 1810 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(256),
+ /* 1810 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(257),
B(Star), R(0),
/* 1819 S> */ B(Nop),
- /* 1827 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(258),
+ /* 1827 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(259),
/* 1834 S> */ B(Return),
]
constant pool: [
@@ -784,391 +784,391 @@ bytecode array length: 909
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 30 S> */ B(Ldar), R(arg1),
- /* 36 E> */ B(LdaKeyedProperty), R(arg0), U8(2),
+ /* 36 E> */ B(LdaKeyedProperty), R(arg0), U8(3),
B(Star), R(0),
/* 42 S> */ B(Ldar), R(arg1),
- /* 48 E> */ B(LdaKeyedProperty), R(arg0), U8(4),
+ /* 48 E> */ B(LdaKeyedProperty), R(arg0), U8(5),
B(Star), R(0),
/* 54 S> */ B(Ldar), R(arg1),
- /* 60 E> */ B(LdaKeyedProperty), R(arg0), U8(6),
+ /* 60 E> */ B(LdaKeyedProperty), R(arg0), U8(7),
B(Star), R(0),
/* 66 S> */ B(Ldar), R(arg1),
- /* 72 E> */ B(LdaKeyedProperty), R(arg0), U8(8),
+ /* 72 E> */ B(LdaKeyedProperty), R(arg0), U8(9),
B(Star), R(0),
/* 78 S> */ B(Ldar), R(arg1),
- /* 84 E> */ B(LdaKeyedProperty), R(arg0), U8(10),
+ /* 84 E> */ B(LdaKeyedProperty), R(arg0), U8(11),
B(Star), R(0),
/* 90 S> */ B(Ldar), R(arg1),
- /* 96 E> */ B(LdaKeyedProperty), R(arg0), U8(12),
+ /* 96 E> */ B(LdaKeyedProperty), R(arg0), U8(13),
B(Star), R(0),
/* 102 S> */ B(Ldar), R(arg1),
- /* 108 E> */ B(LdaKeyedProperty), R(arg0), U8(14),
+ /* 108 E> */ B(LdaKeyedProperty), R(arg0), U8(15),
B(Star), R(0),
/* 114 S> */ B(Ldar), R(arg1),
- /* 120 E> */ B(LdaKeyedProperty), R(arg0), U8(16),
+ /* 120 E> */ B(LdaKeyedProperty), R(arg0), U8(17),
B(Star), R(0),
/* 126 S> */ B(Ldar), R(arg1),
- /* 132 E> */ B(LdaKeyedProperty), R(arg0), U8(18),
+ /* 132 E> */ B(LdaKeyedProperty), R(arg0), U8(19),
B(Star), R(0),
/* 138 S> */ B(Ldar), R(arg1),
- /* 144 E> */ B(LdaKeyedProperty), R(arg0), U8(20),
+ /* 144 E> */ B(LdaKeyedProperty), R(arg0), U8(21),
B(Star), R(0),
/* 150 S> */ B(Ldar), R(arg1),
- /* 156 E> */ B(LdaKeyedProperty), R(arg0), U8(22),
+ /* 156 E> */ B(LdaKeyedProperty), R(arg0), U8(23),
B(Star), R(0),
/* 162 S> */ B(Ldar), R(arg1),
- /* 168 E> */ B(LdaKeyedProperty), R(arg0), U8(24),
+ /* 168 E> */ B(LdaKeyedProperty), R(arg0), U8(25),
B(Star), R(0),
/* 174 S> */ B(Ldar), R(arg1),
- /* 180 E> */ B(LdaKeyedProperty), R(arg0), U8(26),
+ /* 180 E> */ B(LdaKeyedProperty), R(arg0), U8(27),
B(Star), R(0),
/* 186 S> */ B(Ldar), R(arg1),
- /* 192 E> */ B(LdaKeyedProperty), R(arg0), U8(28),
+ /* 192 E> */ B(LdaKeyedProperty), R(arg0), U8(29),
B(Star), R(0),
/* 198 S> */ B(Ldar), R(arg1),
- /* 204 E> */ B(LdaKeyedProperty), R(arg0), U8(30),
+ /* 204 E> */ B(LdaKeyedProperty), R(arg0), U8(31),
B(Star), R(0),
/* 210 S> */ B(Ldar), R(arg1),
- /* 216 E> */ B(LdaKeyedProperty), R(arg0), U8(32),
+ /* 216 E> */ B(LdaKeyedProperty), R(arg0), U8(33),
B(Star), R(0),
/* 222 S> */ B(Ldar), R(arg1),
- /* 228 E> */ B(LdaKeyedProperty), R(arg0), U8(34),
+ /* 228 E> */ B(LdaKeyedProperty), R(arg0), U8(35),
B(Star), R(0),
/* 234 S> */ B(Ldar), R(arg1),
- /* 240 E> */ B(LdaKeyedProperty), R(arg0), U8(36),
+ /* 240 E> */ B(LdaKeyedProperty), R(arg0), U8(37),
B(Star), R(0),
/* 246 S> */ B(Ldar), R(arg1),
- /* 252 E> */ B(LdaKeyedProperty), R(arg0), U8(38),
+ /* 252 E> */ B(LdaKeyedProperty), R(arg0), U8(39),
B(Star), R(0),
/* 258 S> */ B(Ldar), R(arg1),
- /* 264 E> */ B(LdaKeyedProperty), R(arg0), U8(40),
+ /* 264 E> */ B(LdaKeyedProperty), R(arg0), U8(41),
B(Star), R(0),
/* 270 S> */ B(Ldar), R(arg1),
- /* 276 E> */ B(LdaKeyedProperty), R(arg0), U8(42),
+ /* 276 E> */ B(LdaKeyedProperty), R(arg0), U8(43),
B(Star), R(0),
/* 282 S> */ B(Ldar), R(arg1),
- /* 288 E> */ B(LdaKeyedProperty), R(arg0), U8(44),
+ /* 288 E> */ B(LdaKeyedProperty), R(arg0), U8(45),
B(Star), R(0),
/* 294 S> */ B(Ldar), R(arg1),
- /* 300 E> */ B(LdaKeyedProperty), R(arg0), U8(46),
+ /* 300 E> */ B(LdaKeyedProperty), R(arg0), U8(47),
B(Star), R(0),
/* 306 S> */ B(Ldar), R(arg1),
- /* 312 E> */ B(LdaKeyedProperty), R(arg0), U8(48),
+ /* 312 E> */ B(LdaKeyedProperty), R(arg0), U8(49),
B(Star), R(0),
/* 318 S> */ B(Ldar), R(arg1),
- /* 324 E> */ B(LdaKeyedProperty), R(arg0), U8(50),
+ /* 324 E> */ B(LdaKeyedProperty), R(arg0), U8(51),
B(Star), R(0),
/* 330 S> */ B(Ldar), R(arg1),
- /* 336 E> */ B(LdaKeyedProperty), R(arg0), U8(52),
+ /* 336 E> */ B(LdaKeyedProperty), R(arg0), U8(53),
B(Star), R(0),
/* 342 S> */ B(Ldar), R(arg1),
- /* 348 E> */ B(LdaKeyedProperty), R(arg0), U8(54),
+ /* 348 E> */ B(LdaKeyedProperty), R(arg0), U8(55),
B(Star), R(0),
/* 354 S> */ B(Ldar), R(arg1),
- /* 360 E> */ B(LdaKeyedProperty), R(arg0), U8(56),
+ /* 360 E> */ B(LdaKeyedProperty), R(arg0), U8(57),
B(Star), R(0),
/* 366 S> */ B(Ldar), R(arg1),
- /* 372 E> */ B(LdaKeyedProperty), R(arg0), U8(58),
+ /* 372 E> */ B(LdaKeyedProperty), R(arg0), U8(59),
B(Star), R(0),
/* 378 S> */ B(Ldar), R(arg1),
- /* 384 E> */ B(LdaKeyedProperty), R(arg0), U8(60),
+ /* 384 E> */ B(LdaKeyedProperty), R(arg0), U8(61),
B(Star), R(0),
/* 390 S> */ B(Ldar), R(arg1),
- /* 396 E> */ B(LdaKeyedProperty), R(arg0), U8(62),
+ /* 396 E> */ B(LdaKeyedProperty), R(arg0), U8(63),
B(Star), R(0),
/* 402 S> */ B(Ldar), R(arg1),
- /* 408 E> */ B(LdaKeyedProperty), R(arg0), U8(64),
+ /* 408 E> */ B(LdaKeyedProperty), R(arg0), U8(65),
B(Star), R(0),
/* 414 S> */ B(Ldar), R(arg1),
- /* 420 E> */ B(LdaKeyedProperty), R(arg0), U8(66),
+ /* 420 E> */ B(LdaKeyedProperty), R(arg0), U8(67),
B(Star), R(0),
/* 426 S> */ B(Ldar), R(arg1),
- /* 432 E> */ B(LdaKeyedProperty), R(arg0), U8(68),
+ /* 432 E> */ B(LdaKeyedProperty), R(arg0), U8(69),
B(Star), R(0),
/* 438 S> */ B(Ldar), R(arg1),
- /* 444 E> */ B(LdaKeyedProperty), R(arg0), U8(70),
+ /* 444 E> */ B(LdaKeyedProperty), R(arg0), U8(71),
B(Star), R(0),
/* 450 S> */ B(Ldar), R(arg1),
- /* 456 E> */ B(LdaKeyedProperty), R(arg0), U8(72),
+ /* 456 E> */ B(LdaKeyedProperty), R(arg0), U8(73),
B(Star), R(0),
/* 462 S> */ B(Ldar), R(arg1),
- /* 468 E> */ B(LdaKeyedProperty), R(arg0), U8(74),
+ /* 468 E> */ B(LdaKeyedProperty), R(arg0), U8(75),
B(Star), R(0),
/* 474 S> */ B(Ldar), R(arg1),
- /* 480 E> */ B(LdaKeyedProperty), R(arg0), U8(76),
+ /* 480 E> */ B(LdaKeyedProperty), R(arg0), U8(77),
B(Star), R(0),
/* 486 S> */ B(Ldar), R(arg1),
- /* 492 E> */ B(LdaKeyedProperty), R(arg0), U8(78),
+ /* 492 E> */ B(LdaKeyedProperty), R(arg0), U8(79),
B(Star), R(0),
/* 498 S> */ B(Ldar), R(arg1),
- /* 504 E> */ B(LdaKeyedProperty), R(arg0), U8(80),
+ /* 504 E> */ B(LdaKeyedProperty), R(arg0), U8(81),
B(Star), R(0),
/* 510 S> */ B(Ldar), R(arg1),
- /* 516 E> */ B(LdaKeyedProperty), R(arg0), U8(82),
+ /* 516 E> */ B(LdaKeyedProperty), R(arg0), U8(83),
B(Star), R(0),
/* 522 S> */ B(Ldar), R(arg1),
- /* 528 E> */ B(LdaKeyedProperty), R(arg0), U8(84),
+ /* 528 E> */ B(LdaKeyedProperty), R(arg0), U8(85),
B(Star), R(0),
/* 534 S> */ B(Ldar), R(arg1),
- /* 540 E> */ B(LdaKeyedProperty), R(arg0), U8(86),
+ /* 540 E> */ B(LdaKeyedProperty), R(arg0), U8(87),
B(Star), R(0),
/* 546 S> */ B(Ldar), R(arg1),
- /* 552 E> */ B(LdaKeyedProperty), R(arg0), U8(88),
+ /* 552 E> */ B(LdaKeyedProperty), R(arg0), U8(89),
B(Star), R(0),
/* 558 S> */ B(Ldar), R(arg1),
- /* 564 E> */ B(LdaKeyedProperty), R(arg0), U8(90),
+ /* 564 E> */ B(LdaKeyedProperty), R(arg0), U8(91),
B(Star), R(0),
/* 570 S> */ B(Ldar), R(arg1),
- /* 576 E> */ B(LdaKeyedProperty), R(arg0), U8(92),
+ /* 576 E> */ B(LdaKeyedProperty), R(arg0), U8(93),
B(Star), R(0),
/* 582 S> */ B(Ldar), R(arg1),
- /* 588 E> */ B(LdaKeyedProperty), R(arg0), U8(94),
+ /* 588 E> */ B(LdaKeyedProperty), R(arg0), U8(95),
B(Star), R(0),
/* 594 S> */ B(Ldar), R(arg1),
- /* 600 E> */ B(LdaKeyedProperty), R(arg0), U8(96),
+ /* 600 E> */ B(LdaKeyedProperty), R(arg0), U8(97),
B(Star), R(0),
/* 606 S> */ B(Ldar), R(arg1),
- /* 612 E> */ B(LdaKeyedProperty), R(arg0), U8(98),
+ /* 612 E> */ B(LdaKeyedProperty), R(arg0), U8(99),
B(Star), R(0),
/* 618 S> */ B(Ldar), R(arg1),
- /* 624 E> */ B(LdaKeyedProperty), R(arg0), U8(100),
+ /* 624 E> */ B(LdaKeyedProperty), R(arg0), U8(101),
B(Star), R(0),
/* 630 S> */ B(Ldar), R(arg1),
- /* 636 E> */ B(LdaKeyedProperty), R(arg0), U8(102),
+ /* 636 E> */ B(LdaKeyedProperty), R(arg0), U8(103),
B(Star), R(0),
/* 642 S> */ B(Ldar), R(arg1),
- /* 648 E> */ B(LdaKeyedProperty), R(arg0), U8(104),
+ /* 648 E> */ B(LdaKeyedProperty), R(arg0), U8(105),
B(Star), R(0),
/* 654 S> */ B(Ldar), R(arg1),
- /* 660 E> */ B(LdaKeyedProperty), R(arg0), U8(106),
+ /* 660 E> */ B(LdaKeyedProperty), R(arg0), U8(107),
B(Star), R(0),
/* 666 S> */ B(Ldar), R(arg1),
- /* 672 E> */ B(LdaKeyedProperty), R(arg0), U8(108),
+ /* 672 E> */ B(LdaKeyedProperty), R(arg0), U8(109),
B(Star), R(0),
/* 678 S> */ B(Ldar), R(arg1),
- /* 684 E> */ B(LdaKeyedProperty), R(arg0), U8(110),
+ /* 684 E> */ B(LdaKeyedProperty), R(arg0), U8(111),
B(Star), R(0),
/* 690 S> */ B(Ldar), R(arg1),
- /* 696 E> */ B(LdaKeyedProperty), R(arg0), U8(112),
+ /* 696 E> */ B(LdaKeyedProperty), R(arg0), U8(113),
B(Star), R(0),
/* 702 S> */ B(Ldar), R(arg1),
- /* 708 E> */ B(LdaKeyedProperty), R(arg0), U8(114),
+ /* 708 E> */ B(LdaKeyedProperty), R(arg0), U8(115),
B(Star), R(0),
/* 714 S> */ B(Ldar), R(arg1),
- /* 720 E> */ B(LdaKeyedProperty), R(arg0), U8(116),
+ /* 720 E> */ B(LdaKeyedProperty), R(arg0), U8(117),
B(Star), R(0),
/* 726 S> */ B(Ldar), R(arg1),
- /* 732 E> */ B(LdaKeyedProperty), R(arg0), U8(118),
+ /* 732 E> */ B(LdaKeyedProperty), R(arg0), U8(119),
B(Star), R(0),
/* 738 S> */ B(Ldar), R(arg1),
- /* 744 E> */ B(LdaKeyedProperty), R(arg0), U8(120),
+ /* 744 E> */ B(LdaKeyedProperty), R(arg0), U8(121),
B(Star), R(0),
/* 750 S> */ B(Ldar), R(arg1),
- /* 756 E> */ B(LdaKeyedProperty), R(arg0), U8(122),
+ /* 756 E> */ B(LdaKeyedProperty), R(arg0), U8(123),
B(Star), R(0),
/* 762 S> */ B(Ldar), R(arg1),
- /* 768 E> */ B(LdaKeyedProperty), R(arg0), U8(124),
+ /* 768 E> */ B(LdaKeyedProperty), R(arg0), U8(125),
B(Star), R(0),
/* 774 S> */ B(Ldar), R(arg1),
- /* 780 E> */ B(LdaKeyedProperty), R(arg0), U8(126),
+ /* 780 E> */ B(LdaKeyedProperty), R(arg0), U8(127),
B(Star), R(0),
/* 786 S> */ B(Ldar), R(arg1),
- /* 792 E> */ B(LdaKeyedProperty), R(arg0), U8(128),
+ /* 792 E> */ B(LdaKeyedProperty), R(arg0), U8(129),
B(Star), R(0),
/* 798 S> */ B(Ldar), R(arg1),
- /* 804 E> */ B(LdaKeyedProperty), R(arg0), U8(130),
+ /* 804 E> */ B(LdaKeyedProperty), R(arg0), U8(131),
B(Star), R(0),
/* 810 S> */ B(Ldar), R(arg1),
- /* 816 E> */ B(LdaKeyedProperty), R(arg0), U8(132),
+ /* 816 E> */ B(LdaKeyedProperty), R(arg0), U8(133),
B(Star), R(0),
/* 822 S> */ B(Ldar), R(arg1),
- /* 828 E> */ B(LdaKeyedProperty), R(arg0), U8(134),
+ /* 828 E> */ B(LdaKeyedProperty), R(arg0), U8(135),
B(Star), R(0),
/* 834 S> */ B(Ldar), R(arg1),
- /* 840 E> */ B(LdaKeyedProperty), R(arg0), U8(136),
+ /* 840 E> */ B(LdaKeyedProperty), R(arg0), U8(137),
B(Star), R(0),
/* 846 S> */ B(Ldar), R(arg1),
- /* 852 E> */ B(LdaKeyedProperty), R(arg0), U8(138),
+ /* 852 E> */ B(LdaKeyedProperty), R(arg0), U8(139),
B(Star), R(0),
/* 858 S> */ B(Ldar), R(arg1),
- /* 864 E> */ B(LdaKeyedProperty), R(arg0), U8(140),
+ /* 864 E> */ B(LdaKeyedProperty), R(arg0), U8(141),
B(Star), R(0),
/* 870 S> */ B(Ldar), R(arg1),
- /* 876 E> */ B(LdaKeyedProperty), R(arg0), U8(142),
+ /* 876 E> */ B(LdaKeyedProperty), R(arg0), U8(143),
B(Star), R(0),
/* 882 S> */ B(Ldar), R(arg1),
- /* 888 E> */ B(LdaKeyedProperty), R(arg0), U8(144),
+ /* 888 E> */ B(LdaKeyedProperty), R(arg0), U8(145),
B(Star), R(0),
/* 894 S> */ B(Ldar), R(arg1),
- /* 900 E> */ B(LdaKeyedProperty), R(arg0), U8(146),
+ /* 900 E> */ B(LdaKeyedProperty), R(arg0), U8(147),
B(Star), R(0),
/* 906 S> */ B(Ldar), R(arg1),
- /* 912 E> */ B(LdaKeyedProperty), R(arg0), U8(148),
+ /* 912 E> */ B(LdaKeyedProperty), R(arg0), U8(149),
B(Star), R(0),
/* 918 S> */ B(Ldar), R(arg1),
- /* 924 E> */ B(LdaKeyedProperty), R(arg0), U8(150),
+ /* 924 E> */ B(LdaKeyedProperty), R(arg0), U8(151),
B(Star), R(0),
/* 930 S> */ B(Ldar), R(arg1),
- /* 936 E> */ B(LdaKeyedProperty), R(arg0), U8(152),
+ /* 936 E> */ B(LdaKeyedProperty), R(arg0), U8(153),
B(Star), R(0),
/* 942 S> */ B(Ldar), R(arg1),
- /* 948 E> */ B(LdaKeyedProperty), R(arg0), U8(154),
+ /* 948 E> */ B(LdaKeyedProperty), R(arg0), U8(155),
B(Star), R(0),
/* 954 S> */ B(Ldar), R(arg1),
- /* 960 E> */ B(LdaKeyedProperty), R(arg0), U8(156),
+ /* 960 E> */ B(LdaKeyedProperty), R(arg0), U8(157),
B(Star), R(0),
/* 966 S> */ B(Ldar), R(arg1),
- /* 972 E> */ B(LdaKeyedProperty), R(arg0), U8(158),
+ /* 972 E> */ B(LdaKeyedProperty), R(arg0), U8(159),
B(Star), R(0),
/* 978 S> */ B(Ldar), R(arg1),
- /* 984 E> */ B(LdaKeyedProperty), R(arg0), U8(160),
+ /* 984 E> */ B(LdaKeyedProperty), R(arg0), U8(161),
B(Star), R(0),
/* 990 S> */ B(Ldar), R(arg1),
- /* 996 E> */ B(LdaKeyedProperty), R(arg0), U8(162),
+ /* 996 E> */ B(LdaKeyedProperty), R(arg0), U8(163),
B(Star), R(0),
/* 1002 S> */ B(Ldar), R(arg1),
- /* 1008 E> */ B(LdaKeyedProperty), R(arg0), U8(164),
+ /* 1008 E> */ B(LdaKeyedProperty), R(arg0), U8(165),
B(Star), R(0),
/* 1014 S> */ B(Ldar), R(arg1),
- /* 1020 E> */ B(LdaKeyedProperty), R(arg0), U8(166),
+ /* 1020 E> */ B(LdaKeyedProperty), R(arg0), U8(167),
B(Star), R(0),
/* 1026 S> */ B(Ldar), R(arg1),
- /* 1032 E> */ B(LdaKeyedProperty), R(arg0), U8(168),
+ /* 1032 E> */ B(LdaKeyedProperty), R(arg0), U8(169),
B(Star), R(0),
/* 1038 S> */ B(Ldar), R(arg1),
- /* 1044 E> */ B(LdaKeyedProperty), R(arg0), U8(170),
+ /* 1044 E> */ B(LdaKeyedProperty), R(arg0), U8(171),
B(Star), R(0),
/* 1050 S> */ B(Ldar), R(arg1),
- /* 1056 E> */ B(LdaKeyedProperty), R(arg0), U8(172),
+ /* 1056 E> */ B(LdaKeyedProperty), R(arg0), U8(173),
B(Star), R(0),
/* 1062 S> */ B(Ldar), R(arg1),
- /* 1068 E> */ B(LdaKeyedProperty), R(arg0), U8(174),
+ /* 1068 E> */ B(LdaKeyedProperty), R(arg0), U8(175),
B(Star), R(0),
/* 1074 S> */ B(Ldar), R(arg1),
- /* 1080 E> */ B(LdaKeyedProperty), R(arg0), U8(176),
+ /* 1080 E> */ B(LdaKeyedProperty), R(arg0), U8(177),
B(Star), R(0),
/* 1086 S> */ B(Ldar), R(arg1),
- /* 1092 E> */ B(LdaKeyedProperty), R(arg0), U8(178),
+ /* 1092 E> */ B(LdaKeyedProperty), R(arg0), U8(179),
B(Star), R(0),
/* 1098 S> */ B(Ldar), R(arg1),
- /* 1104 E> */ B(LdaKeyedProperty), R(arg0), U8(180),
+ /* 1104 E> */ B(LdaKeyedProperty), R(arg0), U8(181),
B(Star), R(0),
/* 1110 S> */ B(Ldar), R(arg1),
- /* 1116 E> */ B(LdaKeyedProperty), R(arg0), U8(182),
+ /* 1116 E> */ B(LdaKeyedProperty), R(arg0), U8(183),
B(Star), R(0),
/* 1122 S> */ B(Ldar), R(arg1),
- /* 1128 E> */ B(LdaKeyedProperty), R(arg0), U8(184),
+ /* 1128 E> */ B(LdaKeyedProperty), R(arg0), U8(185),
B(Star), R(0),
/* 1134 S> */ B(Ldar), R(arg1),
- /* 1140 E> */ B(LdaKeyedProperty), R(arg0), U8(186),
+ /* 1140 E> */ B(LdaKeyedProperty), R(arg0), U8(187),
B(Star), R(0),
/* 1146 S> */ B(Ldar), R(arg1),
- /* 1152 E> */ B(LdaKeyedProperty), R(arg0), U8(188),
+ /* 1152 E> */ B(LdaKeyedProperty), R(arg0), U8(189),
B(Star), R(0),
/* 1158 S> */ B(Ldar), R(arg1),
- /* 1164 E> */ B(LdaKeyedProperty), R(arg0), U8(190),
+ /* 1164 E> */ B(LdaKeyedProperty), R(arg0), U8(191),
B(Star), R(0),
/* 1170 S> */ B(Ldar), R(arg1),
- /* 1176 E> */ B(LdaKeyedProperty), R(arg0), U8(192),
+ /* 1176 E> */ B(LdaKeyedProperty), R(arg0), U8(193),
B(Star), R(0),
/* 1182 S> */ B(Ldar), R(arg1),
- /* 1188 E> */ B(LdaKeyedProperty), R(arg0), U8(194),
+ /* 1188 E> */ B(LdaKeyedProperty), R(arg0), U8(195),
B(Star), R(0),
/* 1194 S> */ B(Ldar), R(arg1),
- /* 1200 E> */ B(LdaKeyedProperty), R(arg0), U8(196),
+ /* 1200 E> */ B(LdaKeyedProperty), R(arg0), U8(197),
B(Star), R(0),
/* 1206 S> */ B(Ldar), R(arg1),
- /* 1212 E> */ B(LdaKeyedProperty), R(arg0), U8(198),
+ /* 1212 E> */ B(LdaKeyedProperty), R(arg0), U8(199),
B(Star), R(0),
/* 1218 S> */ B(Ldar), R(arg1),
- /* 1224 E> */ B(LdaKeyedProperty), R(arg0), U8(200),
+ /* 1224 E> */ B(LdaKeyedProperty), R(arg0), U8(201),
B(Star), R(0),
/* 1230 S> */ B(Ldar), R(arg1),
- /* 1236 E> */ B(LdaKeyedProperty), R(arg0), U8(202),
+ /* 1236 E> */ B(LdaKeyedProperty), R(arg0), U8(203),
B(Star), R(0),
/* 1242 S> */ B(Ldar), R(arg1),
- /* 1248 E> */ B(LdaKeyedProperty), R(arg0), U8(204),
+ /* 1248 E> */ B(LdaKeyedProperty), R(arg0), U8(205),
B(Star), R(0),
/* 1254 S> */ B(Ldar), R(arg1),
- /* 1260 E> */ B(LdaKeyedProperty), R(arg0), U8(206),
+ /* 1260 E> */ B(LdaKeyedProperty), R(arg0), U8(207),
B(Star), R(0),
/* 1266 S> */ B(Ldar), R(arg1),
- /* 1272 E> */ B(LdaKeyedProperty), R(arg0), U8(208),
+ /* 1272 E> */ B(LdaKeyedProperty), R(arg0), U8(209),
B(Star), R(0),
/* 1278 S> */ B(Ldar), R(arg1),
- /* 1284 E> */ B(LdaKeyedProperty), R(arg0), U8(210),
+ /* 1284 E> */ B(LdaKeyedProperty), R(arg0), U8(211),
B(Star), R(0),
/* 1290 S> */ B(Ldar), R(arg1),
- /* 1296 E> */ B(LdaKeyedProperty), R(arg0), U8(212),
+ /* 1296 E> */ B(LdaKeyedProperty), R(arg0), U8(213),
B(Star), R(0),
/* 1302 S> */ B(Ldar), R(arg1),
- /* 1308 E> */ B(LdaKeyedProperty), R(arg0), U8(214),
+ /* 1308 E> */ B(LdaKeyedProperty), R(arg0), U8(215),
B(Star), R(0),
/* 1314 S> */ B(Ldar), R(arg1),
- /* 1320 E> */ B(LdaKeyedProperty), R(arg0), U8(216),
+ /* 1320 E> */ B(LdaKeyedProperty), R(arg0), U8(217),
B(Star), R(0),
/* 1326 S> */ B(Ldar), R(arg1),
- /* 1332 E> */ B(LdaKeyedProperty), R(arg0), U8(218),
+ /* 1332 E> */ B(LdaKeyedProperty), R(arg0), U8(219),
B(Star), R(0),
/* 1338 S> */ B(Ldar), R(arg1),
- /* 1344 E> */ B(LdaKeyedProperty), R(arg0), U8(220),
+ /* 1344 E> */ B(LdaKeyedProperty), R(arg0), U8(221),
B(Star), R(0),
/* 1350 S> */ B(Ldar), R(arg1),
- /* 1356 E> */ B(LdaKeyedProperty), R(arg0), U8(222),
+ /* 1356 E> */ B(LdaKeyedProperty), R(arg0), U8(223),
B(Star), R(0),
/* 1362 S> */ B(Ldar), R(arg1),
- /* 1368 E> */ B(LdaKeyedProperty), R(arg0), U8(224),
+ /* 1368 E> */ B(LdaKeyedProperty), R(arg0), U8(225),
B(Star), R(0),
/* 1374 S> */ B(Ldar), R(arg1),
- /* 1380 E> */ B(LdaKeyedProperty), R(arg0), U8(226),
+ /* 1380 E> */ B(LdaKeyedProperty), R(arg0), U8(227),
B(Star), R(0),
/* 1386 S> */ B(Ldar), R(arg1),
- /* 1392 E> */ B(LdaKeyedProperty), R(arg0), U8(228),
+ /* 1392 E> */ B(LdaKeyedProperty), R(arg0), U8(229),
B(Star), R(0),
/* 1398 S> */ B(Ldar), R(arg1),
- /* 1404 E> */ B(LdaKeyedProperty), R(arg0), U8(230),
+ /* 1404 E> */ B(LdaKeyedProperty), R(arg0), U8(231),
B(Star), R(0),
/* 1410 S> */ B(Ldar), R(arg1),
- /* 1416 E> */ B(LdaKeyedProperty), R(arg0), U8(232),
+ /* 1416 E> */ B(LdaKeyedProperty), R(arg0), U8(233),
B(Star), R(0),
/* 1422 S> */ B(Ldar), R(arg1),
- /* 1428 E> */ B(LdaKeyedProperty), R(arg0), U8(234),
+ /* 1428 E> */ B(LdaKeyedProperty), R(arg0), U8(235),
B(Star), R(0),
/* 1434 S> */ B(Ldar), R(arg1),
- /* 1440 E> */ B(LdaKeyedProperty), R(arg0), U8(236),
+ /* 1440 E> */ B(LdaKeyedProperty), R(arg0), U8(237),
B(Star), R(0),
/* 1446 S> */ B(Ldar), R(arg1),
- /* 1452 E> */ B(LdaKeyedProperty), R(arg0), U8(238),
+ /* 1452 E> */ B(LdaKeyedProperty), R(arg0), U8(239),
B(Star), R(0),
/* 1458 S> */ B(Ldar), R(arg1),
- /* 1464 E> */ B(LdaKeyedProperty), R(arg0), U8(240),
+ /* 1464 E> */ B(LdaKeyedProperty), R(arg0), U8(241),
B(Star), R(0),
/* 1470 S> */ B(Ldar), R(arg1),
- /* 1476 E> */ B(LdaKeyedProperty), R(arg0), U8(242),
+ /* 1476 E> */ B(LdaKeyedProperty), R(arg0), U8(243),
B(Star), R(0),
/* 1482 S> */ B(Ldar), R(arg1),
- /* 1488 E> */ B(LdaKeyedProperty), R(arg0), U8(244),
+ /* 1488 E> */ B(LdaKeyedProperty), R(arg0), U8(245),
B(Star), R(0),
/* 1494 S> */ B(Ldar), R(arg1),
- /* 1500 E> */ B(LdaKeyedProperty), R(arg0), U8(246),
+ /* 1500 E> */ B(LdaKeyedProperty), R(arg0), U8(247),
B(Star), R(0),
/* 1506 S> */ B(Ldar), R(arg1),
- /* 1512 E> */ B(LdaKeyedProperty), R(arg0), U8(248),
+ /* 1512 E> */ B(LdaKeyedProperty), R(arg0), U8(249),
B(Star), R(0),
/* 1518 S> */ B(Ldar), R(arg1),
- /* 1524 E> */ B(LdaKeyedProperty), R(arg0), U8(250),
+ /* 1524 E> */ B(LdaKeyedProperty), R(arg0), U8(251),
B(Star), R(0),
/* 1530 S> */ B(Ldar), R(arg1),
- /* 1536 E> */ B(LdaKeyedProperty), R(arg0), U8(252),
+ /* 1536 E> */ B(LdaKeyedProperty), R(arg0), U8(253),
B(Star), R(0),
/* 1542 S> */ B(Ldar), R(arg1),
- /* 1548 E> */ B(LdaKeyedProperty), R(arg0), U8(254),
+ /* 1548 E> */ B(LdaKeyedProperty), R(arg0), U8(255),
B(Star), R(0),
/* 1554 S> */ B(Ldar), R(arg1),
- /* 1560 E> */ B(Wide), B(LdaKeyedProperty), R16(arg0), U16(256),
+ /* 1560 E> */ B(Wide), B(LdaKeyedProperty), R16(arg0), U16(257),
B(Star), R(0),
/* 1566 S> */ B(Ldar), R(arg1),
- /* 1575 E> */ B(Wide), B(LdaKeyedProperty), R16(arg0), U16(258),
+ /* 1575 E> */ B(Wide), B(LdaKeyedProperty), R16(arg0), U16(259),
/* 1579 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
index 7e6dd5ae78..e3c161ee7a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PropertyStores.golden
@@ -17,7 +17,7 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaConstant), U8(0),
- /* 23 E> */ B(StaNamedPropertySloppy), R(arg0), U8(1), U8(2),
+ /* 23 E> */ B(StaNamedPropertySloppy), R(arg0), U8(1), U8(3),
B(LdaUndefined),
/* 32 S> */ B(Return),
]
@@ -39,7 +39,7 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaConstant), U8(0),
- /* 25 E> */ B(StaNamedPropertySloppy), R(arg0), U8(1), U8(2),
+ /* 25 E> */ B(StaNamedPropertySloppy), R(arg0), U8(1), U8(3),
B(LdaUndefined),
/* 34 S> */ B(Return),
]
@@ -63,7 +63,7 @@ bytecodes: [
/* 16 S> */ B(LdaSmi), I8(100),
B(Star), R(1),
B(LdaConstant), U8(0),
- /* 23 E> */ B(StaKeyedPropertySloppy), R(arg0), R(1), U8(2),
+ /* 23 E> */ B(StaKeyedPropertySloppy), R(arg0), R(1), U8(3),
B(LdaUndefined),
/* 32 S> */ B(Return),
]
@@ -84,7 +84,7 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 19 S> */ B(LdaConstant), U8(0),
- /* 24 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(2),
+ /* 24 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(3),
B(LdaUndefined),
/* 33 S> */ B(Return),
]
@@ -105,8 +105,8 @@ bytecode array length: 12
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 16 S> */ B(LdaSmi), I8(-124),
- /* 26 E> */ B(LdaKeyedProperty), R(arg0), U8(2),
- /* 23 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(4),
+ /* 26 E> */ B(LdaKeyedProperty), R(arg0), U8(3),
+ /* 23 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(5),
B(LdaUndefined),
/* 34 S> */ B(Return),
]
@@ -127,7 +127,7 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 30 S> */ B(LdaConstant), U8(0),
- /* 37 E> */ B(StaNamedPropertyStrict), R(arg0), U8(1), U8(2),
+ /* 37 E> */ B(StaNamedPropertyStrict), R(arg0), U8(1), U8(3),
B(LdaUndefined),
/* 46 S> */ B(Return),
]
@@ -149,7 +149,7 @@ bytecode array length: 9
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 33 S> */ B(LdaConstant), U8(0),
- /* 38 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(2),
+ /* 38 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(3),
B(LdaUndefined),
/* 47 S> */ B(Return),
]
@@ -300,263 +300,263 @@ bytecode array length: 785
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 18 S> */ B(LdaSmi), I8(1),
- /* 25 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(2),
+ /* 25 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(3),
/* 32 S> */ B(LdaSmi), I8(1),
- /* 39 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(4),
+ /* 39 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(5),
/* 46 S> */ B(LdaSmi), I8(1),
- /* 53 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(6),
+ /* 53 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(7),
/* 60 S> */ B(LdaSmi), I8(1),
- /* 67 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(8),
+ /* 67 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(9),
/* 74 S> */ B(LdaSmi), I8(1),
- /* 81 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(10),
+ /* 81 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(11),
/* 88 S> */ B(LdaSmi), I8(1),
- /* 95 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(12),
+ /* 95 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(13),
/* 102 S> */ B(LdaSmi), I8(1),
- /* 109 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(14),
+ /* 109 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(15),
/* 116 S> */ B(LdaSmi), I8(1),
- /* 123 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(16),
+ /* 123 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(17),
/* 130 S> */ B(LdaSmi), I8(1),
- /* 137 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(18),
+ /* 137 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(19),
/* 144 S> */ B(LdaSmi), I8(1),
- /* 151 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(20),
+ /* 151 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(21),
/* 158 S> */ B(LdaSmi), I8(1),
- /* 165 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(22),
+ /* 165 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(23),
/* 172 S> */ B(LdaSmi), I8(1),
- /* 179 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(24),
+ /* 179 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(25),
/* 186 S> */ B(LdaSmi), I8(1),
- /* 193 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(26),
+ /* 193 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(27),
/* 200 S> */ B(LdaSmi), I8(1),
- /* 207 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(28),
+ /* 207 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(29),
/* 214 S> */ B(LdaSmi), I8(1),
- /* 221 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(30),
+ /* 221 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(31),
/* 228 S> */ B(LdaSmi), I8(1),
- /* 235 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(32),
+ /* 235 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(33),
/* 242 S> */ B(LdaSmi), I8(1),
- /* 249 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(34),
+ /* 249 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(35),
/* 256 S> */ B(LdaSmi), I8(1),
- /* 263 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(36),
+ /* 263 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(37),
/* 270 S> */ B(LdaSmi), I8(1),
- /* 277 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(38),
+ /* 277 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(39),
/* 284 S> */ B(LdaSmi), I8(1),
- /* 291 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(40),
+ /* 291 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(41),
/* 298 S> */ B(LdaSmi), I8(1),
- /* 305 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(42),
+ /* 305 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(43),
/* 312 S> */ B(LdaSmi), I8(1),
- /* 319 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(44),
+ /* 319 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(45),
/* 326 S> */ B(LdaSmi), I8(1),
- /* 333 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(46),
+ /* 333 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(47),
/* 340 S> */ B(LdaSmi), I8(1),
- /* 347 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(48),
+ /* 347 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(49),
/* 354 S> */ B(LdaSmi), I8(1),
- /* 361 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(50),
+ /* 361 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(51),
/* 368 S> */ B(LdaSmi), I8(1),
- /* 375 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(52),
+ /* 375 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(53),
/* 382 S> */ B(LdaSmi), I8(1),
- /* 389 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(54),
+ /* 389 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(55),
/* 396 S> */ B(LdaSmi), I8(1),
- /* 403 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(56),
+ /* 403 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(57),
/* 410 S> */ B(LdaSmi), I8(1),
- /* 417 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(58),
+ /* 417 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(59),
/* 424 S> */ B(LdaSmi), I8(1),
- /* 431 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(60),
+ /* 431 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(61),
/* 438 S> */ B(LdaSmi), I8(1),
- /* 445 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(62),
+ /* 445 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(63),
/* 452 S> */ B(LdaSmi), I8(1),
- /* 459 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(64),
+ /* 459 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(65),
/* 466 S> */ B(LdaSmi), I8(1),
- /* 473 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(66),
+ /* 473 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(67),
/* 480 S> */ B(LdaSmi), I8(1),
- /* 487 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(68),
+ /* 487 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(69),
/* 494 S> */ B(LdaSmi), I8(1),
- /* 501 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(70),
+ /* 501 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(71),
/* 508 S> */ B(LdaSmi), I8(1),
- /* 515 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(72),
+ /* 515 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(73),
/* 522 S> */ B(LdaSmi), I8(1),
- /* 529 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(74),
+ /* 529 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(75),
/* 536 S> */ B(LdaSmi), I8(1),
- /* 543 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(76),
+ /* 543 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(77),
/* 550 S> */ B(LdaSmi), I8(1),
- /* 557 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(78),
+ /* 557 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(79),
/* 564 S> */ B(LdaSmi), I8(1),
- /* 571 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(80),
+ /* 571 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(81),
/* 578 S> */ B(LdaSmi), I8(1),
- /* 585 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(82),
+ /* 585 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(83),
/* 592 S> */ B(LdaSmi), I8(1),
- /* 599 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(84),
+ /* 599 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(85),
/* 606 S> */ B(LdaSmi), I8(1),
- /* 613 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(86),
+ /* 613 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(87),
/* 620 S> */ B(LdaSmi), I8(1),
- /* 627 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(88),
+ /* 627 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(89),
/* 634 S> */ B(LdaSmi), I8(1),
- /* 641 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(90),
+ /* 641 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(91),
/* 648 S> */ B(LdaSmi), I8(1),
- /* 655 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(92),
+ /* 655 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(93),
/* 662 S> */ B(LdaSmi), I8(1),
- /* 669 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(94),
+ /* 669 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(95),
/* 676 S> */ B(LdaSmi), I8(1),
- /* 683 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(96),
+ /* 683 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(97),
/* 690 S> */ B(LdaSmi), I8(1),
- /* 697 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(98),
+ /* 697 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(99),
/* 704 S> */ B(LdaSmi), I8(1),
- /* 711 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(100),
+ /* 711 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(101),
/* 718 S> */ B(LdaSmi), I8(1),
- /* 725 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(102),
+ /* 725 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(103),
/* 732 S> */ B(LdaSmi), I8(1),
- /* 739 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(104),
+ /* 739 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(105),
/* 746 S> */ B(LdaSmi), I8(1),
- /* 753 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(106),
+ /* 753 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(107),
/* 760 S> */ B(LdaSmi), I8(1),
- /* 767 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(108),
+ /* 767 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(109),
/* 774 S> */ B(LdaSmi), I8(1),
- /* 781 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(110),
+ /* 781 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(111),
/* 788 S> */ B(LdaSmi), I8(1),
- /* 795 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(112),
+ /* 795 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(113),
/* 802 S> */ B(LdaSmi), I8(1),
- /* 809 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(114),
+ /* 809 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(115),
/* 816 S> */ B(LdaSmi), I8(1),
- /* 823 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(116),
+ /* 823 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(117),
/* 830 S> */ B(LdaSmi), I8(1),
- /* 837 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(118),
+ /* 837 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(119),
/* 844 S> */ B(LdaSmi), I8(1),
- /* 851 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(120),
+ /* 851 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(121),
/* 858 S> */ B(LdaSmi), I8(1),
- /* 865 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(122),
+ /* 865 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(123),
/* 872 S> */ B(LdaSmi), I8(1),
- /* 879 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(124),
+ /* 879 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(125),
/* 886 S> */ B(LdaSmi), I8(1),
- /* 893 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(126),
+ /* 893 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(127),
/* 900 S> */ B(LdaSmi), I8(1),
- /* 907 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(128),
+ /* 907 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(129),
/* 914 S> */ B(LdaSmi), I8(1),
- /* 921 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(130),
+ /* 921 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(131),
/* 928 S> */ B(LdaSmi), I8(1),
- /* 935 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(132),
+ /* 935 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(133),
/* 942 S> */ B(LdaSmi), I8(1),
- /* 949 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(134),
+ /* 949 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(135),
/* 956 S> */ B(LdaSmi), I8(1),
- /* 963 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(136),
+ /* 963 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(137),
/* 970 S> */ B(LdaSmi), I8(1),
- /* 977 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(138),
+ /* 977 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(139),
/* 984 S> */ B(LdaSmi), I8(1),
- /* 991 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(140),
+ /* 991 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(141),
/* 998 S> */ B(LdaSmi), I8(1),
- /* 1005 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(142),
+ /* 1005 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(143),
/* 1012 S> */ B(LdaSmi), I8(1),
- /* 1019 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(144),
+ /* 1019 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(145),
/* 1026 S> */ B(LdaSmi), I8(1),
- /* 1033 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(146),
+ /* 1033 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(147),
/* 1040 S> */ B(LdaSmi), I8(1),
- /* 1047 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(148),
+ /* 1047 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(149),
/* 1054 S> */ B(LdaSmi), I8(1),
- /* 1061 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(150),
+ /* 1061 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(151),
/* 1068 S> */ B(LdaSmi), I8(1),
- /* 1075 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(152),
+ /* 1075 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(153),
/* 1082 S> */ B(LdaSmi), I8(1),
- /* 1089 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(154),
+ /* 1089 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(155),
/* 1096 S> */ B(LdaSmi), I8(1),
- /* 1103 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(156),
+ /* 1103 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(157),
/* 1110 S> */ B(LdaSmi), I8(1),
- /* 1117 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(158),
+ /* 1117 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(159),
/* 1124 S> */ B(LdaSmi), I8(1),
- /* 1131 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(160),
+ /* 1131 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(161),
/* 1138 S> */ B(LdaSmi), I8(1),
- /* 1145 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(162),
+ /* 1145 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(163),
/* 1152 S> */ B(LdaSmi), I8(1),
- /* 1159 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(164),
+ /* 1159 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(165),
/* 1166 S> */ B(LdaSmi), I8(1),
- /* 1173 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(166),
+ /* 1173 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(167),
/* 1180 S> */ B(LdaSmi), I8(1),
- /* 1187 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(168),
+ /* 1187 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(169),
/* 1194 S> */ B(LdaSmi), I8(1),
- /* 1201 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(170),
+ /* 1201 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(171),
/* 1208 S> */ B(LdaSmi), I8(1),
- /* 1215 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(172),
+ /* 1215 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(173),
/* 1222 S> */ B(LdaSmi), I8(1),
- /* 1229 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(174),
+ /* 1229 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(175),
/* 1236 S> */ B(LdaSmi), I8(1),
- /* 1243 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(176),
+ /* 1243 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(177),
/* 1250 S> */ B(LdaSmi), I8(1),
- /* 1257 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(178),
+ /* 1257 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(179),
/* 1264 S> */ B(LdaSmi), I8(1),
- /* 1271 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(180),
+ /* 1271 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(181),
/* 1278 S> */ B(LdaSmi), I8(1),
- /* 1285 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(182),
+ /* 1285 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(183),
/* 1292 S> */ B(LdaSmi), I8(1),
- /* 1299 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(184),
+ /* 1299 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(185),
/* 1306 S> */ B(LdaSmi), I8(1),
- /* 1313 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(186),
+ /* 1313 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(187),
/* 1320 S> */ B(LdaSmi), I8(1),
- /* 1327 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(188),
+ /* 1327 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(189),
/* 1334 S> */ B(LdaSmi), I8(1),
- /* 1341 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(190),
+ /* 1341 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(191),
/* 1348 S> */ B(LdaSmi), I8(1),
- /* 1355 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(192),
+ /* 1355 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(193),
/* 1362 S> */ B(LdaSmi), I8(1),
- /* 1369 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(194),
+ /* 1369 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(195),
/* 1376 S> */ B(LdaSmi), I8(1),
- /* 1383 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(196),
+ /* 1383 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(197),
/* 1390 S> */ B(LdaSmi), I8(1),
- /* 1397 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(198),
+ /* 1397 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(199),
/* 1404 S> */ B(LdaSmi), I8(1),
- /* 1411 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(200),
+ /* 1411 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(201),
/* 1418 S> */ B(LdaSmi), I8(1),
- /* 1425 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(202),
+ /* 1425 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(203),
/* 1432 S> */ B(LdaSmi), I8(1),
- /* 1439 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(204),
+ /* 1439 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(205),
/* 1446 S> */ B(LdaSmi), I8(1),
- /* 1453 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(206),
+ /* 1453 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(207),
/* 1460 S> */ B(LdaSmi), I8(1),
- /* 1467 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(208),
+ /* 1467 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(209),
/* 1474 S> */ B(LdaSmi), I8(1),
- /* 1481 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(210),
+ /* 1481 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(211),
/* 1488 S> */ B(LdaSmi), I8(1),
- /* 1495 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(212),
+ /* 1495 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(213),
/* 1502 S> */ B(LdaSmi), I8(1),
- /* 1509 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(214),
+ /* 1509 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(215),
/* 1516 S> */ B(LdaSmi), I8(1),
- /* 1523 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(216),
+ /* 1523 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(217),
/* 1530 S> */ B(LdaSmi), I8(1),
- /* 1537 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(218),
+ /* 1537 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(219),
/* 1544 S> */ B(LdaSmi), I8(1),
- /* 1551 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(220),
+ /* 1551 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(221),
/* 1558 S> */ B(LdaSmi), I8(1),
- /* 1565 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(222),
+ /* 1565 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(223),
/* 1572 S> */ B(LdaSmi), I8(1),
- /* 1579 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(224),
+ /* 1579 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(225),
/* 1586 S> */ B(LdaSmi), I8(1),
- /* 1593 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(226),
+ /* 1593 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(227),
/* 1600 S> */ B(LdaSmi), I8(1),
- /* 1607 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(228),
+ /* 1607 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(229),
/* 1614 S> */ B(LdaSmi), I8(1),
- /* 1621 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(230),
+ /* 1621 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(231),
/* 1628 S> */ B(LdaSmi), I8(1),
- /* 1635 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(232),
+ /* 1635 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(233),
/* 1642 S> */ B(LdaSmi), I8(1),
- /* 1649 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(234),
+ /* 1649 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(235),
/* 1656 S> */ B(LdaSmi), I8(1),
- /* 1663 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(236),
+ /* 1663 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(237),
/* 1670 S> */ B(LdaSmi), I8(1),
- /* 1677 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(238),
+ /* 1677 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(239),
/* 1684 S> */ B(LdaSmi), I8(1),
- /* 1691 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(240),
+ /* 1691 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(241),
/* 1698 S> */ B(LdaSmi), I8(1),
- /* 1705 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(242),
+ /* 1705 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(243),
/* 1712 S> */ B(LdaSmi), I8(1),
- /* 1719 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(244),
+ /* 1719 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(245),
/* 1726 S> */ B(LdaSmi), I8(1),
- /* 1733 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(246),
+ /* 1733 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(247),
/* 1740 S> */ B(LdaSmi), I8(1),
- /* 1747 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(248),
+ /* 1747 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(249),
/* 1754 S> */ B(LdaSmi), I8(1),
- /* 1761 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(250),
+ /* 1761 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(251),
/* 1768 S> */ B(LdaSmi), I8(1),
- /* 1775 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(252),
+ /* 1775 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(253),
/* 1782 S> */ B(LdaSmi), I8(1),
- /* 1789 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(254),
+ /* 1789 E> */ B(StaNamedPropertySloppy), R(arg0), U8(0), U8(255),
/* 1796 S> */ B(LdaSmi), I8(1),
- /* 1803 E> */ B(Wide), B(StaNamedPropertySloppy), R16(arg0), U16(0), U16(256),
+ /* 1803 E> */ B(Wide), B(StaNamedPropertySloppy), R16(arg0), U16(0), U16(257),
/* 1810 S> */ B(LdaSmi), I8(2),
- /* 1817 E> */ B(Wide), B(StaNamedPropertySloppy), R16(arg0), U16(0), U16(258),
+ /* 1817 E> */ B(Wide), B(StaNamedPropertySloppy), R16(arg0), U16(0), U16(259),
B(LdaUndefined),
/* 1822 S> */ B(Return),
]
@@ -708,263 +708,263 @@ bytecode array length: 785
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 33 S> */ B(LdaSmi), I8(1),
- /* 40 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(2),
+ /* 40 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(3),
/* 47 S> */ B(LdaSmi), I8(1),
- /* 54 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(4),
+ /* 54 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(5),
/* 61 S> */ B(LdaSmi), I8(1),
- /* 68 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(6),
+ /* 68 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(7),
/* 75 S> */ B(LdaSmi), I8(1),
- /* 82 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(8),
+ /* 82 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(9),
/* 89 S> */ B(LdaSmi), I8(1),
- /* 96 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(10),
+ /* 96 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(11),
/* 103 S> */ B(LdaSmi), I8(1),
- /* 110 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(12),
+ /* 110 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(13),
/* 117 S> */ B(LdaSmi), I8(1),
- /* 124 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(14),
+ /* 124 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(15),
/* 131 S> */ B(LdaSmi), I8(1),
- /* 138 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(16),
+ /* 138 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(17),
/* 145 S> */ B(LdaSmi), I8(1),
- /* 152 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(18),
+ /* 152 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(19),
/* 159 S> */ B(LdaSmi), I8(1),
- /* 166 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(20),
+ /* 166 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(21),
/* 173 S> */ B(LdaSmi), I8(1),
- /* 180 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(22),
+ /* 180 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(23),
/* 187 S> */ B(LdaSmi), I8(1),
- /* 194 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(24),
+ /* 194 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(25),
/* 201 S> */ B(LdaSmi), I8(1),
- /* 208 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(26),
+ /* 208 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(27),
/* 215 S> */ B(LdaSmi), I8(1),
- /* 222 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(28),
+ /* 222 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(29),
/* 229 S> */ B(LdaSmi), I8(1),
- /* 236 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(30),
+ /* 236 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(31),
/* 243 S> */ B(LdaSmi), I8(1),
- /* 250 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(32),
+ /* 250 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(33),
/* 257 S> */ B(LdaSmi), I8(1),
- /* 264 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(34),
+ /* 264 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(35),
/* 271 S> */ B(LdaSmi), I8(1),
- /* 278 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(36),
+ /* 278 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(37),
/* 285 S> */ B(LdaSmi), I8(1),
- /* 292 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(38),
+ /* 292 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(39),
/* 299 S> */ B(LdaSmi), I8(1),
- /* 306 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(40),
+ /* 306 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(41),
/* 313 S> */ B(LdaSmi), I8(1),
- /* 320 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(42),
+ /* 320 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(43),
/* 327 S> */ B(LdaSmi), I8(1),
- /* 334 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(44),
+ /* 334 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(45),
/* 341 S> */ B(LdaSmi), I8(1),
- /* 348 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(46),
+ /* 348 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(47),
/* 355 S> */ B(LdaSmi), I8(1),
- /* 362 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(48),
+ /* 362 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(49),
/* 369 S> */ B(LdaSmi), I8(1),
- /* 376 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(50),
+ /* 376 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(51),
/* 383 S> */ B(LdaSmi), I8(1),
- /* 390 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(52),
+ /* 390 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(53),
/* 397 S> */ B(LdaSmi), I8(1),
- /* 404 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(54),
+ /* 404 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(55),
/* 411 S> */ B(LdaSmi), I8(1),
- /* 418 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(56),
+ /* 418 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(57),
/* 425 S> */ B(LdaSmi), I8(1),
- /* 432 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(58),
+ /* 432 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(59),
/* 439 S> */ B(LdaSmi), I8(1),
- /* 446 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(60),
+ /* 446 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(61),
/* 453 S> */ B(LdaSmi), I8(1),
- /* 460 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(62),
+ /* 460 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(63),
/* 467 S> */ B(LdaSmi), I8(1),
- /* 474 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(64),
+ /* 474 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(65),
/* 481 S> */ B(LdaSmi), I8(1),
- /* 488 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(66),
+ /* 488 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(67),
/* 495 S> */ B(LdaSmi), I8(1),
- /* 502 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(68),
+ /* 502 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(69),
/* 509 S> */ B(LdaSmi), I8(1),
- /* 516 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(70),
+ /* 516 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(71),
/* 523 S> */ B(LdaSmi), I8(1),
- /* 530 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(72),
+ /* 530 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(73),
/* 537 S> */ B(LdaSmi), I8(1),
- /* 544 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(74),
+ /* 544 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(75),
/* 551 S> */ B(LdaSmi), I8(1),
- /* 558 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(76),
+ /* 558 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(77),
/* 565 S> */ B(LdaSmi), I8(1),
- /* 572 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(78),
+ /* 572 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(79),
/* 579 S> */ B(LdaSmi), I8(1),
- /* 586 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(80),
+ /* 586 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(81),
/* 593 S> */ B(LdaSmi), I8(1),
- /* 600 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(82),
+ /* 600 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(83),
/* 607 S> */ B(LdaSmi), I8(1),
- /* 614 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(84),
+ /* 614 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(85),
/* 621 S> */ B(LdaSmi), I8(1),
- /* 628 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(86),
+ /* 628 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(87),
/* 635 S> */ B(LdaSmi), I8(1),
- /* 642 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(88),
+ /* 642 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(89),
/* 649 S> */ B(LdaSmi), I8(1),
- /* 656 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(90),
+ /* 656 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(91),
/* 663 S> */ B(LdaSmi), I8(1),
- /* 670 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(92),
+ /* 670 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(93),
/* 677 S> */ B(LdaSmi), I8(1),
- /* 684 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(94),
+ /* 684 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(95),
/* 691 S> */ B(LdaSmi), I8(1),
- /* 698 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(96),
+ /* 698 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(97),
/* 705 S> */ B(LdaSmi), I8(1),
- /* 712 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(98),
+ /* 712 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(99),
/* 719 S> */ B(LdaSmi), I8(1),
- /* 726 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(100),
+ /* 726 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(101),
/* 733 S> */ B(LdaSmi), I8(1),
- /* 740 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(102),
+ /* 740 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(103),
/* 747 S> */ B(LdaSmi), I8(1),
- /* 754 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(104),
+ /* 754 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(105),
/* 761 S> */ B(LdaSmi), I8(1),
- /* 768 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(106),
+ /* 768 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(107),
/* 775 S> */ B(LdaSmi), I8(1),
- /* 782 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(108),
+ /* 782 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(109),
/* 789 S> */ B(LdaSmi), I8(1),
- /* 796 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(110),
+ /* 796 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(111),
/* 803 S> */ B(LdaSmi), I8(1),
- /* 810 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(112),
+ /* 810 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(113),
/* 817 S> */ B(LdaSmi), I8(1),
- /* 824 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(114),
+ /* 824 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(115),
/* 831 S> */ B(LdaSmi), I8(1),
- /* 838 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(116),
+ /* 838 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(117),
/* 845 S> */ B(LdaSmi), I8(1),
- /* 852 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(118),
+ /* 852 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(119),
/* 859 S> */ B(LdaSmi), I8(1),
- /* 866 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(120),
+ /* 866 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(121),
/* 873 S> */ B(LdaSmi), I8(1),
- /* 880 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(122),
+ /* 880 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(123),
/* 887 S> */ B(LdaSmi), I8(1),
- /* 894 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(124),
+ /* 894 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(125),
/* 901 S> */ B(LdaSmi), I8(1),
- /* 908 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(126),
+ /* 908 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(127),
/* 915 S> */ B(LdaSmi), I8(1),
- /* 922 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(128),
+ /* 922 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(129),
/* 929 S> */ B(LdaSmi), I8(1),
- /* 936 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(130),
+ /* 936 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(131),
/* 943 S> */ B(LdaSmi), I8(1),
- /* 950 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(132),
+ /* 950 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(133),
/* 957 S> */ B(LdaSmi), I8(1),
- /* 964 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(134),
+ /* 964 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(135),
/* 971 S> */ B(LdaSmi), I8(1),
- /* 978 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(136),
+ /* 978 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(137),
/* 985 S> */ B(LdaSmi), I8(1),
- /* 992 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(138),
+ /* 992 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(139),
/* 999 S> */ B(LdaSmi), I8(1),
- /* 1006 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(140),
+ /* 1006 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(141),
/* 1013 S> */ B(LdaSmi), I8(1),
- /* 1020 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(142),
+ /* 1020 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(143),
/* 1027 S> */ B(LdaSmi), I8(1),
- /* 1034 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(144),
+ /* 1034 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(145),
/* 1041 S> */ B(LdaSmi), I8(1),
- /* 1048 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(146),
+ /* 1048 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(147),
/* 1055 S> */ B(LdaSmi), I8(1),
- /* 1062 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(148),
+ /* 1062 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(149),
/* 1069 S> */ B(LdaSmi), I8(1),
- /* 1076 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(150),
+ /* 1076 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(151),
/* 1083 S> */ B(LdaSmi), I8(1),
- /* 1090 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(152),
+ /* 1090 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(153),
/* 1097 S> */ B(LdaSmi), I8(1),
- /* 1104 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(154),
+ /* 1104 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(155),
/* 1111 S> */ B(LdaSmi), I8(1),
- /* 1118 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(156),
+ /* 1118 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(157),
/* 1125 S> */ B(LdaSmi), I8(1),
- /* 1132 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(158),
+ /* 1132 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(159),
/* 1139 S> */ B(LdaSmi), I8(1),
- /* 1146 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(160),
+ /* 1146 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(161),
/* 1153 S> */ B(LdaSmi), I8(1),
- /* 1160 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(162),
+ /* 1160 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(163),
/* 1167 S> */ B(LdaSmi), I8(1),
- /* 1174 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(164),
+ /* 1174 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(165),
/* 1181 S> */ B(LdaSmi), I8(1),
- /* 1188 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(166),
+ /* 1188 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(167),
/* 1195 S> */ B(LdaSmi), I8(1),
- /* 1202 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(168),
+ /* 1202 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(169),
/* 1209 S> */ B(LdaSmi), I8(1),
- /* 1216 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(170),
+ /* 1216 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(171),
/* 1223 S> */ B(LdaSmi), I8(1),
- /* 1230 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(172),
+ /* 1230 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(173),
/* 1237 S> */ B(LdaSmi), I8(1),
- /* 1244 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(174),
+ /* 1244 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(175),
/* 1251 S> */ B(LdaSmi), I8(1),
- /* 1258 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(176),
+ /* 1258 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(177),
/* 1265 S> */ B(LdaSmi), I8(1),
- /* 1272 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(178),
+ /* 1272 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(179),
/* 1279 S> */ B(LdaSmi), I8(1),
- /* 1286 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(180),
+ /* 1286 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(181),
/* 1293 S> */ B(LdaSmi), I8(1),
- /* 1300 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(182),
+ /* 1300 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(183),
/* 1307 S> */ B(LdaSmi), I8(1),
- /* 1314 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(184),
+ /* 1314 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(185),
/* 1321 S> */ B(LdaSmi), I8(1),
- /* 1328 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(186),
+ /* 1328 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(187),
/* 1335 S> */ B(LdaSmi), I8(1),
- /* 1342 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(188),
+ /* 1342 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(189),
/* 1349 S> */ B(LdaSmi), I8(1),
- /* 1356 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(190),
+ /* 1356 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(191),
/* 1363 S> */ B(LdaSmi), I8(1),
- /* 1370 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(192),
+ /* 1370 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(193),
/* 1377 S> */ B(LdaSmi), I8(1),
- /* 1384 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(194),
+ /* 1384 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(195),
/* 1391 S> */ B(LdaSmi), I8(1),
- /* 1398 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(196),
+ /* 1398 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(197),
/* 1405 S> */ B(LdaSmi), I8(1),
- /* 1412 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(198),
+ /* 1412 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(199),
/* 1419 S> */ B(LdaSmi), I8(1),
- /* 1426 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(200),
+ /* 1426 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(201),
/* 1433 S> */ B(LdaSmi), I8(1),
- /* 1440 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(202),
+ /* 1440 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(203),
/* 1447 S> */ B(LdaSmi), I8(1),
- /* 1454 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(204),
+ /* 1454 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(205),
/* 1461 S> */ B(LdaSmi), I8(1),
- /* 1468 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(206),
+ /* 1468 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(207),
/* 1475 S> */ B(LdaSmi), I8(1),
- /* 1482 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(208),
+ /* 1482 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(209),
/* 1489 S> */ B(LdaSmi), I8(1),
- /* 1496 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(210),
+ /* 1496 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(211),
/* 1503 S> */ B(LdaSmi), I8(1),
- /* 1510 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(212),
+ /* 1510 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(213),
/* 1517 S> */ B(LdaSmi), I8(1),
- /* 1524 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(214),
+ /* 1524 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(215),
/* 1531 S> */ B(LdaSmi), I8(1),
- /* 1538 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(216),
+ /* 1538 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(217),
/* 1545 S> */ B(LdaSmi), I8(1),
- /* 1552 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(218),
+ /* 1552 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(219),
/* 1559 S> */ B(LdaSmi), I8(1),
- /* 1566 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(220),
+ /* 1566 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(221),
/* 1573 S> */ B(LdaSmi), I8(1),
- /* 1580 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(222),
+ /* 1580 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(223),
/* 1587 S> */ B(LdaSmi), I8(1),
- /* 1594 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(224),
+ /* 1594 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(225),
/* 1601 S> */ B(LdaSmi), I8(1),
- /* 1608 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(226),
+ /* 1608 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(227),
/* 1615 S> */ B(LdaSmi), I8(1),
- /* 1622 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(228),
+ /* 1622 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(229),
/* 1629 S> */ B(LdaSmi), I8(1),
- /* 1636 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(230),
+ /* 1636 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(231),
/* 1643 S> */ B(LdaSmi), I8(1),
- /* 1650 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(232),
+ /* 1650 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(233),
/* 1657 S> */ B(LdaSmi), I8(1),
- /* 1664 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(234),
+ /* 1664 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(235),
/* 1671 S> */ B(LdaSmi), I8(1),
- /* 1678 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(236),
+ /* 1678 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(237),
/* 1685 S> */ B(LdaSmi), I8(1),
- /* 1692 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(238),
+ /* 1692 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(239),
/* 1699 S> */ B(LdaSmi), I8(1),
- /* 1706 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(240),
+ /* 1706 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(241),
/* 1713 S> */ B(LdaSmi), I8(1),
- /* 1720 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(242),
+ /* 1720 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(243),
/* 1727 S> */ B(LdaSmi), I8(1),
- /* 1734 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(244),
+ /* 1734 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(245),
/* 1741 S> */ B(LdaSmi), I8(1),
- /* 1748 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(246),
+ /* 1748 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(247),
/* 1755 S> */ B(LdaSmi), I8(1),
- /* 1762 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(248),
+ /* 1762 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(249),
/* 1769 S> */ B(LdaSmi), I8(1),
- /* 1776 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(250),
+ /* 1776 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(251),
/* 1783 S> */ B(LdaSmi), I8(1),
- /* 1790 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(252),
+ /* 1790 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(253),
/* 1797 S> */ B(LdaSmi), I8(1),
- /* 1804 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(254),
+ /* 1804 E> */ B(StaNamedPropertyStrict), R(arg0), U8(0), U8(255),
/* 1811 S> */ B(LdaSmi), I8(1),
- /* 1818 E> */ B(Wide), B(StaNamedPropertyStrict), R16(arg0), U16(0), U16(256),
+ /* 1818 E> */ B(Wide), B(StaNamedPropertyStrict), R16(arg0), U16(0), U16(257),
/* 1825 S> */ B(LdaSmi), I8(2),
- /* 1832 E> */ B(Wide), B(StaNamedPropertyStrict), R16(arg0), U16(0), U16(258),
+ /* 1832 E> */ B(Wide), B(StaNamedPropertyStrict), R16(arg0), U16(0), U16(259),
B(LdaUndefined),
/* 1837 S> */ B(Return),
]
@@ -1115,263 +1115,263 @@ bytecode array length: 785
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 21 S> */ B(LdaSmi), I8(1),
- /* 26 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(2),
+ /* 26 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(3),
/* 33 S> */ B(LdaSmi), I8(1),
- /* 38 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(4),
+ /* 38 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(5),
/* 45 S> */ B(LdaSmi), I8(1),
- /* 50 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(6),
+ /* 50 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(7),
/* 57 S> */ B(LdaSmi), I8(1),
- /* 62 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(8),
+ /* 62 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(9),
/* 69 S> */ B(LdaSmi), I8(1),
- /* 74 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(10),
+ /* 74 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(11),
/* 81 S> */ B(LdaSmi), I8(1),
- /* 86 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(12),
+ /* 86 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(13),
/* 93 S> */ B(LdaSmi), I8(1),
- /* 98 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(14),
+ /* 98 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(15),
/* 105 S> */ B(LdaSmi), I8(1),
- /* 110 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(16),
+ /* 110 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(17),
/* 117 S> */ B(LdaSmi), I8(1),
- /* 122 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(18),
+ /* 122 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(19),
/* 129 S> */ B(LdaSmi), I8(1),
- /* 134 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(20),
+ /* 134 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(21),
/* 141 S> */ B(LdaSmi), I8(1),
- /* 146 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(22),
+ /* 146 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(23),
/* 153 S> */ B(LdaSmi), I8(1),
- /* 158 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(24),
+ /* 158 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(25),
/* 165 S> */ B(LdaSmi), I8(1),
- /* 170 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(26),
+ /* 170 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(27),
/* 177 S> */ B(LdaSmi), I8(1),
- /* 182 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(28),
+ /* 182 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(29),
/* 189 S> */ B(LdaSmi), I8(1),
- /* 194 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(30),
+ /* 194 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(31),
/* 201 S> */ B(LdaSmi), I8(1),
- /* 206 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(32),
+ /* 206 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(33),
/* 213 S> */ B(LdaSmi), I8(1),
- /* 218 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(34),
+ /* 218 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(35),
/* 225 S> */ B(LdaSmi), I8(1),
- /* 230 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(36),
+ /* 230 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(37),
/* 237 S> */ B(LdaSmi), I8(1),
- /* 242 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(38),
+ /* 242 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(39),
/* 249 S> */ B(LdaSmi), I8(1),
- /* 254 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(40),
+ /* 254 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(41),
/* 261 S> */ B(LdaSmi), I8(1),
- /* 266 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(42),
+ /* 266 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(43),
/* 273 S> */ B(LdaSmi), I8(1),
- /* 278 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(44),
+ /* 278 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(45),
/* 285 S> */ B(LdaSmi), I8(1),
- /* 290 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(46),
+ /* 290 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(47),
/* 297 S> */ B(LdaSmi), I8(1),
- /* 302 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(48),
+ /* 302 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(49),
/* 309 S> */ B(LdaSmi), I8(1),
- /* 314 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(50),
+ /* 314 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(51),
/* 321 S> */ B(LdaSmi), I8(1),
- /* 326 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(52),
+ /* 326 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(53),
/* 333 S> */ B(LdaSmi), I8(1),
- /* 338 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(54),
+ /* 338 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(55),
/* 345 S> */ B(LdaSmi), I8(1),
- /* 350 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(56),
+ /* 350 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(57),
/* 357 S> */ B(LdaSmi), I8(1),
- /* 362 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(58),
+ /* 362 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(59),
/* 369 S> */ B(LdaSmi), I8(1),
- /* 374 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(60),
+ /* 374 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(61),
/* 381 S> */ B(LdaSmi), I8(1),
- /* 386 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(62),
+ /* 386 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(63),
/* 393 S> */ B(LdaSmi), I8(1),
- /* 398 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(64),
+ /* 398 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(65),
/* 405 S> */ B(LdaSmi), I8(1),
- /* 410 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(66),
+ /* 410 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(67),
/* 417 S> */ B(LdaSmi), I8(1),
- /* 422 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(68),
+ /* 422 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(69),
/* 429 S> */ B(LdaSmi), I8(1),
- /* 434 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(70),
+ /* 434 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(71),
/* 441 S> */ B(LdaSmi), I8(1),
- /* 446 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(72),
+ /* 446 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(73),
/* 453 S> */ B(LdaSmi), I8(1),
- /* 458 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(74),
+ /* 458 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(75),
/* 465 S> */ B(LdaSmi), I8(1),
- /* 470 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(76),
+ /* 470 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(77),
/* 477 S> */ B(LdaSmi), I8(1),
- /* 482 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(78),
+ /* 482 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(79),
/* 489 S> */ B(LdaSmi), I8(1),
- /* 494 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(80),
+ /* 494 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(81),
/* 501 S> */ B(LdaSmi), I8(1),
- /* 506 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(82),
+ /* 506 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(83),
/* 513 S> */ B(LdaSmi), I8(1),
- /* 518 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(84),
+ /* 518 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(85),
/* 525 S> */ B(LdaSmi), I8(1),
- /* 530 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(86),
+ /* 530 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(87),
/* 537 S> */ B(LdaSmi), I8(1),
- /* 542 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(88),
+ /* 542 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(89),
/* 549 S> */ B(LdaSmi), I8(1),
- /* 554 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(90),
+ /* 554 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(91),
/* 561 S> */ B(LdaSmi), I8(1),
- /* 566 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(92),
+ /* 566 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(93),
/* 573 S> */ B(LdaSmi), I8(1),
- /* 578 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(94),
+ /* 578 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(95),
/* 585 S> */ B(LdaSmi), I8(1),
- /* 590 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(96),
+ /* 590 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(97),
/* 597 S> */ B(LdaSmi), I8(1),
- /* 602 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(98),
+ /* 602 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(99),
/* 609 S> */ B(LdaSmi), I8(1),
- /* 614 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(100),
+ /* 614 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(101),
/* 621 S> */ B(LdaSmi), I8(1),
- /* 626 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(102),
+ /* 626 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(103),
/* 633 S> */ B(LdaSmi), I8(1),
- /* 638 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(104),
+ /* 638 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(105),
/* 645 S> */ B(LdaSmi), I8(1),
- /* 650 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(106),
+ /* 650 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(107),
/* 657 S> */ B(LdaSmi), I8(1),
- /* 662 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(108),
+ /* 662 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(109),
/* 669 S> */ B(LdaSmi), I8(1),
- /* 674 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(110),
+ /* 674 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(111),
/* 681 S> */ B(LdaSmi), I8(1),
- /* 686 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(112),
+ /* 686 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(113),
/* 693 S> */ B(LdaSmi), I8(1),
- /* 698 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(114),
+ /* 698 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(115),
/* 705 S> */ B(LdaSmi), I8(1),
- /* 710 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(116),
+ /* 710 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(117),
/* 717 S> */ B(LdaSmi), I8(1),
- /* 722 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(118),
+ /* 722 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(119),
/* 729 S> */ B(LdaSmi), I8(1),
- /* 734 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(120),
+ /* 734 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(121),
/* 741 S> */ B(LdaSmi), I8(1),
- /* 746 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(122),
+ /* 746 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(123),
/* 753 S> */ B(LdaSmi), I8(1),
- /* 758 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(124),
+ /* 758 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(125),
/* 765 S> */ B(LdaSmi), I8(1),
- /* 770 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(126),
+ /* 770 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(127),
/* 777 S> */ B(LdaSmi), I8(1),
- /* 782 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(128),
+ /* 782 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(129),
/* 789 S> */ B(LdaSmi), I8(1),
- /* 794 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(130),
+ /* 794 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(131),
/* 801 S> */ B(LdaSmi), I8(1),
- /* 806 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(132),
+ /* 806 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(133),
/* 813 S> */ B(LdaSmi), I8(1),
- /* 818 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(134),
+ /* 818 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(135),
/* 825 S> */ B(LdaSmi), I8(1),
- /* 830 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(136),
+ /* 830 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(137),
/* 837 S> */ B(LdaSmi), I8(1),
- /* 842 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(138),
+ /* 842 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(139),
/* 849 S> */ B(LdaSmi), I8(1),
- /* 854 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(140),
+ /* 854 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(141),
/* 861 S> */ B(LdaSmi), I8(1),
- /* 866 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(142),
+ /* 866 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(143),
/* 873 S> */ B(LdaSmi), I8(1),
- /* 878 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(144),
+ /* 878 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(145),
/* 885 S> */ B(LdaSmi), I8(1),
- /* 890 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(146),
+ /* 890 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(147),
/* 897 S> */ B(LdaSmi), I8(1),
- /* 902 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(148),
+ /* 902 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(149),
/* 909 S> */ B(LdaSmi), I8(1),
- /* 914 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(150),
+ /* 914 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(151),
/* 921 S> */ B(LdaSmi), I8(1),
- /* 926 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(152),
+ /* 926 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(153),
/* 933 S> */ B(LdaSmi), I8(1),
- /* 938 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(154),
+ /* 938 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(155),
/* 945 S> */ B(LdaSmi), I8(1),
- /* 950 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(156),
+ /* 950 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(157),
/* 957 S> */ B(LdaSmi), I8(1),
- /* 962 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(158),
+ /* 962 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(159),
/* 969 S> */ B(LdaSmi), I8(1),
- /* 974 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(160),
+ /* 974 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(161),
/* 981 S> */ B(LdaSmi), I8(1),
- /* 986 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(162),
+ /* 986 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(163),
/* 993 S> */ B(LdaSmi), I8(1),
- /* 998 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(164),
+ /* 998 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(165),
/* 1005 S> */ B(LdaSmi), I8(1),
- /* 1010 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(166),
+ /* 1010 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(167),
/* 1017 S> */ B(LdaSmi), I8(1),
- /* 1022 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(168),
+ /* 1022 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(169),
/* 1029 S> */ B(LdaSmi), I8(1),
- /* 1034 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(170),
+ /* 1034 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(171),
/* 1041 S> */ B(LdaSmi), I8(1),
- /* 1046 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(172),
+ /* 1046 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(173),
/* 1053 S> */ B(LdaSmi), I8(1),
- /* 1058 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(174),
+ /* 1058 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(175),
/* 1065 S> */ B(LdaSmi), I8(1),
- /* 1070 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(176),
+ /* 1070 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(177),
/* 1077 S> */ B(LdaSmi), I8(1),
- /* 1082 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(178),
+ /* 1082 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(179),
/* 1089 S> */ B(LdaSmi), I8(1),
- /* 1094 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(180),
+ /* 1094 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(181),
/* 1101 S> */ B(LdaSmi), I8(1),
- /* 1106 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(182),
+ /* 1106 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(183),
/* 1113 S> */ B(LdaSmi), I8(1),
- /* 1118 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(184),
+ /* 1118 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(185),
/* 1125 S> */ B(LdaSmi), I8(1),
- /* 1130 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(186),
+ /* 1130 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(187),
/* 1137 S> */ B(LdaSmi), I8(1),
- /* 1142 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(188),
+ /* 1142 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(189),
/* 1149 S> */ B(LdaSmi), I8(1),
- /* 1154 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(190),
+ /* 1154 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(191),
/* 1161 S> */ B(LdaSmi), I8(1),
- /* 1166 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(192),
+ /* 1166 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(193),
/* 1173 S> */ B(LdaSmi), I8(1),
- /* 1178 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(194),
+ /* 1178 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(195),
/* 1185 S> */ B(LdaSmi), I8(1),
- /* 1190 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(196),
+ /* 1190 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(197),
/* 1197 S> */ B(LdaSmi), I8(1),
- /* 1202 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(198),
+ /* 1202 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(199),
/* 1209 S> */ B(LdaSmi), I8(1),
- /* 1214 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(200),
+ /* 1214 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(201),
/* 1221 S> */ B(LdaSmi), I8(1),
- /* 1226 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(202),
+ /* 1226 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(203),
/* 1233 S> */ B(LdaSmi), I8(1),
- /* 1238 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(204),
+ /* 1238 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(205),
/* 1245 S> */ B(LdaSmi), I8(1),
- /* 1250 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(206),
+ /* 1250 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(207),
/* 1257 S> */ B(LdaSmi), I8(1),
- /* 1262 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(208),
+ /* 1262 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(209),
/* 1269 S> */ B(LdaSmi), I8(1),
- /* 1274 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(210),
+ /* 1274 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(211),
/* 1281 S> */ B(LdaSmi), I8(1),
- /* 1286 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(212),
+ /* 1286 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(213),
/* 1293 S> */ B(LdaSmi), I8(1),
- /* 1298 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(214),
+ /* 1298 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(215),
/* 1305 S> */ B(LdaSmi), I8(1),
- /* 1310 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(216),
+ /* 1310 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(217),
/* 1317 S> */ B(LdaSmi), I8(1),
- /* 1322 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(218),
+ /* 1322 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(219),
/* 1329 S> */ B(LdaSmi), I8(1),
- /* 1334 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(220),
+ /* 1334 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(221),
/* 1341 S> */ B(LdaSmi), I8(1),
- /* 1346 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(222),
+ /* 1346 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(223),
/* 1353 S> */ B(LdaSmi), I8(1),
- /* 1358 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(224),
+ /* 1358 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(225),
/* 1365 S> */ B(LdaSmi), I8(1),
- /* 1370 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(226),
+ /* 1370 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(227),
/* 1377 S> */ B(LdaSmi), I8(1),
- /* 1382 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(228),
+ /* 1382 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(229),
/* 1389 S> */ B(LdaSmi), I8(1),
- /* 1394 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(230),
+ /* 1394 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(231),
/* 1401 S> */ B(LdaSmi), I8(1),
- /* 1406 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(232),
+ /* 1406 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(233),
/* 1413 S> */ B(LdaSmi), I8(1),
- /* 1418 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(234),
+ /* 1418 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(235),
/* 1425 S> */ B(LdaSmi), I8(1),
- /* 1430 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(236),
+ /* 1430 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(237),
/* 1437 S> */ B(LdaSmi), I8(1),
- /* 1442 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(238),
+ /* 1442 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(239),
/* 1449 S> */ B(LdaSmi), I8(1),
- /* 1454 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(240),
+ /* 1454 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(241),
/* 1461 S> */ B(LdaSmi), I8(1),
- /* 1466 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(242),
+ /* 1466 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(243),
/* 1473 S> */ B(LdaSmi), I8(1),
- /* 1478 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(244),
+ /* 1478 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(245),
/* 1485 S> */ B(LdaSmi), I8(1),
- /* 1490 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(246),
+ /* 1490 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(247),
/* 1497 S> */ B(LdaSmi), I8(1),
- /* 1502 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(248),
+ /* 1502 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(249),
/* 1509 S> */ B(LdaSmi), I8(1),
- /* 1514 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(250),
+ /* 1514 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(251),
/* 1521 S> */ B(LdaSmi), I8(1),
- /* 1526 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(252),
+ /* 1526 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(253),
/* 1533 S> */ B(LdaSmi), I8(1),
- /* 1538 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(254),
+ /* 1538 E> */ B(StaKeyedPropertySloppy), R(arg0), R(arg1), U8(255),
/* 1545 S> */ B(LdaSmi), I8(1),
- /* 1550 E> */ B(Wide), B(StaKeyedPropertySloppy), R16(arg0), R16(arg1), U16(256),
+ /* 1550 E> */ B(Wide), B(StaKeyedPropertySloppy), R16(arg0), R16(arg1), U16(257),
/* 1557 S> */ B(LdaSmi), I8(2),
- /* 1562 E> */ B(Wide), B(StaKeyedPropertySloppy), R16(arg0), R16(arg1), U16(258),
+ /* 1562 E> */ B(Wide), B(StaKeyedPropertySloppy), R16(arg0), R16(arg1), U16(259),
B(LdaUndefined),
/* 1567 S> */ B(Return),
]
@@ -1522,263 +1522,263 @@ bytecode array length: 785
bytecodes: [
/* 10 E> */ B(StackCheck),
/* 37 S> */ B(LdaSmi), I8(1),
- /* 42 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(2),
+ /* 42 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(3),
/* 49 S> */ B(LdaSmi), I8(1),
- /* 54 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(4),
+ /* 54 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(5),
/* 61 S> */ B(LdaSmi), I8(1),
- /* 66 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(6),
+ /* 66 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(7),
/* 73 S> */ B(LdaSmi), I8(1),
- /* 78 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(8),
+ /* 78 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(9),
/* 85 S> */ B(LdaSmi), I8(1),
- /* 90 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(10),
+ /* 90 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(11),
/* 97 S> */ B(LdaSmi), I8(1),
- /* 102 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(12),
+ /* 102 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(13),
/* 109 S> */ B(LdaSmi), I8(1),
- /* 114 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(14),
+ /* 114 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(15),
/* 121 S> */ B(LdaSmi), I8(1),
- /* 126 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(16),
+ /* 126 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(17),
/* 133 S> */ B(LdaSmi), I8(1),
- /* 138 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(18),
+ /* 138 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(19),
/* 145 S> */ B(LdaSmi), I8(1),
- /* 150 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(20),
+ /* 150 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(21),
/* 157 S> */ B(LdaSmi), I8(1),
- /* 162 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(22),
+ /* 162 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(23),
/* 169 S> */ B(LdaSmi), I8(1),
- /* 174 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(24),
+ /* 174 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(25),
/* 181 S> */ B(LdaSmi), I8(1),
- /* 186 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(26),
+ /* 186 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(27),
/* 193 S> */ B(LdaSmi), I8(1),
- /* 198 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(28),
+ /* 198 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(29),
/* 205 S> */ B(LdaSmi), I8(1),
- /* 210 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(30),
+ /* 210 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(31),
/* 217 S> */ B(LdaSmi), I8(1),
- /* 222 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(32),
+ /* 222 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(33),
/* 229 S> */ B(LdaSmi), I8(1),
- /* 234 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(34),
+ /* 234 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(35),
/* 241 S> */ B(LdaSmi), I8(1),
- /* 246 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(36),
+ /* 246 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(37),
/* 253 S> */ B(LdaSmi), I8(1),
- /* 258 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(38),
+ /* 258 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(39),
/* 265 S> */ B(LdaSmi), I8(1),
- /* 270 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(40),
+ /* 270 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(41),
/* 277 S> */ B(LdaSmi), I8(1),
- /* 282 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(42),
+ /* 282 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(43),
/* 289 S> */ B(LdaSmi), I8(1),
- /* 294 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(44),
+ /* 294 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(45),
/* 301 S> */ B(LdaSmi), I8(1),
- /* 306 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(46),
+ /* 306 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(47),
/* 313 S> */ B(LdaSmi), I8(1),
- /* 318 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(48),
+ /* 318 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(49),
/* 325 S> */ B(LdaSmi), I8(1),
- /* 330 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(50),
+ /* 330 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(51),
/* 337 S> */ B(LdaSmi), I8(1),
- /* 342 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(52),
+ /* 342 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(53),
/* 349 S> */ B(LdaSmi), I8(1),
- /* 354 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(54),
+ /* 354 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(55),
/* 361 S> */ B(LdaSmi), I8(1),
- /* 366 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(56),
+ /* 366 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(57),
/* 373 S> */ B(LdaSmi), I8(1),
- /* 378 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(58),
+ /* 378 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(59),
/* 385 S> */ B(LdaSmi), I8(1),
- /* 390 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(60),
+ /* 390 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(61),
/* 397 S> */ B(LdaSmi), I8(1),
- /* 402 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(62),
+ /* 402 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(63),
/* 409 S> */ B(LdaSmi), I8(1),
- /* 414 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(64),
+ /* 414 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(65),
/* 421 S> */ B(LdaSmi), I8(1),
- /* 426 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(66),
+ /* 426 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(67),
/* 433 S> */ B(LdaSmi), I8(1),
- /* 438 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(68),
+ /* 438 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(69),
/* 445 S> */ B(LdaSmi), I8(1),
- /* 450 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(70),
+ /* 450 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(71),
/* 457 S> */ B(LdaSmi), I8(1),
- /* 462 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(72),
+ /* 462 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(73),
/* 469 S> */ B(LdaSmi), I8(1),
- /* 474 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(74),
+ /* 474 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(75),
/* 481 S> */ B(LdaSmi), I8(1),
- /* 486 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(76),
+ /* 486 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(77),
/* 493 S> */ B(LdaSmi), I8(1),
- /* 498 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(78),
+ /* 498 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(79),
/* 505 S> */ B(LdaSmi), I8(1),
- /* 510 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(80),
+ /* 510 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(81),
/* 517 S> */ B(LdaSmi), I8(1),
- /* 522 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(82),
+ /* 522 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(83),
/* 529 S> */ B(LdaSmi), I8(1),
- /* 534 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(84),
+ /* 534 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(85),
/* 541 S> */ B(LdaSmi), I8(1),
- /* 546 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(86),
+ /* 546 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(87),
/* 553 S> */ B(LdaSmi), I8(1),
- /* 558 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(88),
+ /* 558 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(89),
/* 565 S> */ B(LdaSmi), I8(1),
- /* 570 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(90),
+ /* 570 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(91),
/* 577 S> */ B(LdaSmi), I8(1),
- /* 582 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(92),
+ /* 582 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(93),
/* 589 S> */ B(LdaSmi), I8(1),
- /* 594 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(94),
+ /* 594 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(95),
/* 601 S> */ B(LdaSmi), I8(1),
- /* 606 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(96),
+ /* 606 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(97),
/* 613 S> */ B(LdaSmi), I8(1),
- /* 618 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(98),
+ /* 618 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(99),
/* 625 S> */ B(LdaSmi), I8(1),
- /* 630 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(100),
+ /* 630 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(101),
/* 637 S> */ B(LdaSmi), I8(1),
- /* 642 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(102),
+ /* 642 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(103),
/* 649 S> */ B(LdaSmi), I8(1),
- /* 654 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(104),
+ /* 654 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(105),
/* 661 S> */ B(LdaSmi), I8(1),
- /* 666 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(106),
+ /* 666 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(107),
/* 673 S> */ B(LdaSmi), I8(1),
- /* 678 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(108),
+ /* 678 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(109),
/* 685 S> */ B(LdaSmi), I8(1),
- /* 690 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(110),
+ /* 690 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(111),
/* 697 S> */ B(LdaSmi), I8(1),
- /* 702 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(112),
+ /* 702 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(113),
/* 709 S> */ B(LdaSmi), I8(1),
- /* 714 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(114),
+ /* 714 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(115),
/* 721 S> */ B(LdaSmi), I8(1),
- /* 726 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(116),
+ /* 726 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(117),
/* 733 S> */ B(LdaSmi), I8(1),
- /* 738 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(118),
+ /* 738 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(119),
/* 745 S> */ B(LdaSmi), I8(1),
- /* 750 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(120),
+ /* 750 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(121),
/* 757 S> */ B(LdaSmi), I8(1),
- /* 762 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(122),
+ /* 762 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(123),
/* 769 S> */ B(LdaSmi), I8(1),
- /* 774 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(124),
+ /* 774 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(125),
/* 781 S> */ B(LdaSmi), I8(1),
- /* 786 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(126),
+ /* 786 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(127),
/* 793 S> */ B(LdaSmi), I8(1),
- /* 798 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(128),
+ /* 798 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(129),
/* 805 S> */ B(LdaSmi), I8(1),
- /* 810 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(130),
+ /* 810 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(131),
/* 817 S> */ B(LdaSmi), I8(1),
- /* 822 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(132),
+ /* 822 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(133),
/* 829 S> */ B(LdaSmi), I8(1),
- /* 834 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(134),
+ /* 834 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(135),
/* 841 S> */ B(LdaSmi), I8(1),
- /* 846 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(136),
+ /* 846 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(137),
/* 853 S> */ B(LdaSmi), I8(1),
- /* 858 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(138),
+ /* 858 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(139),
/* 865 S> */ B(LdaSmi), I8(1),
- /* 870 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(140),
+ /* 870 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(141),
/* 877 S> */ B(LdaSmi), I8(1),
- /* 882 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(142),
+ /* 882 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(143),
/* 889 S> */ B(LdaSmi), I8(1),
- /* 894 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(144),
+ /* 894 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(145),
/* 901 S> */ B(LdaSmi), I8(1),
- /* 906 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(146),
+ /* 906 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(147),
/* 913 S> */ B(LdaSmi), I8(1),
- /* 918 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(148),
+ /* 918 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(149),
/* 925 S> */ B(LdaSmi), I8(1),
- /* 930 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(150),
+ /* 930 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(151),
/* 937 S> */ B(LdaSmi), I8(1),
- /* 942 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(152),
+ /* 942 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(153),
/* 949 S> */ B(LdaSmi), I8(1),
- /* 954 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(154),
+ /* 954 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(155),
/* 961 S> */ B(LdaSmi), I8(1),
- /* 966 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(156),
+ /* 966 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(157),
/* 973 S> */ B(LdaSmi), I8(1),
- /* 978 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(158),
+ /* 978 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(159),
/* 985 S> */ B(LdaSmi), I8(1),
- /* 990 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(160),
+ /* 990 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(161),
/* 997 S> */ B(LdaSmi), I8(1),
- /* 1002 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(162),
+ /* 1002 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(163),
/* 1009 S> */ B(LdaSmi), I8(1),
- /* 1014 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(164),
+ /* 1014 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(165),
/* 1021 S> */ B(LdaSmi), I8(1),
- /* 1026 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(166),
+ /* 1026 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(167),
/* 1033 S> */ B(LdaSmi), I8(1),
- /* 1038 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(168),
+ /* 1038 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(169),
/* 1045 S> */ B(LdaSmi), I8(1),
- /* 1050 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(170),
+ /* 1050 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(171),
/* 1057 S> */ B(LdaSmi), I8(1),
- /* 1062 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(172),
+ /* 1062 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(173),
/* 1069 S> */ B(LdaSmi), I8(1),
- /* 1074 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(174),
+ /* 1074 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(175),
/* 1081 S> */ B(LdaSmi), I8(1),
- /* 1086 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(176),
+ /* 1086 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(177),
/* 1093 S> */ B(LdaSmi), I8(1),
- /* 1098 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(178),
+ /* 1098 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(179),
/* 1105 S> */ B(LdaSmi), I8(1),
- /* 1110 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(180),
+ /* 1110 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(181),
/* 1117 S> */ B(LdaSmi), I8(1),
- /* 1122 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(182),
+ /* 1122 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(183),
/* 1129 S> */ B(LdaSmi), I8(1),
- /* 1134 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(184),
+ /* 1134 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(185),
/* 1141 S> */ B(LdaSmi), I8(1),
- /* 1146 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(186),
+ /* 1146 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(187),
/* 1153 S> */ B(LdaSmi), I8(1),
- /* 1158 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(188),
+ /* 1158 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(189),
/* 1165 S> */ B(LdaSmi), I8(1),
- /* 1170 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(190),
+ /* 1170 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(191),
/* 1177 S> */ B(LdaSmi), I8(1),
- /* 1182 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(192),
+ /* 1182 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(193),
/* 1189 S> */ B(LdaSmi), I8(1),
- /* 1194 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(194),
+ /* 1194 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(195),
/* 1201 S> */ B(LdaSmi), I8(1),
- /* 1206 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(196),
+ /* 1206 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(197),
/* 1213 S> */ B(LdaSmi), I8(1),
- /* 1218 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(198),
+ /* 1218 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(199),
/* 1225 S> */ B(LdaSmi), I8(1),
- /* 1230 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(200),
+ /* 1230 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(201),
/* 1237 S> */ B(LdaSmi), I8(1),
- /* 1242 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(202),
+ /* 1242 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(203),
/* 1249 S> */ B(LdaSmi), I8(1),
- /* 1254 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(204),
+ /* 1254 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(205),
/* 1261 S> */ B(LdaSmi), I8(1),
- /* 1266 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(206),
+ /* 1266 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(207),
/* 1273 S> */ B(LdaSmi), I8(1),
- /* 1278 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(208),
+ /* 1278 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(209),
/* 1285 S> */ B(LdaSmi), I8(1),
- /* 1290 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(210),
+ /* 1290 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(211),
/* 1297 S> */ B(LdaSmi), I8(1),
- /* 1302 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(212),
+ /* 1302 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(213),
/* 1309 S> */ B(LdaSmi), I8(1),
- /* 1314 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(214),
+ /* 1314 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(215),
/* 1321 S> */ B(LdaSmi), I8(1),
- /* 1326 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(216),
+ /* 1326 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(217),
/* 1333 S> */ B(LdaSmi), I8(1),
- /* 1338 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(218),
+ /* 1338 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(219),
/* 1345 S> */ B(LdaSmi), I8(1),
- /* 1350 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(220),
+ /* 1350 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(221),
/* 1357 S> */ B(LdaSmi), I8(1),
- /* 1362 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(222),
+ /* 1362 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(223),
/* 1369 S> */ B(LdaSmi), I8(1),
- /* 1374 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(224),
+ /* 1374 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(225),
/* 1381 S> */ B(LdaSmi), I8(1),
- /* 1386 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(226),
+ /* 1386 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(227),
/* 1393 S> */ B(LdaSmi), I8(1),
- /* 1398 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(228),
+ /* 1398 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(229),
/* 1405 S> */ B(LdaSmi), I8(1),
- /* 1410 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(230),
+ /* 1410 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(231),
/* 1417 S> */ B(LdaSmi), I8(1),
- /* 1422 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(232),
+ /* 1422 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(233),
/* 1429 S> */ B(LdaSmi), I8(1),
- /* 1434 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(234),
+ /* 1434 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(235),
/* 1441 S> */ B(LdaSmi), I8(1),
- /* 1446 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(236),
+ /* 1446 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(237),
/* 1453 S> */ B(LdaSmi), I8(1),
- /* 1458 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(238),
+ /* 1458 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(239),
/* 1465 S> */ B(LdaSmi), I8(1),
- /* 1470 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(240),
+ /* 1470 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(241),
/* 1477 S> */ B(LdaSmi), I8(1),
- /* 1482 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(242),
+ /* 1482 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(243),
/* 1489 S> */ B(LdaSmi), I8(1),
- /* 1494 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(244),
+ /* 1494 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(245),
/* 1501 S> */ B(LdaSmi), I8(1),
- /* 1506 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(246),
+ /* 1506 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(247),
/* 1513 S> */ B(LdaSmi), I8(1),
- /* 1518 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(248),
+ /* 1518 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(249),
/* 1525 S> */ B(LdaSmi), I8(1),
- /* 1530 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(250),
+ /* 1530 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(251),
/* 1537 S> */ B(LdaSmi), I8(1),
- /* 1542 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(252),
+ /* 1542 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(253),
/* 1549 S> */ B(LdaSmi), I8(1),
- /* 1554 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(254),
+ /* 1554 E> */ B(StaKeyedPropertyStrict), R(arg0), R(arg1), U8(255),
/* 1561 S> */ B(LdaSmi), I8(1),
- /* 1566 E> */ B(Wide), B(StaKeyedPropertyStrict), R16(arg0), R16(arg1), U16(256),
+ /* 1566 E> */ B(Wide), B(StaKeyedPropertyStrict), R16(arg0), R16(arg1), U16(257),
/* 1573 S> */ B(LdaSmi), I8(2),
- /* 1578 E> */ B(Wide), B(StaKeyedPropertyStrict), R16(arg0), R16(arg1), U16(258),
+ /* 1578 E> */ B(Wide), B(StaKeyedPropertyStrict), R16(arg0), R16(arg1), U16(259),
B(LdaUndefined),
/* 1583 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
index 4c98cea746..18c9f2ce21 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiterals.golden
@@ -14,7 +14,7 @@ parameter count: 1
bytecode array length: 6
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(2), U8(0),
+ /* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(3), U8(0),
/* 49 S> */ B(Return),
]
constant pool: [
@@ -32,7 +32,7 @@ parameter count: 1
bytecode array length: 6
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(2), U8(2),
+ /* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(3), U8(2),
/* 58 S> */ B(Return),
]
constant pool: [
@@ -50,13 +50,13 @@ parameter count: 1
bytecode array length: 23
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(4), U8(0),
+ /* 34 S> */ B(CreateRegExpLiteral), U8(0), U8(5), U8(0),
B(Star), R(1),
- /* 47 E> */ B(LdaNamedProperty), R(1), U8(1), U8(5),
+ /* 47 E> */ B(LdaNamedProperty), R(1), U8(1), U8(6),
B(Star), R(0),
B(LdaConstant), U8(2),
B(Star), R(2),
- /* 48 E> */ B(CallProperty1), R(0), R(1), R(2), U8(2),
+ /* 48 E> */ B(CallProperty1), R(0), R(1), R(2), U8(3),
/* 62 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
index 2fcd4b1188..a2f7ef5c20 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RegExpLiteralsWide.golden
@@ -783,7 +783,7 @@ bytecodes: [
B(Star), R(0),
/* 2591 S> */ B(LdaConstant), U8(255),
B(Star), R(0),
- /* 2601 S> */ B(Wide), B(CreateRegExpLiteral), U16(256), U16(2), U8(0),
+ /* 2601 S> */ B(Wide), B(CreateRegExpLiteral), U16(256), U16(3), U8(0),
/* 2616 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
index 4fb7fbe420..6df03300b4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/RemoveRedundantLdar.golden
@@ -23,10 +23,10 @@ bytecodes: [
B(Star), R(0),
/* 48 E> */ B(StackCheck),
/* 64 S> */ B(Ldar), R(0),
- /* 76 E> */ B(Add), R(0), U8(2),
+ /* 76 E> */ B(Add), R(0), U8(3),
B(Star), R(0),
/* 86 S> */ B(LdaSmi), I8(10),
- /* 95 E> */ B(TestGreaterThan), R(0), U8(3),
+ /* 95 E> */ B(TestGreaterThan), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 101 S> */ B(Jump), U8(5),
B(JumpLoop), U8(17), I8(0),
@@ -56,10 +56,10 @@ bytecodes: [
B(Star), R(0),
/* 48 E> */ B(StackCheck),
/* 55 S> */ B(Nop),
- /* 67 E> */ B(Add), R(0), U8(2),
+ /* 67 E> */ B(Add), R(0), U8(3),
B(Star), R(0),
/* 77 S> */ B(LdaSmi), I8(10),
- /* 86 E> */ B(TestGreaterThan), R(0), U8(3),
+ /* 86 E> */ B(TestGreaterThan), R(0), U8(4),
B(JumpIfFalse), U8(4),
/* 92 S> */ B(Jump), U8(2),
/* 118 S> */ B(Ldar), R(0),
@@ -84,7 +84,7 @@ bytecodes: [
/* 45 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
/* 50 S> */ B(Nop),
- /* 62 E> */ B(Add), R(0), U8(2),
+ /* 62 E> */ B(Add), R(0), U8(3),
B(Star), R(0),
/* 72 S> */ B(Nop),
/* 85 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
new file mode 100644
index 0000000000..12f4e99cc3
--- /dev/null
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StandardForLoop.golden
@@ -0,0 +1,1070 @@
+#
+# Autogenerated by generate-bytecode-expectations.
+#
+
+---
+wrap: no
+test function name: f
+
+---
+snippet: "
+ function f() {
+ for (let x = 0; x < 10; ++x) { let y = x; }
+ }
+ f();
+"
+frame size: 2
+parameter count: 1
+bytecode array length: 26
+bytecodes: [
+ /* 10 E> */ B(StackCheck),
+ /* 30 S> */ B(LdaZero),
+ B(Star), R(1),
+ /* 35 S> */ B(LdaSmi), I8(10),
+ /* 35 E> */ B(TestLessThan), R(1), U8(3),
+ B(JumpIfFalse), U8(15),
+ /* 17 E> */ B(StackCheck),
+ /* 56 S> */ B(Mov), R(1), R(0),
+ /* 43 S> */ B(Ldar), R(0),
+ B(Inc), U8(4),
+ B(Star), R(1),
+ B(JumpLoop), U8(17), I8(0),
+ B(LdaUndefined),
+ /* 61 S> */ B(Return),
+]
+constant pool: [
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() {
+ for (let x = 0; x < 10; ++x) { eval('1'); }
+ }
+ f();
+"
+frame size: 14
+parameter count: 1
+bytecode array length: 168
+bytecodes: [
+ B(CreateFunctionContext), U8(3),
+ B(PushContext), R(3),
+ B(Ldar), R(this),
+ B(StaCurrentContextSlot), U8(4),
+ B(CreateMappedArguments),
+ B(StaCurrentContextSlot), U8(6),
+ B(Ldar), R(new_target),
+ B(StaCurrentContextSlot), U8(5),
+ /* 10 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ /* 30 S> */ B(LdaZero),
+ /* 30 E> */ B(StaCurrentContextSlot), U8(4),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(0),
+ B(LdaSmi), I8(1),
+ B(Star), R(1),
+ /* 59 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(1),
+ B(PushContext), R(5),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(Ldar), R(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(1), U8(3),
+ B(JumpIfFalse), U8(7),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Jump), U8(8),
+ /* 43 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Inc), U8(4),
+ /* 43 E> */ B(StaCurrentContextSlot), U8(4),
+ B(LdaSmi), I8(1),
+ B(Star), R(2),
+ /* 35 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(6),
+ B(LdaSmi), I8(10),
+ /* 35 E> */ B(TestLessThan), R(6), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(6),
+ B(PopContext), R(5),
+ B(Jump), U8(77),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(2), U8(6),
+ B(JumpIfFalse), U8(54),
+ /* 17 E> */ B(StackCheck),
+ /* 48 S> */ B(LdaLookupGlobalSlot), U8(2), U8(9), U8(1),
+ B(Star), R(6),
+ B(LdaConstant), U8(3),
+ B(Star), R(7),
+ B(LdaZero),
+ B(Star), R(11),
+ B(LdaSmi), I8(31),
+ B(Star), R(12),
+ B(LdaSmi), I8(48),
+ B(Star), R(13),
+ B(Mov), R(6), R(8),
+ B(Mov), R(7), R(9),
+ B(Mov), R(closure), R(10),
+ B(CallRuntime), U16(Runtime::kResolvePossiblyDirectEval), R(8), U8(6),
+ B(Star), R(6),
+ /* 48 E> */ B(CallUndefinedReceiver1), R(6), R(7), U8(7),
+ B(LdaZero),
+ B(Star), R(2),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(0),
+ B(JumpLoop), U8(56), I8(1),
+ B(LdaSmi), I8(1),
+ /* 59 E> */ B(TestEqual), R(2), U8(11),
+ B(JumpIfFalse), U8(6),
+ B(PopContext), R(5),
+ B(Jump), U8(7),
+ B(PopContext), R(5),
+ B(JumpLoop), U8(125), I8(0),
+ B(PopContext), R(4),
+ B(LdaUndefined),
+ /* 61 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["eval"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["1"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() {
+ for (let x = 0; x < 10; ++x) { (function() { return x; })(); }
+ }
+ f();
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 108
+bytecodes: [
+ /* 10 E> */ B(StackCheck),
+ /* 30 S> */ B(LdaZero),
+ B(Star), R(0),
+ B(Star), R(1),
+ B(LdaSmi), I8(1),
+ B(Star), R(2),
+ /* 78 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(4),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(Ldar), R(1),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(2), U8(3),
+ B(JumpIfFalse), U8(7),
+ B(LdaZero),
+ B(Star), R(2),
+ B(Jump), U8(8),
+ /* 43 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Inc), U8(4),
+ /* 43 E> */ B(StaCurrentContextSlot), U8(4),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ /* 35 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(5),
+ B(LdaSmi), I8(10),
+ /* 35 E> */ B(TestLessThan), R(5), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(6),
+ B(PopContext), R(4),
+ B(Jump), U8(45),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(3), U8(6),
+ B(JumpIfFalse), U8(22),
+ /* 17 E> */ B(StackCheck),
+ /* 48 S> */ B(CreateClosure), U8(1), U8(9), U8(2),
+ B(Star), R(5),
+ /* 74 E> */ B(CallUndefinedReceiver0), R(5), U8(7),
+ B(LdaZero),
+ B(Star), R(3),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(1),
+ B(JumpLoop), U8(24), I8(1),
+ B(LdaSmi), I8(1),
+ /* 78 E> */ B(TestEqual), R(3), U8(10),
+ B(JumpIfFalse), U8(6),
+ B(PopContext), R(4),
+ B(Jump), U8(7),
+ B(PopContext), R(4),
+ B(JumpLoop), U8(93), I8(0),
+ B(LdaUndefined),
+ /* 80 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+]
+handlers: [
+]
+
+---
+snippet: "
+ function f() {
+ for (let { x, y } = { x: 0, y: 3 }; y > 0; --y) { let z = x + y; }
+ }
+ f();
+"
+frame size: 6
+parameter count: 1
+bytecode array length: 68
+bytecodes: [
+ /* 10 E> */ B(StackCheck),
+ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(4),
+ B(Mov), R(4), R(3),
+ B(Ldar), R(3),
+ B(JumpIfUndefined), U8(6),
+ B(Ldar), R(3),
+ B(JumpIfNotNull), U8(16),
+ B(LdaSmi), I8(61),
+ B(Star), R(4),
+ B(LdaConstant), U8(1),
+ B(Star), R(5),
+ B(CallRuntime), U16(Runtime::kNewTypeError), R(4), U8(2),
+ B(Throw),
+ /* 28 S> */ B(LdaNamedProperty), R(3), U8(2), U8(6),
+ B(Star), R(1),
+ /* 31 S> */ B(LdaNamedProperty), R(3), U8(3), U8(8),
+ B(Star), R(2),
+ /* 55 S> */ B(LdaZero),
+ /* 55 E> */ B(TestGreaterThan), R(2), U8(10),
+ B(JumpIfFalse), U8(19),
+ /* 17 E> */ B(StackCheck),
+ /* 77 S> */ B(Ldar), R(2),
+ /* 77 E> */ B(Add), R(1), U8(12),
+ B(Star), R(0),
+ /* 62 S> */ B(Ldar), R(2),
+ B(Dec), U8(11),
+ B(Star), R(2),
+ B(JumpLoop), U8(20), I8(0),
+ B(LdaUndefined),
+ /* 84 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [""],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["x"],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE ["y"],
+]
+handlers: [
+]
+
+---
+snippet: "
+ function* f() {
+ for (let x = 0; x < 10; ++x) { let y = x; }
+ }
+ f();
+"
+frame size: 11
+parameter count: 1
+bytecode array length: 331
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
+ B(LdaSmi), I8(79),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(CreateFunctionContext), U8(4),
+ B(PushContext), R(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(StaCurrentContextSlot), U8(4),
+ /* 11 E> */ B(StackCheck),
+ B(Mov), R(context), R(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(6),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(7),
+ B(LdaZero),
+ /* 11 E> */ B(SuspendGenerator), R(6), U8(0),
+ B(Ldar), R(7),
+ /* 62 S> */ B(Return),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(6), U8(1),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
+ B(Star), R(8),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(28),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(19),
+ B(LdaTrue),
+ B(Star), R(10),
+ B(Mov), R(7), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(9), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(192),
+ B(Ldar), R(7),
+ /* 11 E> */ B(Throw),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(1),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ /* 31 S> */ B(LdaZero),
+ /* 31 E> */ B(StaCurrentContextSlot), U8(4),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ /* 60 E> */ B(StaContextSlot), R(6), U8(5), U8(0),
+ B(LdaSmi), I8(1),
+ B(StaContextSlot), R(6), U8(6), U8(0),
+ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(2),
+ B(PushContext), R(7),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaContextSlot), R(6), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaContextSlot), R(6), U8(6), U8(0),
+ B(Star), R(8),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(8), U8(3),
+ B(JumpIfFalse), U8(9),
+ B(LdaZero),
+ B(StaContextSlot), R(6), U8(6), U8(0),
+ B(Jump), U8(8),
+ /* 44 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Inc), U8(4),
+ /* 44 E> */ B(StaCurrentContextSlot), U8(4),
+ B(LdaSmi), I8(1),
+ B(StaContextSlot), R(6), U8(7), U8(0),
+ /* 36 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(8),
+ B(LdaSmi), I8(10),
+ /* 36 E> */ B(TestLessThan), R(8), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(6),
+ B(PopContext), R(7),
+ B(Jump), U8(69),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(8),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(8), U8(6),
+ B(JumpIfFalse), U8(34),
+ /* 18 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(3),
+ B(PushContext), R(8),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ /* 57 S> */ B(LdaContextSlot), R(8), U8(4), U8(0),
+ /* 57 E> */ B(StaCurrentContextSlot), U8(4),
+ B(PopContext), R(8),
+ B(LdaZero),
+ B(StaContextSlot), R(6), U8(7), U8(0),
+ B(LdaCurrentContextSlot), U8(4),
+ /* 60 E> */ B(StaContextSlot), R(6), U8(5), U8(0),
+ B(JumpLoop), U8(42), I8(1),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(8),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(8), U8(7),
+ B(JumpIfFalse), U8(6),
+ B(PopContext), R(7),
+ B(Jump), U8(7),
+ B(PopContext), R(7),
+ B(JumpLoop), U8(129), I8(0),
+ B(PopContext), R(6),
+ B(LdaUndefined),
+ B(Star), R(6),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(14),
+ B(LdaSmi), I8(-1),
+ B(Star), R(3),
+ B(Jump), U8(8),
+ B(Star), R(4),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(6), U8(1),
+ B(Ldar), R(5),
+ B(SetPendingMessage),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(4), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(4),
+ /* 62 S> */ B(Return),
+ B(Ldar), R(4),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 62 S> */ B(Return),
+]
+constant pool: [
+ Smi [52],
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ Smi [6],
+ Smi [9],
+]
+handlers: [
+ [51, 288, 294],
+]
+
+---
+snippet: "
+ function* f() {
+ for (let x = 0; x < 10; ++x) yield x;
+ }
+ f();
+"
+frame size: 13
+parameter count: 1
+bytecode array length: 438
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(2), I8(0),
+ B(LdaSmi), I8(79),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(CreateFunctionContext), U8(4),
+ B(PushContext), R(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(StaCurrentContextSlot), U8(4),
+ /* 11 E> */ B(StackCheck),
+ B(Mov), R(context), R(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(6),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(7),
+ B(LdaZero),
+ /* 11 E> */ B(SuspendGenerator), R(6), U8(0),
+ B(Ldar), R(7),
+ /* 56 S> */ B(Return),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(6), U8(1),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(6), U8(1),
+ B(Star), R(8),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(28),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrictNoFeedback), R(8),
+ B(JumpIfTrue), U8(19),
+ B(LdaTrue),
+ B(Star), R(10),
+ B(Mov), R(7), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(9), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(JumpConstant), U8(6),
+ B(Ldar), R(7),
+ /* 11 E> */ B(Throw),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(2),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ /* 31 S> */ B(LdaZero),
+ /* 31 E> */ B(StaCurrentContextSlot), U8(4),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ /* 54 E> */ B(StaContextSlot), R(6), U8(5), U8(0),
+ B(LdaSmi), I8(1),
+ B(StaContextSlot), R(6), U8(6), U8(0),
+ B(Ldar), R(0),
+ B(SwitchOnSmiNoFeedback), U8(3), U8(1), I8(1),
+ B(LdaSmi), I8(-2),
+ B(TestEqualStrictNoFeedback), R(0),
+ B(JumpIfTrue), U8(11),
+ B(LdaSmi), I8(79),
+ B(Star), R(7),
+ B(CallRuntime), U16(Runtime::kAbort), R(7), U8(1),
+ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(4),
+ B(PushContext), R(7),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaContextSlot), R(6), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaContextSlot), R(6), U8(6), U8(0),
+ B(Star), R(8),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(8), U8(3),
+ B(JumpIfFalse), U8(9),
+ B(LdaZero),
+ B(StaContextSlot), R(6), U8(6), U8(0),
+ B(Jump), U8(8),
+ /* 44 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Inc), U8(4),
+ /* 44 E> */ B(StaCurrentContextSlot), U8(4),
+ B(LdaSmi), I8(1),
+ B(StaContextSlot), R(6), U8(7), U8(0),
+ /* 36 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(8),
+ B(LdaSmi), I8(10),
+ /* 36 E> */ B(TestLessThan), R(8), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(6),
+ B(PopContext), R(7),
+ B(Jump), U8(155),
+ B(Ldar), R(0),
+ B(SwitchOnSmiNoFeedback), U8(5), U8(1), I8(1),
+ B(LdaSmi), I8(-2),
+ B(TestEqualStrictNoFeedback), R(0),
+ B(JumpIfTrue), U8(11),
+ B(LdaSmi), I8(79),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kAbort), R(8), U8(1),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(8),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(8), U8(6),
+ B(JumpIfFalse), U8(99),
+ /* 18 E> */ B(StackCheck),
+ /* 47 S> */ B(LdaImmutableContextSlot), R(6), U8(4), U8(0),
+ B(Star), R(8),
+ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(9),
+ B(LdaFalse),
+ B(Star), R(10),
+ /* 53 E> */ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(9), U8(2),
+ B(Star), R(9),
+ B(LdaSmi), I8(1),
+ B(SuspendGenerator), R(8), U8(0),
+ B(Ldar), R(9),
+ /* 56 S> */ B(Return),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(8), U8(1),
+ B(Star), R(9),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(8), U8(1),
+ B(Star), R(10),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(10),
+ B(JumpIfTrue), U8(36),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrictNoFeedback), R(10),
+ B(JumpIfTrue), U8(27),
+ B(LdaTrue),
+ B(Star), R(12),
+ B(Mov), R(9), R(11),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(11), U8(2),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(6),
+ B(PopContext), R(6),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(72),
+ B(Ldar), R(9),
+ /* 47 E> */ B(Throw),
+ B(LdaZero),
+ B(StaContextSlot), R(6), U8(7), U8(0),
+ B(LdaCurrentContextSlot), U8(4),
+ /* 54 E> */ B(StaContextSlot), R(6), U8(5), U8(0),
+ B(JumpLoop), U8(128), I8(1),
+ B(LdaContextSlot), R(6), U8(7), U8(0),
+ B(Star), R(8),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(8), U8(7),
+ B(JumpIfFalse), U8(6),
+ B(PopContext), R(7),
+ B(Jump), U8(7),
+ B(PopContext), R(7),
+ B(JumpLoop), U8(236), I8(0),
+ B(PopContext), R(6),
+ B(LdaUndefined),
+ B(Star), R(6),
+ B(LdaTrue),
+ B(Star), R(7),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(6), U8(2),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(14),
+ B(LdaSmi), I8(-1),
+ B(Star), R(3),
+ B(Jump), U8(8),
+ B(Star), R(4),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(5),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(6),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorClose), R(6), U8(1),
+ B(Ldar), R(5),
+ B(SetPendingMessage),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(4),
+ /* 56 S> */ B(Return),
+ B(Ldar), R(4),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 56 S> */ B(Return),
+]
+constant pool: [
+ Smi [52],
+ Smi [123],
+ FIXED_ARRAY_TYPE,
+ Smi [84],
+ FIXED_ARRAY_TYPE,
+ Smi [60],
+ Smi [299],
+ Smi [6],
+ Smi [9],
+]
+handlers: [
+ [51, 395, 401],
+]
+
+---
+snippet: "
+ async function f() {
+ for (let x = 0; x < 10; ++x) { let y = x; }
+ }
+ f();
+"
+frame size: 10
+parameter count: 1
+bytecode array length: 300
+bytecodes: [
+ B(CreateFunctionContext), U8(5),
+ B(PushContext), R(0),
+ /* 16 E> */ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(1),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(1), U8(1),
+ B(StaCurrentContextSlot), U8(8),
+ B(Mov), R(context), R(3),
+ B(Mov), R(context), R(4),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(0),
+ B(PushContext), R(5),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ /* 36 S> */ B(LdaZero),
+ /* 36 E> */ B(StaCurrentContextSlot), U8(4),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ /* 65 E> */ B(StaContextSlot), R(5), U8(5), U8(0),
+ B(LdaSmi), I8(1),
+ B(StaContextSlot), R(5), U8(6), U8(0),
+ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(1),
+ B(PushContext), R(6),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaContextSlot), R(5), U8(5), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaContextSlot), R(5), U8(6), U8(0),
+ B(Star), R(7),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(7), U8(3),
+ B(JumpIfFalse), U8(9),
+ B(LdaZero),
+ B(StaContextSlot), R(5), U8(6), U8(0),
+ B(Jump), U8(8),
+ /* 49 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Inc), U8(4),
+ /* 49 E> */ B(StaCurrentContextSlot), U8(4),
+ B(LdaSmi), I8(1),
+ B(StaContextSlot), R(5), U8(7), U8(0),
+ /* 41 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(7),
+ B(LdaSmi), I8(10),
+ /* 41 E> */ B(TestLessThan), R(7), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(6),
+ B(PopContext), R(6),
+ B(Jump), U8(69),
+ B(LdaContextSlot), R(5), U8(7), U8(0),
+ B(Star), R(7),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(7), U8(6),
+ B(JumpIfFalse), U8(34),
+ /* 23 E> */ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(2),
+ B(PushContext), R(7),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ /* 62 S> */ B(LdaContextSlot), R(7), U8(4), U8(0),
+ /* 62 E> */ B(StaCurrentContextSlot), U8(4),
+ B(PopContext), R(7),
+ B(LdaZero),
+ B(StaContextSlot), R(5), U8(7), U8(0),
+ B(LdaCurrentContextSlot), U8(4),
+ /* 65 E> */ B(StaContextSlot), R(5), U8(5), U8(0),
+ B(JumpLoop), U8(42), I8(1),
+ B(LdaContextSlot), R(5), U8(7), U8(0),
+ B(Star), R(7),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(7), U8(7),
+ B(JumpIfFalse), U8(6),
+ B(PopContext), R(6),
+ B(Jump), U8(7),
+ B(PopContext), R(6),
+ B(JumpLoop), U8(129), I8(0),
+ B(PopContext), R(5),
+ B(LdaUndefined),
+ B(Star), R(5),
+ B(LdaCurrentContextSlot), U8(8),
+ B(Star), R(6),
+ B(LdaUndefined),
+ B(Star), R(7),
+ B(CallJSRuntime), U8(%promise_resolve), R(5), U8(3),
+ B(LdaCurrentContextSlot), U8(8),
+ B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Jump), U8(67),
+ B(Jump), U8(53),
+ B(Star), R(5),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(5), U8(3), U8(4),
+ B(Star), R(4),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
+ B(LdaUndefined),
+ B(Star), R(6),
+ B(LdaContextSlot), R(5), U8(8), U8(0),
+ B(Star), R(7),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(8),
+ B(LdaFalse),
+ B(Star), R(9),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(6), U8(4),
+ B(LdaContextSlot), R(5), U8(8), U8(0),
+ B(PopContext), R(5),
+ B(PopContext), R(5),
+ B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
+ B(Jump), U8(14),
+ B(LdaSmi), I8(-1),
+ B(Star), R(1),
+ B(Jump), U8(8),
+ B(Star), R(2),
+ B(LdaSmi), I8(1),
+ B(Star), R(1),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(3),
+ B(LdaUndefined),
+ B(Star), R(4),
+ B(LdaCurrentContextSlot), U8(8),
+ B(Star), R(5),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(4), U8(2),
+ B(Ldar), R(3),
+ B(SetPendingMessage),
+ B(Ldar), R(1),
+ B(SwitchOnSmiNoFeedback), U8(5), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(2),
+ /* 67 S> */ B(Return),
+ B(Ldar), R(2),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 67 S> */ B(Return),
+]
+constant pool: [
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ FIXED_ARRAY_TYPE,
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ Smi [6],
+ Smi [9],
+]
+handlers: [
+ [17, 254, 260],
+ [20, 201, 203],
+]
+
+---
+snippet: "
+ async function f() {
+ for (let x = 0; x < 10; ++x) await x;
+ }
+ f();
+"
+frame size: 14
+parameter count: 1
+bytecode array length: 480
+bytecodes: [
+ B(Ldar), R(new_target),
+ B(JumpIfUndefined), U8(25),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetContext), R(new_target), U8(1),
+ B(PushContext), R(1),
+ B(ResumeGenerator), R(new_target),
+ B(Star), R(0),
+ B(SwitchOnSmiNoFeedback), U8(0), U8(1), I8(0),
+ B(LdaSmi), I8(79),
+ B(Star), R(2),
+ B(CallRuntime), U16(Runtime::kAbort), R(2), U8(1),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(CreateFunctionContext), U8(6),
+ B(PushContext), R(2),
+ B(Mov), R(closure), R(3),
+ B(Mov), R(this), R(4),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateJSGeneratorObject), R(3), U8(2),
+ B(StaCurrentContextSlot), U8(4),
+ /* 16 E> */ B(StackCheck),
+ B(LdaUndefined),
+ B(Star), R(3),
+ B(CallJSRuntime), U8(%async_function_promise_create), R(3), U8(1),
+ B(StaCurrentContextSlot), U8(6),
+ B(Mov), R(context), R(5),
+ B(Mov), R(context), R(6),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(1),
+ B(PushContext), R(7),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ /* 36 S> */ B(LdaZero),
+ /* 36 E> */ B(StaCurrentContextSlot), U8(4),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ /* 59 E> */ B(StaContextSlot), R(7), U8(7), U8(0),
+ B(LdaSmi), I8(1),
+ B(StaContextSlot), R(7), U8(8), U8(0),
+ B(Ldar), R(0),
+ B(SwitchOnSmiNoFeedback), U8(2), U8(1), I8(0),
+ B(LdaSmi), I8(-2),
+ B(TestEqualStrictNoFeedback), R(0),
+ B(JumpIfTrue), U8(11),
+ B(LdaSmi), I8(79),
+ B(Star), R(8),
+ B(CallRuntime), U16(Runtime::kAbort), R(8), U8(1),
+ B(StackCheck),
+ B(Ldar), R(closure),
+ B(CreateBlockContext), U8(3),
+ B(PushContext), R(8),
+ B(LdaTheHole),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaContextSlot), R(7), U8(7), U8(0),
+ B(StaCurrentContextSlot), U8(4),
+ B(LdaContextSlot), R(7), U8(8), U8(0),
+ B(Star), R(9),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(9), U8(3),
+ B(JumpIfFalse), U8(9),
+ B(LdaZero),
+ B(StaContextSlot), R(7), U8(8), U8(0),
+ B(Jump), U8(8),
+ /* 49 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Inc), U8(4),
+ /* 49 E> */ B(StaCurrentContextSlot), U8(4),
+ B(LdaSmi), I8(1),
+ B(StaContextSlot), R(7), U8(9), U8(0),
+ /* 41 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(Star), R(9),
+ B(LdaSmi), I8(10),
+ /* 41 E> */ B(TestLessThan), R(9), U8(5),
+ B(JumpIfFalse), U8(4),
+ B(Jump), U8(6),
+ B(PopContext), R(8),
+ B(Jump), U8(185),
+ B(Ldar), R(0),
+ B(SwitchOnSmiNoFeedback), U8(4), U8(1), I8(0),
+ B(LdaSmi), I8(-2),
+ B(TestEqualStrictNoFeedback), R(0),
+ B(JumpIfTrue), U8(11),
+ B(LdaSmi), I8(79),
+ B(Star), R(9),
+ B(CallRuntime), U16(Runtime::kAbort), R(9), U8(1),
+ B(LdaContextSlot), R(7), U8(9), U8(0),
+ B(Star), R(9),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(9), U8(6),
+ B(JumpIfFalse), U8(126),
+ /* 23 E> */ B(StackCheck),
+ /* 52 S> */ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
+ B(Star), R(9),
+ /* 58 S> */ B(LdaCurrentContextSlot), U8(4),
+ B(StaContextSlot), R(7), U8(5), U8(0),
+ /* 52 S> */ B(LdaUndefined),
+ B(Star), R(10),
+ B(LdaImmutableContextSlot), R(7), U8(4), U8(0),
+ B(Star), R(11),
+ B(LdaContextSlot), R(7), U8(5), U8(0),
+ B(Star), R(12),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
+ B(Star), R(13),
+ B(CallJSRuntime), U8(%async_function_await_uncaught), R(10), U8(4),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
+ B(Star), R(10),
+ B(LdaZero),
+ B(SuspendGenerator), R(9), U8(2),
+ B(Ldar), R(10),
+ /* 61 S> */ B(Return),
+ B(LdaSmi), I8(-2),
+ B(Star), R(0),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetInputOrDebugPos), R(9), U8(1),
+ B(Star), R(10),
+ B(InvokeIntrinsic), U8(Runtime::k_GeneratorGetResumeMode), R(9), U8(1),
+ B(Star), R(11),
+ B(LdaZero),
+ B(TestEqualStrictNoFeedback), R(11),
+ B(JumpIfTrue), U8(40),
+ B(LdaSmi), I8(2),
+ B(TestEqualStrictNoFeedback), R(11),
+ B(JumpIfTrue), U8(31),
+ B(LdaTrue),
+ B(Star), R(13),
+ B(Mov), R(10), R(12),
+ B(InvokeIntrinsic), U8(Runtime::k_CreateIterResultObject), R(12), U8(2),
+ B(PopContext), R(8),
+ B(PopContext), R(8),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(134),
+ B(Ldar), R(10),
+ B(ReThrow),
+ B(LdaZero),
+ B(StaContextSlot), R(7), U8(9), U8(0),
+ B(LdaCurrentContextSlot), U8(4),
+ /* 59 E> */ B(StaContextSlot), R(7), U8(7), U8(0),
+ B(JumpLoop), U8(155), I8(1),
+ B(LdaContextSlot), R(7), U8(9), U8(0),
+ B(Star), R(9),
+ B(LdaSmi), I8(1),
+ B(TestEqual), R(9), U8(7),
+ B(JumpIfFalse), U8(6),
+ B(PopContext), R(8),
+ B(Jump), U8(10),
+ B(PopContext), R(8),
+ B(Wide), B(JumpLoop), U16(264), I16(0),
+ B(PopContext), R(7),
+ B(LdaUndefined),
+ B(Star), R(7),
+ B(LdaCurrentContextSlot), U8(6),
+ B(Star), R(8),
+ B(LdaUndefined),
+ B(Star), R(9),
+ B(CallJSRuntime), U8(%promise_resolve), R(7), U8(3),
+ B(LdaCurrentContextSlot), U8(6),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(67),
+ B(Jump), U8(53),
+ B(Star), R(7),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(7), U8(5), U8(6),
+ B(Star), R(6),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Ldar), R(6),
+ B(PushContext), R(7),
+ B(LdaUndefined),
+ B(Star), R(8),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
+ B(Star), R(9),
+ B(LdaImmutableCurrentContextSlot), U8(4),
+ B(Star), R(10),
+ B(LdaFalse),
+ B(Star), R(11),
+ B(CallJSRuntime), U8(%promise_internal_reject), R(8), U8(4),
+ B(LdaContextSlot), R(7), U8(6), U8(0),
+ B(PopContext), R(7),
+ B(PopContext), R(7),
+ B(Star), R(4),
+ B(LdaZero),
+ B(Star), R(3),
+ B(Jump), U8(14),
+ B(LdaSmi), I8(-1),
+ B(Star), R(3),
+ B(Jump), U8(8),
+ B(Star), R(4),
+ B(LdaSmi), I8(1),
+ B(Star), R(3),
+ B(LdaTheHole),
+ B(SetPendingMessage),
+ B(Star), R(5),
+ B(LdaUndefined),
+ B(Star), R(6),
+ B(LdaCurrentContextSlot), U8(6),
+ B(Star), R(7),
+ B(CallJSRuntime), U8(%async_function_promise_release), R(6), U8(2),
+ B(Ldar), R(5),
+ B(SetPendingMessage),
+ B(Ldar), R(3),
+ B(SwitchOnSmiNoFeedback), U8(7), U8(2), I8(0),
+ B(Jump), U8(8),
+ B(Ldar), R(4),
+ /* 61 S> */ B(Return),
+ B(Ldar), R(4),
+ B(ReThrow),
+ B(LdaUndefined),
+ /* 61 S> */ B(Return),
+]
+constant pool: [
+ Smi [73],
+ FIXED_ARRAY_TYPE,
+ Smi [84],
+ FIXED_ARRAY_TYPE,
+ Smi [83],
+ ONE_BYTE_INTERNALIZED_STRING_TYPE [".catch"],
+ FIXED_ARRAY_TYPE,
+ Smi [6],
+ Smi [9],
+]
+handlers: [
+ [60, 434, 440],
+ [63, 381, 383],
+]
+
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
index 4e0330ede3..590fe85edf 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StoreGlobal.golden
@@ -18,7 +18,7 @@ bytecode array length: 8
bytecodes: [
/* 21 E> */ B(StackCheck),
/* 26 S> */ B(LdaSmi), I8(2),
- /* 28 E> */ B(StaGlobalSloppy), U8(0), U8(2),
+ /* 28 E> */ B(StaGlobalSloppy), U8(0), U8(3),
B(LdaUndefined),
/* 33 S> */ B(Return),
]
@@ -39,7 +39,7 @@ bytecode array length: 8
bytecodes: [
/* 26 E> */ B(StackCheck),
/* 32 S> */ B(Ldar), R(arg0),
- /* 34 E> */ B(StaGlobalSloppy), U8(0), U8(2),
+ /* 34 E> */ B(StaGlobalSloppy), U8(0), U8(3),
B(LdaUndefined),
/* 39 S> */ B(Return),
]
@@ -61,7 +61,7 @@ bytecode array length: 8
bytecodes: [
/* 35 E> */ B(StackCheck),
/* 40 S> */ B(LdaSmi), I8(2),
- /* 42 E> */ B(StaGlobalStrict), U8(0), U8(2),
+ /* 42 E> */ B(StaGlobalStrict), U8(0), U8(3),
B(LdaUndefined),
/* 47 S> */ B(Return),
]
@@ -83,7 +83,7 @@ bytecode array length: 8
bytecodes: [
/* 17 E> */ B(StackCheck),
/* 22 S> */ B(LdaSmi), I8(2),
- /* 24 E> */ B(StaGlobalSloppy), U8(0), U8(2),
+ /* 24 E> */ B(StaGlobalSloppy), U8(0), U8(3),
B(LdaUndefined),
/* 29 S> */ B(Return),
]
@@ -235,263 +235,263 @@ bytecode array length: 655
bytecodes: [
/* 17 E> */ B(StackCheck),
/* 25 S> */ B(Nop),
- /* 26 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ /* 26 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
/* 35 S> */ B(Nop),
- /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
+ /* 36 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
/* 45 S> */ B(Nop),
- /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(6),
+ /* 46 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
/* 55 S> */ B(Nop),
- /* 56 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(8),
+ /* 56 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
/* 65 S> */ B(Nop),
- /* 66 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(10),
+ /* 66 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
/* 75 S> */ B(Nop),
- /* 76 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(12),
+ /* 76 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
/* 85 S> */ B(Nop),
- /* 86 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(14),
+ /* 86 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
/* 95 S> */ B(Nop),
- /* 96 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(16),
+ /* 96 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
/* 105 S> */ B(Nop),
- /* 106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(18),
+ /* 106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
/* 115 S> */ B(Nop),
- /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(20),
+ /* 116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
/* 125 S> */ B(Nop),
- /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(22),
+ /* 126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
/* 135 S> */ B(Nop),
- /* 136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(24),
+ /* 136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
/* 145 S> */ B(Nop),
- /* 146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(26),
+ /* 146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
/* 155 S> */ B(Nop),
- /* 156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(28),
+ /* 156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
/* 165 S> */ B(Nop),
- /* 166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(30),
+ /* 166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
/* 175 S> */ B(Nop),
- /* 176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(32),
+ /* 176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
/* 185 S> */ B(Nop),
- /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(34),
+ /* 186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
/* 195 S> */ B(Nop),
- /* 196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(36),
+ /* 196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
/* 205 S> */ B(Nop),
- /* 206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(38),
+ /* 206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
/* 215 S> */ B(Nop),
- /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(40),
+ /* 216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
/* 225 S> */ B(Nop),
- /* 226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(42),
+ /* 226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
/* 235 S> */ B(Nop),
- /* 236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(44),
+ /* 236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
/* 245 S> */ B(Nop),
- /* 246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(46),
+ /* 246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
/* 255 S> */ B(Nop),
- /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(48),
+ /* 256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
/* 265 S> */ B(Nop),
- /* 266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(50),
+ /* 266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
/* 275 S> */ B(Nop),
- /* 276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(52),
+ /* 276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
/* 285 S> */ B(Nop),
- /* 286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(54),
+ /* 286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
/* 295 S> */ B(Nop),
- /* 296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(56),
+ /* 296 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
/* 305 S> */ B(Nop),
- /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(58),
+ /* 306 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
/* 315 S> */ B(Nop),
- /* 316 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(60),
+ /* 316 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
/* 325 S> */ B(Nop),
- /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(62),
+ /* 326 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
/* 335 S> */ B(Nop),
- /* 336 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(64),
+ /* 336 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
/* 345 S> */ B(Nop),
- /* 346 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(66),
+ /* 346 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
/* 355 S> */ B(Nop),
- /* 356 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(68),
+ /* 356 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
/* 365 S> */ B(Nop),
- /* 366 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(70),
+ /* 366 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
/* 375 S> */ B(Nop),
- /* 376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(72),
+ /* 376 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
/* 385 S> */ B(Nop),
- /* 386 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(74),
+ /* 386 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
/* 395 S> */ B(Nop),
- /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(76),
+ /* 396 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
/* 405 S> */ B(Nop),
- /* 406 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(78),
+ /* 406 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
/* 415 S> */ B(Nop),
- /* 416 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(80),
+ /* 416 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
/* 425 S> */ B(Nop),
- /* 426 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(82),
+ /* 426 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
/* 435 S> */ B(Nop),
- /* 436 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(84),
+ /* 436 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
/* 445 S> */ B(Nop),
- /* 446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(86),
+ /* 446 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
/* 455 S> */ B(Nop),
- /* 456 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(88),
+ /* 456 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
/* 465 S> */ B(Nop),
- /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(90),
+ /* 466 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
/* 475 S> */ B(Nop),
- /* 476 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(92),
+ /* 476 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
/* 485 S> */ B(Nop),
- /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(94),
+ /* 486 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
/* 495 S> */ B(Nop),
- /* 496 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(96),
+ /* 496 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
/* 505 S> */ B(Nop),
- /* 506 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(98),
+ /* 506 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
/* 515 S> */ B(Nop),
- /* 516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(100),
+ /* 516 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
/* 525 S> */ B(Nop),
- /* 526 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(102),
+ /* 526 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
/* 535 S> */ B(Nop),
- /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(104),
+ /* 536 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
/* 545 S> */ B(Nop),
- /* 546 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(106),
+ /* 546 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
/* 555 S> */ B(Nop),
- /* 556 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(108),
+ /* 556 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
/* 565 S> */ B(Nop),
- /* 566 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(110),
+ /* 566 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
/* 575 S> */ B(Nop),
- /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(112),
+ /* 576 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
/* 585 S> */ B(Nop),
- /* 586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(114),
+ /* 586 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
/* 595 S> */ B(Nop),
- /* 596 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(116),
+ /* 596 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
/* 605 S> */ B(Nop),
- /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(118),
+ /* 606 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
/* 615 S> */ B(Nop),
- /* 616 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(120),
+ /* 616 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
/* 625 S> */ B(Nop),
- /* 626 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(122),
+ /* 626 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
/* 635 S> */ B(Nop),
- /* 636 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(124),
+ /* 636 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
/* 645 S> */ B(Nop),
- /* 646 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(126),
+ /* 646 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
/* 655 S> */ B(Nop),
- /* 656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(128),
+ /* 656 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
/* 665 S> */ B(Nop),
- /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(130),
+ /* 666 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
/* 675 S> */ B(Nop),
- /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(132),
+ /* 676 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
/* 685 S> */ B(Nop),
- /* 686 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(134),
+ /* 686 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
/* 695 S> */ B(Nop),
- /* 696 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(136),
+ /* 696 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
/* 705 S> */ B(Nop),
- /* 706 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(138),
+ /* 706 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
/* 715 S> */ B(Nop),
- /* 716 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(140),
+ /* 716 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
/* 725 S> */ B(Nop),
- /* 726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(142),
+ /* 726 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
/* 735 S> */ B(Nop),
- /* 736 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(144),
+ /* 736 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
/* 745 S> */ B(Nop),
- /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(146),
+ /* 746 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
/* 755 S> */ B(Nop),
- /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(148),
+ /* 756 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
/* 765 S> */ B(Nop),
- /* 766 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(150),
+ /* 766 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
/* 775 S> */ B(Nop),
- /* 776 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(152),
+ /* 776 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
/* 785 S> */ B(Nop),
- /* 786 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(154),
+ /* 786 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
/* 795 S> */ B(Nop),
- /* 796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(156),
+ /* 796 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
/* 805 S> */ B(Nop),
- /* 806 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(158),
+ /* 806 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
/* 815 S> */ B(Nop),
- /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(160),
+ /* 816 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
/* 825 S> */ B(Nop),
- /* 826 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(162),
+ /* 826 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
/* 835 S> */ B(Nop),
- /* 836 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(164),
+ /* 836 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
/* 845 S> */ B(Nop),
- /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(166),
+ /* 846 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
/* 855 S> */ B(Nop),
- /* 856 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(168),
+ /* 856 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
/* 865 S> */ B(Nop),
- /* 866 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(170),
+ /* 866 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
/* 875 S> */ B(Nop),
- /* 876 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(172),
+ /* 876 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
/* 885 S> */ B(Nop),
- /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(174),
+ /* 886 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
/* 895 S> */ B(Nop),
- /* 896 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(176),
+ /* 896 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
/* 905 S> */ B(Nop),
- /* 906 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(178),
+ /* 906 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
/* 915 S> */ B(Nop),
- /* 916 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(180),
+ /* 916 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
/* 925 S> */ B(Nop),
- /* 926 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(182),
+ /* 926 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
/* 935 S> */ B(Nop),
- /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(184),
+ /* 936 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
/* 945 S> */ B(Nop),
- /* 946 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(186),
+ /* 946 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
/* 955 S> */ B(Nop),
- /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(188),
+ /* 956 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
/* 965 S> */ B(Nop),
- /* 966 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(190),
+ /* 966 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
/* 975 S> */ B(Nop),
- /* 976 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(192),
+ /* 976 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
/* 985 S> */ B(Nop),
- /* 986 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(194),
+ /* 986 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
/* 995 S> */ B(Nop),
- /* 996 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(196),
+ /* 996 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
/* 1005 S> */ B(Nop),
- /* 1006 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(198),
+ /* 1006 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
/* 1015 S> */ B(Nop),
- /* 1016 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(200),
+ /* 1016 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
/* 1025 S> */ B(Nop),
- /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(202),
+ /* 1026 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
/* 1035 S> */ B(Nop),
- /* 1036 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(204),
+ /* 1036 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
/* 1045 S> */ B(Nop),
- /* 1046 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(206),
+ /* 1046 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
/* 1055 S> */ B(Nop),
- /* 1056 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(208),
+ /* 1056 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
/* 1065 S> */ B(Nop),
- /* 1066 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(210),
+ /* 1066 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
/* 1075 S> */ B(Nop),
- /* 1076 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(212),
+ /* 1076 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
/* 1085 S> */ B(Nop),
- /* 1086 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(214),
+ /* 1086 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
/* 1095 S> */ B(Nop),
- /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(216),
+ /* 1096 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
/* 1105 S> */ B(Nop),
- /* 1106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(218),
+ /* 1106 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
/* 1115 S> */ B(Nop),
- /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(220),
+ /* 1116 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
/* 1125 S> */ B(Nop),
- /* 1126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(222),
+ /* 1126 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
/* 1135 S> */ B(Nop),
- /* 1136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(224),
+ /* 1136 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
/* 1145 S> */ B(Nop),
- /* 1146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(226),
+ /* 1146 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
/* 1155 S> */ B(Nop),
- /* 1156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(228),
+ /* 1156 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
/* 1165 S> */ B(Nop),
- /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(230),
+ /* 1166 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
/* 1175 S> */ B(Nop),
- /* 1176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(232),
+ /* 1176 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
/* 1185 S> */ B(Nop),
- /* 1186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(234),
+ /* 1186 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
/* 1195 S> */ B(Nop),
- /* 1196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(236),
+ /* 1196 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
/* 1205 S> */ B(Nop),
- /* 1206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(238),
+ /* 1206 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
/* 1215 S> */ B(Nop),
- /* 1216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(240),
+ /* 1216 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
/* 1225 S> */ B(Nop),
- /* 1226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(242),
+ /* 1226 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
/* 1235 S> */ B(Nop),
- /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(244),
+ /* 1236 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
/* 1245 S> */ B(Nop),
- /* 1246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(246),
+ /* 1246 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
/* 1255 S> */ B(Nop),
- /* 1256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(248),
+ /* 1256 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
/* 1265 S> */ B(Nop),
- /* 1266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(250),
+ /* 1266 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
/* 1275 S> */ B(Nop),
- /* 1276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
+ /* 1276 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
/* 1285 S> */ B(Nop),
- /* 1286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
+ /* 1286 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
/* 1295 S> */ B(Nop),
- /* 1296 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(256),
+ /* 1296 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(257),
/* 1305 S> */ B(LdaSmi), I8(2),
- /* 1307 E> */ B(Wide), B(StaGlobalSloppy), U16(1), U16(258),
+ /* 1307 E> */ B(Wide), B(StaGlobalSloppy), U16(1), U16(259),
B(LdaUndefined),
/* 1312 S> */ B(Return),
]
@@ -645,263 +645,263 @@ bytecode array length: 655
bytecodes: [
/* 17 E> */ B(StackCheck),
/* 41 S> */ B(Nop),
- /* 42 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(2),
+ /* 42 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(3),
/* 51 S> */ B(Nop),
- /* 52 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(4),
+ /* 52 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(5),
/* 61 S> */ B(Nop),
- /* 62 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(6),
+ /* 62 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(7),
/* 71 S> */ B(Nop),
- /* 72 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(8),
+ /* 72 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(9),
/* 81 S> */ B(Nop),
- /* 82 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(10),
+ /* 82 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(11),
/* 91 S> */ B(Nop),
- /* 92 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(12),
+ /* 92 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(13),
/* 101 S> */ B(Nop),
- /* 102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(14),
+ /* 102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(15),
/* 111 S> */ B(Nop),
- /* 112 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(16),
+ /* 112 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(17),
/* 121 S> */ B(Nop),
- /* 122 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(18),
+ /* 122 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(19),
/* 131 S> */ B(Nop),
- /* 132 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(20),
+ /* 132 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(21),
/* 141 S> */ B(Nop),
- /* 142 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(22),
+ /* 142 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(23),
/* 151 S> */ B(Nop),
- /* 152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(24),
+ /* 152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(25),
/* 161 S> */ B(Nop),
- /* 162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(26),
+ /* 162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(27),
/* 171 S> */ B(Nop),
- /* 172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(28),
+ /* 172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(29),
/* 181 S> */ B(Nop),
- /* 182 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(30),
+ /* 182 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(31),
/* 191 S> */ B(Nop),
- /* 192 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(32),
+ /* 192 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(33),
/* 201 S> */ B(Nop),
- /* 202 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(34),
+ /* 202 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(35),
/* 211 S> */ B(Nop),
- /* 212 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(36),
+ /* 212 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(37),
/* 221 S> */ B(Nop),
- /* 222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(38),
+ /* 222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(39),
/* 231 S> */ B(Nop),
- /* 232 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(40),
+ /* 232 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(41),
/* 241 S> */ B(Nop),
- /* 242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(42),
+ /* 242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(43),
/* 251 S> */ B(Nop),
- /* 252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(44),
+ /* 252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(45),
/* 261 S> */ B(Nop),
- /* 262 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(46),
+ /* 262 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(47),
/* 271 S> */ B(Nop),
- /* 272 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(48),
+ /* 272 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(49),
/* 281 S> */ B(Nop),
- /* 282 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(50),
+ /* 282 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(51),
/* 291 S> */ B(Nop),
- /* 292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(52),
+ /* 292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(53),
/* 301 S> */ B(Nop),
- /* 302 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(54),
+ /* 302 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(55),
/* 311 S> */ B(Nop),
- /* 312 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(56),
+ /* 312 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(57),
/* 321 S> */ B(Nop),
- /* 322 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(58),
+ /* 322 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(59),
/* 331 S> */ B(Nop),
- /* 332 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(60),
+ /* 332 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(61),
/* 341 S> */ B(Nop),
- /* 342 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(62),
+ /* 342 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(63),
/* 351 S> */ B(Nop),
- /* 352 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(64),
+ /* 352 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(65),
/* 361 S> */ B(Nop),
- /* 362 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(66),
+ /* 362 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(67),
/* 371 S> */ B(Nop),
- /* 372 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(68),
+ /* 372 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(69),
/* 381 S> */ B(Nop),
- /* 382 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(70),
+ /* 382 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(71),
/* 391 S> */ B(Nop),
- /* 392 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(72),
+ /* 392 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(73),
/* 401 S> */ B(Nop),
- /* 402 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(74),
+ /* 402 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(75),
/* 411 S> */ B(Nop),
- /* 412 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(76),
+ /* 412 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(77),
/* 421 S> */ B(Nop),
- /* 422 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(78),
+ /* 422 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(79),
/* 431 S> */ B(Nop),
- /* 432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(80),
+ /* 432 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(81),
/* 441 S> */ B(Nop),
- /* 442 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(82),
+ /* 442 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(83),
/* 451 S> */ B(Nop),
- /* 452 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(84),
+ /* 452 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(85),
/* 461 S> */ B(Nop),
- /* 462 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(86),
+ /* 462 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(87),
/* 471 S> */ B(Nop),
- /* 472 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(88),
+ /* 472 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(89),
/* 481 S> */ B(Nop),
- /* 482 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(90),
+ /* 482 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(91),
/* 491 S> */ B(Nop),
- /* 492 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(92),
+ /* 492 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(93),
/* 501 S> */ B(Nop),
- /* 502 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(94),
+ /* 502 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(95),
/* 511 S> */ B(Nop),
- /* 512 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(96),
+ /* 512 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(97),
/* 521 S> */ B(Nop),
- /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(98),
+ /* 522 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(99),
/* 531 S> */ B(Nop),
- /* 532 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(100),
+ /* 532 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(101),
/* 541 S> */ B(Nop),
- /* 542 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(102),
+ /* 542 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(103),
/* 551 S> */ B(Nop),
- /* 552 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(104),
+ /* 552 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(105),
/* 561 S> */ B(Nop),
- /* 562 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(106),
+ /* 562 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(107),
/* 571 S> */ B(Nop),
- /* 572 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(108),
+ /* 572 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(109),
/* 581 S> */ B(Nop),
- /* 582 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(110),
+ /* 582 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(111),
/* 591 S> */ B(Nop),
- /* 592 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(112),
+ /* 592 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(113),
/* 601 S> */ B(Nop),
- /* 602 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(114),
+ /* 602 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(115),
/* 611 S> */ B(Nop),
- /* 612 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(116),
+ /* 612 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(117),
/* 621 S> */ B(Nop),
- /* 622 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(118),
+ /* 622 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(119),
/* 631 S> */ B(Nop),
- /* 632 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(120),
+ /* 632 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(121),
/* 641 S> */ B(Nop),
- /* 642 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(122),
+ /* 642 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(123),
/* 651 S> */ B(Nop),
- /* 652 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(124),
+ /* 652 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(125),
/* 661 S> */ B(Nop),
- /* 662 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(126),
+ /* 662 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(127),
/* 671 S> */ B(Nop),
- /* 672 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(128),
+ /* 672 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(129),
/* 681 S> */ B(Nop),
- /* 682 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(130),
+ /* 682 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(131),
/* 691 S> */ B(Nop),
- /* 692 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(132),
+ /* 692 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(133),
/* 701 S> */ B(Nop),
- /* 702 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(134),
+ /* 702 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(135),
/* 711 S> */ B(Nop),
- /* 712 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(136),
+ /* 712 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(137),
/* 721 S> */ B(Nop),
- /* 722 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(138),
+ /* 722 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(139),
/* 731 S> */ B(Nop),
- /* 732 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(140),
+ /* 732 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(141),
/* 741 S> */ B(Nop),
- /* 742 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(142),
+ /* 742 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(143),
/* 751 S> */ B(Nop),
- /* 752 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(144),
+ /* 752 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(145),
/* 761 S> */ B(Nop),
- /* 762 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(146),
+ /* 762 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(147),
/* 771 S> */ B(Nop),
- /* 772 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(148),
+ /* 772 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(149),
/* 781 S> */ B(Nop),
- /* 782 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(150),
+ /* 782 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(151),
/* 791 S> */ B(Nop),
- /* 792 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(152),
+ /* 792 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(153),
/* 801 S> */ B(Nop),
- /* 802 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(154),
+ /* 802 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(155),
/* 811 S> */ B(Nop),
- /* 812 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(156),
+ /* 812 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(157),
/* 821 S> */ B(Nop),
- /* 822 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(158),
+ /* 822 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(159),
/* 831 S> */ B(Nop),
- /* 832 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(160),
+ /* 832 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(161),
/* 841 S> */ B(Nop),
- /* 842 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(162),
+ /* 842 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(163),
/* 851 S> */ B(Nop),
- /* 852 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(164),
+ /* 852 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(165),
/* 861 S> */ B(Nop),
- /* 862 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(166),
+ /* 862 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(167),
/* 871 S> */ B(Nop),
- /* 872 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(168),
+ /* 872 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(169),
/* 881 S> */ B(Nop),
- /* 882 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(170),
+ /* 882 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(171),
/* 891 S> */ B(Nop),
- /* 892 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(172),
+ /* 892 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(173),
/* 901 S> */ B(Nop),
- /* 902 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(174),
+ /* 902 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(175),
/* 911 S> */ B(Nop),
- /* 912 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(176),
+ /* 912 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(177),
/* 921 S> */ B(Nop),
- /* 922 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(178),
+ /* 922 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(179),
/* 931 S> */ B(Nop),
- /* 932 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(180),
+ /* 932 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(181),
/* 941 S> */ B(Nop),
- /* 942 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(182),
+ /* 942 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(183),
/* 951 S> */ B(Nop),
- /* 952 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(184),
+ /* 952 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(185),
/* 961 S> */ B(Nop),
- /* 962 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(186),
+ /* 962 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(187),
/* 971 S> */ B(Nop),
- /* 972 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(188),
+ /* 972 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(189),
/* 981 S> */ B(Nop),
- /* 982 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(190),
+ /* 982 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(191),
/* 991 S> */ B(Nop),
- /* 992 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(192),
+ /* 992 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(193),
/* 1001 S> */ B(Nop),
- /* 1002 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(194),
+ /* 1002 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(195),
/* 1011 S> */ B(Nop),
- /* 1012 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(196),
+ /* 1012 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(197),
/* 1021 S> */ B(Nop),
- /* 1022 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(198),
+ /* 1022 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(199),
/* 1031 S> */ B(Nop),
- /* 1032 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(200),
+ /* 1032 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(201),
/* 1041 S> */ B(Nop),
- /* 1042 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(202),
+ /* 1042 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(203),
/* 1051 S> */ B(Nop),
- /* 1052 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(204),
+ /* 1052 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(205),
/* 1061 S> */ B(Nop),
- /* 1062 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(206),
+ /* 1062 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(207),
/* 1071 S> */ B(Nop),
- /* 1072 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(208),
+ /* 1072 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(209),
/* 1081 S> */ B(Nop),
- /* 1082 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(210),
+ /* 1082 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(211),
/* 1091 S> */ B(Nop),
- /* 1092 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(212),
+ /* 1092 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(213),
/* 1101 S> */ B(Nop),
- /* 1102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(214),
+ /* 1102 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(215),
/* 1111 S> */ B(Nop),
- /* 1112 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(216),
+ /* 1112 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(217),
/* 1121 S> */ B(Nop),
- /* 1122 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(218),
+ /* 1122 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(219),
/* 1131 S> */ B(Nop),
- /* 1132 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(220),
+ /* 1132 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(221),
/* 1141 S> */ B(Nop),
- /* 1142 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(222),
+ /* 1142 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(223),
/* 1151 S> */ B(Nop),
- /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(224),
+ /* 1152 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(225),
/* 1161 S> */ B(Nop),
- /* 1162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(226),
+ /* 1162 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(227),
/* 1171 S> */ B(Nop),
- /* 1172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(228),
+ /* 1172 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(229),
/* 1181 S> */ B(Nop),
- /* 1182 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(230),
+ /* 1182 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(231),
/* 1191 S> */ B(Nop),
- /* 1192 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(232),
+ /* 1192 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(233),
/* 1201 S> */ B(Nop),
- /* 1202 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(234),
+ /* 1202 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(235),
/* 1211 S> */ B(Nop),
- /* 1212 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(236),
+ /* 1212 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(237),
/* 1221 S> */ B(Nop),
- /* 1222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(238),
+ /* 1222 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(239),
/* 1231 S> */ B(Nop),
- /* 1232 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(240),
+ /* 1232 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(241),
/* 1241 S> */ B(Nop),
- /* 1242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(242),
+ /* 1242 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(243),
/* 1251 S> */ B(Nop),
- /* 1252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(244),
+ /* 1252 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(245),
/* 1261 S> */ B(Nop),
- /* 1262 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(246),
+ /* 1262 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(247),
/* 1271 S> */ B(Nop),
- /* 1272 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(248),
+ /* 1272 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(249),
/* 1281 S> */ B(Nop),
- /* 1282 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(250),
+ /* 1282 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(251),
/* 1291 S> */ B(Nop),
- /* 1292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(252),
+ /* 1292 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(253),
/* 1301 S> */ B(Nop),
- /* 1302 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(254),
+ /* 1302 E> */ B(LdaNamedProperty), R(arg0), U8(0), U8(255),
/* 1311 S> */ B(Nop),
- /* 1312 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(256),
+ /* 1312 E> */ B(Wide), B(LdaNamedProperty), R16(arg0), U16(0), U16(257),
/* 1321 S> */ B(LdaSmi), I8(2),
- /* 1323 E> */ B(Wide), B(StaGlobalStrict), U16(1), U16(258),
+ /* 1323 E> */ B(Wide), B(StaGlobalStrict), U16(1), U16(259),
B(LdaUndefined),
/* 1328 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
index f7e241721a..425bdb1b9c 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/SuperCallAndSpread.golden
@@ -53,15 +53,13 @@ snippet: "
"
frame size: 7
parameter count: 1
-bytecode array length: 61
+bytecode array length: 58
bytecodes: [
B(CreateRestParameter),
B(Star), R(2),
B(Mov), R(closure), R(1),
B(Mov), R(new_target), R(0),
B(Ldar), R(new_target),
- B(LdaTheHole),
- B(Star), R(3),
/* 128 E> */ B(StackCheck),
B(Mov), R(2), R(3),
/* 140 S> */ B(Ldar), R(1),
@@ -102,15 +100,13 @@ snippet: "
"
frame size: 10
parameter count: 1
-bytecode array length: 94
+bytecode array length: 91
bytecodes: [
B(CreateRestParameter),
B(Star), R(2),
B(Mov), R(closure), R(1),
B(Mov), R(new_target), R(0),
B(Ldar), R(new_target),
- B(LdaTheHole),
- B(Star), R(3),
/* 128 E> */ B(StackCheck),
B(Mov), R(2), R(3),
/* 140 S> */ B(LdaUndefined),
@@ -119,14 +115,14 @@ bytecodes: [
B(Star), R(5),
B(LdaUndefined),
B(Star), R(6),
- B(CreateArrayLiteral), U8(0), U8(2), U8(9),
+ B(CreateArrayLiteral), U8(0), U8(3), U8(17),
B(Star), R(7),
B(LdaUndefined),
B(Star), R(8),
B(Mov), R(2), R(9),
/* 152 E> */ B(CallJSRuntime), U8(%spread_iterable), R(8), U8(2),
B(Star), R(8),
- B(CreateArrayLiteral), U8(1), U8(3), U8(9),
+ B(CreateArrayLiteral), U8(1), U8(4), U8(17),
B(Star), R(9),
B(CallJSRuntime), U8(%spread_arguments), R(6), U8(4),
B(Star), R(6),
@@ -144,8 +140,8 @@ bytecodes: [
/* 162 S> */ B(Return),
]
constant pool: [
- CONSTANT_ELEMENTS_PAIR_TYPE,
- CONSTANT_ELEMENTS_PAIR_TYPE,
+ TUPLE2_TYPE,
+ TUPLE2_TYPE,
]
handlers: [
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
index decd5df726..798ffa0408 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Switch.golden
@@ -22,11 +22,11 @@ bytecodes: [
B(Star), R(0),
B(Star), R(1),
/* 45 S> */ B(LdaSmi), I8(1),
- B(TestEqualStrict), R(1), U8(2),
+ B(TestEqualStrict), R(1), U8(3),
B(Mov), R(0), R(2),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(2), U8(3),
+ B(TestEqualStrict), R(2), U8(4),
B(JumpIfTrue), U8(7),
B(Jump), U8(8),
/* 66 S> */ B(LdaSmi), I8(2),
@@ -58,11 +58,11 @@ bytecodes: [
B(Star), R(0),
B(Star), R(1),
/* 45 S> */ B(LdaSmi), I8(1),
- B(TestEqualStrict), R(1), U8(2),
+ B(TestEqualStrict), R(1), U8(3),
B(Mov), R(0), R(2),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(2), U8(3),
+ B(TestEqualStrict), R(2), U8(4),
B(JumpIfTrue), U8(10),
B(Jump), U8(14),
/* 66 S> */ B(LdaSmi), I8(2),
@@ -96,11 +96,11 @@ bytecodes: [
B(Star), R(0),
B(Star), R(1),
/* 45 S> */ B(LdaSmi), I8(1),
- B(TestEqualStrict), R(1), U8(2),
+ B(TestEqualStrict), R(1), U8(3),
B(Mov), R(0), R(2),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(2), U8(3),
+ B(TestEqualStrict), R(2), U8(4),
B(JumpIfTrue), U8(8),
B(Jump), U8(12),
/* 66 S> */ B(LdaSmi), I8(2),
@@ -134,11 +134,11 @@ bytecodes: [
B(Star), R(0),
B(Star), R(1),
/* 45 S> */ B(LdaSmi), I8(2),
- B(TestEqualStrict), R(1), U8(2),
+ B(TestEqualStrict), R(1), U8(3),
B(Mov), R(0), R(2),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(3),
- B(TestEqualStrict), R(2), U8(3),
+ B(TestEqualStrict), R(2), U8(4),
B(JumpIfTrue), U8(6),
B(Jump), U8(6),
/* 66 S> */ B(Jump), U8(10),
@@ -173,11 +173,11 @@ bytecodes: [
/* 42 E> */ B(TypeOf),
B(Star), R(1),
/* 45 S> */ B(LdaSmi), I8(2),
- B(TestEqualStrict), R(1), U8(2),
+ B(TestEqualStrict), R(1), U8(3),
B(Mov), R(1), R(2),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(3),
- B(TestEqualStrict), R(2), U8(3),
+ B(TestEqualStrict), R(2), U8(4),
B(JumpIfTrue), U8(10),
B(Jump), U8(14),
/* 74 S> */ B(LdaSmi), I8(1),
@@ -214,7 +214,7 @@ bytecodes: [
B(Star), R(0),
B(Star), R(1),
/* 45 S> */ B(TypeOf),
- B(TestEqualStrict), R(1), U8(2),
+ B(TestEqualStrict), R(1), U8(3),
B(Mov), R(0), R(2),
B(JumpIfTrue), U8(4),
B(Jump), U8(8),
@@ -316,11 +316,11 @@ bytecodes: [
B(Star), R(0),
B(Star), R(1),
/* 45 S> */ B(LdaSmi), I8(1),
- B(TestEqualStrict), R(1), U8(2),
+ B(TestEqualStrict), R(1), U8(3),
B(Mov), R(0), R(2),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(2), U8(3),
+ B(TestEqualStrict), R(2), U8(4),
B(JumpIfTrueConstant), U8(0),
B(JumpConstant), U8(1),
/* 68 S> */ B(LdaSmi), I8(2),
@@ -486,18 +486,18 @@ bytecodes: [
B(Star), R(0),
B(Star), R(2),
/* 45 S> */ B(LdaSmi), I8(1),
- B(TestEqualStrict), R(2), U8(5),
+ B(TestEqualStrict), R(2), U8(6),
B(Mov), R(0), R(3),
B(JumpIfTrue), U8(11),
B(LdaSmi), I8(2),
- B(TestEqualStrict), R(3), U8(6),
+ B(TestEqualStrict), R(3), U8(7),
B(JumpIfTrue), U8(35),
B(Jump), U8(37),
B(Ldar), R(0),
- /* 79 E> */ B(AddSmi), I8(1), U8(2),
+ /* 79 E> */ B(AddSmi), I8(1), U8(3),
B(Star), R(1),
/* 70 S> */ B(LdaSmi), I8(2),
- B(TestEqualStrict), R(1), U8(3),
+ B(TestEqualStrict), R(1), U8(4),
B(Mov), R(1), R(4),
B(JumpIfTrue), U8(4),
B(Jump), U8(8),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
index 275bdf5491..21dd67c139 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TopLevelObjectLiterals.golden
@@ -24,10 +24,10 @@ bytecodes: [
/* 8 S> */ B(LdaConstant), U8(1),
B(Star), R(1),
B(LdaZero),
- B(CreateObjectLiteral), U8(2), U8(5), U8(1), R(3),
+ B(CreateObjectLiteral), U8(2), U8(6), U8(1), R(3),
B(Star), R(2),
- B(CreateClosure), U8(3), U8(4), U8(0),
- B(StaNamedOwnProperty), R(3), U8(4), U8(6),
+ B(CreateClosure), U8(3), U8(5), U8(0),
+ B(StaNamedOwnProperty), R(3), U8(4), U8(7),
B(CallRuntime), U16(Runtime::kInitializeVarGlobal), R(1), U8(3),
B(LdaUndefined),
/* 33 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
index 93f906c287..16800815c1 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryCatch.golden
@@ -9,25 +9,25 @@ wrap: yes
snippet: "
try { return 1; } catch(e) { return 2; }
"
-frame size: 3
+frame size: 2
parameter count: 1
bytecode array length: 32
bytecodes: [
/* 30 E> */ B(StackCheck),
- B(Mov), R(context), R(1),
+ B(Mov), R(context), R(0),
/* 40 S> */ B(LdaSmi), I8(1),
/* 75 S> */ B(Return),
B(Jump), U8(23),
- B(Star), R(2),
- B(Ldar), R(closure),
- B(CreateCatchContext), R(2), U8(0), U8(1),
B(Star), R(1),
+ B(Ldar), R(closure),
+ B(CreateCatchContext), R(1), U8(0), U8(1),
+ B(Star), R(0),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(1),
- B(PushContext), R(0),
+ B(Ldar), R(0),
+ B(PushContext), R(1),
/* 63 S> */ B(LdaSmi), I8(2),
- B(PopContext), R(0),
+ B(PopContext), R(1),
/* 75 S> */ B(Return),
B(LdaUndefined),
/* 75 S> */ B(Return),
@@ -46,39 +46,39 @@ snippet: "
try { a = 1 } catch(e1) {};
try { a = 2 } catch(e2) { a = 3 }
"
-frame size: 4
+frame size: 3
parameter count: 1
bytecode array length: 61
bytecodes: [
/* 30 E> */ B(StackCheck),
- B(Mov), R(context), R(2),
+ B(Mov), R(context), R(1),
/* 47 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
B(Jump), U8(20),
- B(Star), R(3),
- B(Ldar), R(closure),
- /* 49 E> */ B(CreateCatchContext), R(3), U8(0), U8(1),
B(Star), R(2),
+ B(Ldar), R(closure),
+ /* 49 E> */ B(CreateCatchContext), R(2), U8(0), U8(1),
+ B(Star), R(1),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(2),
- B(PushContext), R(1),
- B(PopContext), R(1),
- B(Mov), R(context), R(2),
+ B(Ldar), R(1),
+ B(PushContext), R(2),
+ B(PopContext), R(2),
+ B(Mov), R(context), R(1),
/* 75 S> */ B(LdaSmi), I8(2),
B(Star), R(0),
B(Jump), U8(24),
- B(Star), R(3),
- B(Ldar), R(closure),
- /* 77 E> */ B(CreateCatchContext), R(3), U8(2), U8(3),
B(Star), R(2),
+ B(Ldar), R(closure),
+ /* 77 E> */ B(CreateCatchContext), R(2), U8(2), U8(3),
+ B(Star), R(1),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(2),
- B(PushContext), R(1),
+ B(Ldar), R(1),
+ B(PushContext), R(2),
/* 95 S> */ B(LdaSmi), I8(3),
B(Star), R(0),
- B(PopContext), R(1),
+ B(PopContext), R(2),
B(LdaUndefined),
/* 103 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
index 6b5dadb53f..faa4f4c931 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/TryFinally.golden
@@ -12,7 +12,7 @@ snippet: "
"
frame size: 4
parameter count: 1
-bytecode array length: 46
+bytecode array length: 44
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
@@ -35,8 +35,7 @@ bytecodes: [
/* 72 E> */ B(SetPendingMessage),
B(LdaZero),
B(TestEqualStrictNoFeedback), R(1),
- B(JumpIfTrue), U8(4),
- B(Jump), U8(5),
+ B(JumpIfFalse), U8(5),
B(Ldar), R(2),
B(ReThrow),
B(LdaUndefined),
@@ -53,47 +52,46 @@ snippet: "
var a = 1;
try { a = 2; } catch(e) { a = 20 } finally { a = 3; }
"
-frame size: 7
+frame size: 6
parameter count: 1
-bytecode array length: 73
+bytecode array length: 71
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 42 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
+ B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
- B(Mov), R(context), R(5),
/* 51 S> */ B(LdaSmi), I8(2),
B(Star), R(0),
B(Jump), U8(24),
- B(Star), R(6),
- B(Ldar), R(closure),
- /* 53 E> */ B(CreateCatchContext), R(6), U8(0), U8(1),
B(Star), R(5),
+ B(Ldar), R(closure),
+ /* 53 E> */ B(CreateCatchContext), R(5), U8(0), U8(1),
+ B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(5),
- B(PushContext), R(1),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
/* 71 S> */ B(LdaSmi), I8(20),
B(Star), R(0),
- B(PopContext), R(1),
+ B(PopContext), R(5),
B(LdaSmi), I8(-1),
- B(Star), R(2),
+ B(Star), R(1),
B(Jump), U8(7),
- B(Star), R(3),
- B(LdaZero),
B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
B(LdaTheHole),
/* 73 E> */ B(SetPendingMessage),
- B(Star), R(4),
+ B(Star), R(3),
/* 90 S> */ B(LdaSmi), I8(3),
B(Star), R(0),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 92 E> */ B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(2),
- B(JumpIfTrue), U8(4),
- B(Jump), U8(5),
- B(Ldar), R(3),
+ B(TestEqualStrictNoFeedback), R(1),
+ B(JumpIfFalse), U8(5),
+ B(Ldar), R(2),
B(ReThrow),
B(LdaUndefined),
/* 99 S> */ B(Return),
@@ -113,58 +111,57 @@ snippet: "
try { a = 1 } catch(e) { a = 2 }
} catch(e) { a = 20 } finally { a = 3; }
"
-frame size: 8
+frame size: 7
parameter count: 1
-bytecode array length: 96
+bytecode array length: 94
bytecodes: [
/* 30 E> */ B(StackCheck),
+ B(Mov), R(context), R(3),
B(Mov), R(context), R(4),
B(Mov), R(context), R(5),
- B(Mov), R(context), R(6),
/* 55 S> */ B(LdaSmi), I8(1),
B(Star), R(0),
B(Jump), U8(24),
- B(Star), R(7),
- B(Ldar), R(closure),
- /* 57 E> */ B(CreateCatchContext), R(7), U8(0), U8(1),
B(Star), R(6),
+ B(Ldar), R(closure),
+ /* 57 E> */ B(CreateCatchContext), R(6), U8(0), U8(1),
+ B(Star), R(5),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(6),
- B(PushContext), R(1),
+ B(Ldar), R(5),
+ B(PushContext), R(6),
/* 74 S> */ B(LdaSmi), I8(2),
B(Star), R(0),
- B(PopContext), R(1),
+ B(PopContext), R(6),
B(Jump), U8(24),
- B(Star), R(6),
- B(Ldar), R(closure),
- /* 76 E> */ B(CreateCatchContext), R(6), U8(0), U8(2),
B(Star), R(5),
+ B(Ldar), R(closure),
+ /* 76 E> */ B(CreateCatchContext), R(5), U8(0), U8(2),
+ B(Star), R(4),
B(LdaTheHole),
B(SetPendingMessage),
- B(Ldar), R(5),
- B(PushContext), R(1),
+ B(Ldar), R(4),
+ B(PushContext), R(5),
/* 95 S> */ B(LdaSmi), I8(20),
B(Star), R(0),
- B(PopContext), R(1),
+ B(PopContext), R(5),
B(LdaSmi), I8(-1),
- B(Star), R(2),
+ B(Star), R(1),
B(Jump), U8(7),
- B(Star), R(3),
- B(LdaZero),
B(Star), R(2),
+ B(LdaZero),
+ B(Star), R(1),
B(LdaTheHole),
/* 97 E> */ B(SetPendingMessage),
- B(Star), R(4),
+ B(Star), R(3),
/* 114 S> */ B(LdaSmi), I8(3),
B(Star), R(0),
- B(Ldar), R(4),
+ B(Ldar), R(3),
/* 116 E> */ B(SetPendingMessage),
B(LdaZero),
- B(TestEqualStrictNoFeedback), R(2),
- B(JumpIfTrue), U8(4),
- B(Jump), U8(5),
- B(Ldar), R(3),
+ B(TestEqualStrictNoFeedback), R(1),
+ B(JumpIfFalse), U8(5),
+ B(Ldar), R(2),
B(ReThrow),
B(LdaUndefined),
/* 123 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
index ecf9ed6d64..7a2559e453 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/Typeof.golden
@@ -42,7 +42,7 @@ parameter count: 1
bytecode array length: 6
bytecodes: [
/* 22 E> */ B(StackCheck),
- /* 28 S> */ B(LdaGlobalInsideTypeof), U8(0), U8(2),
+ /* 28 S> */ B(LdaGlobalInsideTypeof), U8(0), U8(3),
B(TypeOf),
/* 46 S> */ B(Return),
]
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
index 7bf28b4058..6473a7d0a6 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/UnaryOperators.golden
@@ -21,11 +21,11 @@ bytecodes: [
/* 42 S> */ B(LdaZero),
B(Star), R(0),
/* 54 S> */ B(LdaSmi), I8(10),
- /* 54 E> */ B(TestEqual), R(0), U8(2),
+ /* 54 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfTrue), U8(13),
/* 45 E> */ B(StackCheck),
/* 65 S> */ B(Ldar), R(0),
- /* 71 E> */ B(AddSmi), I8(10), U8(3),
+ /* 71 E> */ B(AddSmi), I8(10), U8(4),
B(Star), R(0),
B(JumpLoop), U8(15), I8(0),
/* 79 S> */ B(Ldar), R(0),
@@ -56,7 +56,7 @@ bytecodes: [
B(ToBooleanLogicalNot),
B(Star), R(0),
/* 74 S> */ B(LdaFalse),
- /* 74 E> */ B(TestEqual), R(0), U8(2),
+ /* 74 E> */ B(TestEqual), R(0), U8(3),
B(JumpIfFalse), U8(5),
B(JumpLoop), U8(12), I8(0),
/* 85 S> */ B(Ldar), R(0),
@@ -80,7 +80,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(101),
B(Star), R(0),
/* 47 S> */ B(Nop),
- /* 61 E> */ B(MulSmi), I8(3), U8(2),
+ /* 61 E> */ B(MulSmi), I8(3), U8(3),
B(LdaUndefined),
/* 67 S> */ B(Return),
]
@@ -103,8 +103,8 @@ bytecodes: [
/* 42 S> */ B(Wide), B(LdaSmi), I16(1234),
B(Star), R(0),
/* 56 S> */ B(Nop),
- /* 64 E> */ B(Mul), R(0), U8(2),
- /* 68 E> */ B(SubSmi), I8(1), U8(3),
+ /* 64 E> */ B(Mul), R(0), U8(3),
+ /* 68 E> */ B(SubSmi), I8(1), U8(4),
B(LdaUndefined),
B(Star), R(1),
/* 74 S> */ B(Nop),
@@ -128,7 +128,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(13),
B(Star), R(0),
/* 46 S> */ B(Nop),
- /* 53 E> */ B(BitwiseXorSmi), I8(-1), U8(2),
+ /* 53 E> */ B(BitwiseXorSmi), I8(-1), U8(3),
/* 57 S> */ B(Return),
]
constant pool: [
@@ -149,7 +149,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(13),
B(Star), R(0),
/* 46 S> */ B(Nop),
- /* 53 E> */ B(MulSmi), I8(1), U8(2),
+ /* 53 E> */ B(MulSmi), I8(1), U8(3),
/* 57 S> */ B(Return),
]
constant pool: [
@@ -170,7 +170,7 @@ bytecodes: [
/* 42 S> */ B(LdaSmi), I8(13),
B(Star), R(0),
/* 46 S> */ B(Nop),
- /* 53 E> */ B(MulSmi), I8(-1), U8(2),
+ /* 53 E> */ B(MulSmi), I8(-1), U8(3),
/* 57 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
index 402b9aea80..b333c2f7e7 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WideRegisters.golden
@@ -525,7 +525,7 @@ bytecode array length: 18
bytecodes: [
/* 30 E> */ B(StackCheck),
/* 1494 S> */ B(LdaSmi), I8(3),
- /* 1501 E> */ B(TestGreaterThan), R(2), U8(2),
+ /* 1501 E> */ B(TestGreaterThan), R(2), U8(3),
B(JumpIfFalse), U8(7),
/* 1508 S> */ B(Wide), B(Ldar), R16(129),
/* 1536 S> */ B(Return),
@@ -709,12 +709,12 @@ bytecodes: [
/* 1503 S> */ B(LdaZero),
B(Star), R(0),
/* 1506 S> */ B(LdaSmi), I8(3),
- /* 1515 E> */ B(Wide), B(TestEqual), R16(129), U16(2),
+ /* 1515 E> */ B(Wide), B(TestEqual), R16(129), U16(3),
B(JumpIfFalse), U8(10),
/* 1534 S> */ B(Wide), B(Mov), R16(0), R16(129),
B(Ldar), R(0),
/* 1540 S> */ B(LdaSmi), I8(3),
- /* 1547 E> */ B(TestGreaterThan), R(2), U8(3),
+ /* 1547 E> */ B(TestGreaterThan), R(2), U8(4),
B(JumpIfFalse), U8(5),
/* 1554 S> */ B(Ldar), R(0),
/* 1580 S> */ B(Return),
@@ -901,15 +901,15 @@ bytecodes: [
/* 1523 S> */ B(LdaZero),
B(Wide), B(Star), R16(128),
/* 1538 S> */ B(LdaSmi), I8(64),
- /* 1538 E> */ B(Wide), B(TestLessThan), R16(128), U16(2),
+ /* 1538 E> */ B(Wide), B(TestLessThan), R16(128), U16(3),
B(JumpIfFalse), U8(31),
/* 1518 E> */ B(StackCheck),
/* 1555 S> */ B(Wide), B(Ldar), R16(128),
- /* 1561 E> */ B(Add), R(1), U8(4),
+ /* 1561 E> */ B(Add), R(1), U8(5),
B(Wide), B(Mov), R16(1), R16(157),
B(Star), R(1),
/* 1548 S> */ B(Wide), B(Ldar), R16(128),
- B(Inc), U8(3),
+ B(Inc), U8(4),
B(Wide), B(Star), R16(128),
B(JumpLoop), U8(36), I8(0),
/* 1567 S> */ B(Wide), B(Ldar), R16(128),
@@ -1101,12 +1101,12 @@ bytecodes: [
B(Wide), B(Star), R16(161),
/* 1526 S> */ B(Wide), B(ForInContinue), R16(161), R16(160),
B(JumpIfFalse), U8(45),
- B(Wide), B(ForInNext), R16(157), R16(161), R16(158), U16(3),
+ B(Wide), B(ForInNext), R16(157), R16(161), R16(158), U16(4),
B(JumpIfUndefined), U8(22),
B(Wide), B(Star), R16(128),
/* 1521 E> */ B(StackCheck),
/* 1541 S> */ B(Wide), B(Ldar), R16(128),
- /* 1547 E> */ B(Add), R(1), U8(2),
+ /* 1547 E> */ B(Add), R(1), U8(3),
B(Wide), B(Mov), R16(1), R16(162),
B(Star), R(1),
/* 1544 E> */ B(Wide), B(ForInStep), R16(161),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
index 93467600ef..67822005ba 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/WithStatement.golden
@@ -9,16 +9,16 @@ wrap: yes
snippet: "
with ({x:42}) { return x; }
"
-frame size: 2
+frame size: 1
parameter count: 1
bytecode array length: 22
bytecodes: [
/* 30 E> */ B(StackCheck),
- /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(2), U8(1), R(1),
- B(Ldar), R(1),
- B(ToObject), R(1),
+ /* 34 S> */ B(CreateObjectLiteral), U8(0), U8(3), U8(1), R(0),
+ B(Ldar), R(0),
+ B(ToObject), R(0),
B(Ldar), R(closure),
- B(CreateWithContext), R(1), U8(1),
+ B(CreateWithContext), R(0), U8(1),
B(PushContext), R(0),
/* 50 S> */ B(LdaLookupSlot), U8(2),
B(PopContext), R(0),
diff --git a/deps/v8/test/cctest/interpreter/source-position-matcher.cc b/deps/v8/test/cctest/interpreter/source-position-matcher.cc
index 082ac01ef3..9cff95af5b 100644
--- a/deps/v8/test/cctest/interpreter/source-position-matcher.cc
+++ b/deps/v8/test/cctest/interpreter/source-position-matcher.cc
@@ -56,9 +56,9 @@ struct PositionTableEntryComparer {
bool SourcePositionMatcher::Match(Handle<BytecodeArray> original_bytecode,
Handle<BytecodeArray> optimized_bytecode) {
SourcePositionTableIterator original(
- original_bytecode->source_position_table());
+ original_bytecode->SourcePositionTable());
SourcePositionTableIterator optimized(
- optimized_bytecode->source_position_table());
+ optimized_bytecode->SourcePositionTable());
int last_original_bytecode_offset = 0;
int last_optimized_bytecode_offset = 0;
diff --git a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
index b249799f6c..6b6227e96e 100644
--- a/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
+++ b/deps/v8/test/cctest/interpreter/test-bytecode-generator.cc
@@ -2507,6 +2507,108 @@ TEST(ForAwaitOf) {
i::FLAG_harmony_async_iteration = old_flag;
}
+TEST(StandardForLoop) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ "function f() {\n"
+ " for (let x = 0; x < 10; ++x) { let y = x; }\n"
+ "}\n"
+ "f();\n",
+
+ "function f() {\n"
+ " for (let x = 0; x < 10; ++x) { eval('1'); }\n"
+ "}\n"
+ "f();\n",
+
+ "function f() {\n"
+ " for (let x = 0; x < 10; ++x) { (function() { return x; })(); }\n"
+ "}\n"
+ "f();\n",
+
+ "function f() {\n"
+ " for (let { x, y } = { x: 0, y: 3 }; y > 0; --y) { let z = x + y; }\n"
+ "}\n"
+ "f();\n",
+
+ "function* f() {\n"
+ " for (let x = 0; x < 10; ++x) { let y = x; }\n"
+ "}\n"
+ "f();\n",
+
+ "function* f() {\n"
+ " for (let x = 0; x < 10; ++x) yield x;\n"
+ "}\n"
+ "f();\n",
+
+ "async function f() {\n"
+ " for (let x = 0; x < 10; ++x) { let y = x; }\n"
+ "}\n"
+ "f();\n",
+
+ "async function f() {\n"
+ " for (let x = 0; x < 10; ++x) await x;\n"
+ "}\n"
+ "f();\n"};
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("StandardForLoop.golden")));
+}
+
+TEST(ForOfLoop) {
+ InitializedIgnitionHandleScope scope;
+ BytecodeExpectationsPrinter printer(CcTest::isolate());
+ printer.set_wrap(false);
+ printer.set_test_function_name("f");
+
+ const char* snippets[] = {
+ "function f(arr) {\n"
+ " for (let x of arr) { let y = x; }\n"
+ "}\n"
+ "f([1, 2, 3]);\n",
+
+ "function f(arr) {\n"
+ " for (let x of arr) { eval('1'); }\n"
+ "}\n"
+ "f([1, 2, 3]);\n",
+
+ "function f(arr) {\n"
+ " for (let x of arr) { (function() { return x; })(); }\n"
+ "}\n"
+ "f([1, 2, 3]);\n",
+
+ "function f(arr) {\n"
+ " for (let { x, y } of arr) { let z = x + y; }\n"
+ "}\n"
+ "f([{ x: 0, y: 3 }, { x: 1, y: 9 }, { x: -12, y: 17 }]);\n",
+
+ "function* f(arr) {\n"
+ " for (let x of arr) { let y = x; }\n"
+ "}\n"
+ "f([1, 2, 3]);\n",
+
+ "function* f(arr) {\n"
+ " for (let x of arr) yield x;\n"
+ "}\n"
+ "f([1, 2, 3]);\n",
+
+ "async function f(arr) {\n"
+ " for (let x of arr) { let y = x; }\n"
+ "}\n"
+ "f([1, 2, 3]);\n",
+
+ "async function f(arr) {\n"
+ " for (let x of arr) await x;\n"
+ "}\n"
+ "f([1, 2, 3]);\n"};
+
+ CHECK(CompareTexts(BuildActual(printer, snippets),
+ LoadGolden("ForOfLoop.golden")));
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/interpreter/test-interpreter.cc b/deps/v8/test/cctest/interpreter/test-interpreter.cc
index f58740ea20..eba3ba3057 100644
--- a/deps/v8/test/cctest/interpreter/test-interpreter.cc
+++ b/deps/v8/test/cctest/interpreter/test-interpreter.cc
@@ -35,7 +35,7 @@ TEST(InterpreterReturn) {
Zone* zone = handles.main_zone();
Handle<Object> undefined_value = isolate->factory()->undefined_value();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -52,7 +52,7 @@ TEST(InterpreterLoadUndefined) {
Zone* zone = handles.main_zone();
Handle<Object> undefined_value = isolate->factory()->undefined_value();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadUndefined().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -69,7 +69,7 @@ TEST(InterpreterLoadNull) {
Zone* zone = handles.main_zone();
Handle<Object> null_value = isolate->factory()->null_value();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadNull().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -86,7 +86,7 @@ TEST(InterpreterLoadTheHole) {
Zone* zone = handles.main_zone();
Handle<Object> the_hole_value = isolate->factory()->the_hole_value();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadTheHole().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -103,7 +103,7 @@ TEST(InterpreterLoadTrue) {
Zone* zone = handles.main_zone();
Handle<Object> true_value = isolate->factory()->true_value();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadTrue().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -120,7 +120,7 @@ TEST(InterpreterLoadFalse) {
Zone* zone = handles.main_zone();
Handle<Object> false_value = isolate->factory()->false_value();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadFalse().Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -138,7 +138,7 @@ TEST(InterpreterLoadLiteral) {
// Small Smis.
for (int i = -128; i < 128; i++) {
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadLiteral(Smi::FromInt(i)).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -150,7 +150,7 @@ TEST(InterpreterLoadLiteral) {
// Large Smis.
{
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadLiteral(Smi::FromInt(0x12345678)).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -166,7 +166,7 @@ TEST(InterpreterLoadLiteral) {
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadLiteral(ast_factory.NewNumber(-2.1e19)).Return();
@@ -184,7 +184,7 @@ TEST(InterpreterLoadLiteral) {
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
const AstRawString* raw_string = ast_factory.GetOneByteString("String");
builder.LoadLiteral(raw_string).Return();
@@ -206,7 +206,7 @@ TEST(InterpreterLoadStoreRegisters) {
Zone* zone = handles.main_zone();
Handle<Object> true_value = isolate->factory()->true_value();
for (int i = 0; i <= kMaxInt8; i++) {
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, i + 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, i + 1);
Register reg(i);
builder.LoadTrue()
@@ -291,7 +291,7 @@ TEST(InterpreterShiftOpsSmi) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
Factory* factory = isolate->factory();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -330,7 +330,7 @@ TEST(InterpreterBinaryOpsSmi) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
Factory* factory = isolate->factory();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -372,7 +372,7 @@ TEST(InterpreterBinaryOpsHeapNumber) {
Factory* factory = isolate->factory();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -447,7 +447,7 @@ TEST(InterpreterStringAdd) {
};
for (size_t i = 0; i < arraysize(test_cases); i++) {
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
Handle<i::FeedbackMetadata> metadata =
@@ -479,7 +479,7 @@ TEST(InterpreterParameter1) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadAccumulatorWithRegister(builder.Receiver()).Return();
Handle<BytecodeArray> bytecode_array = builder.ToBytecodeArray(isolate);
@@ -505,7 +505,7 @@ TEST(InterpreterParameter8) {
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 8, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 8, 0);
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -664,7 +664,7 @@ TEST(InterpreterBinaryOpTypeFeedback) {
Handle<Smi>(Smi::FromInt(1), isolate), BinaryOperationFeedback::kAny}};
for (const BinaryOpExpectation& test_case : kTestCases) {
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
i::FeedbackVectorSpec feedback_spec(zone);
i::FeedbackSlot slot0 = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -777,7 +777,7 @@ TEST(InterpreterBinaryOpSmiTypeFeedback) {
Handle<Smi>(Smi::FromInt(1), isolate), BinaryOperationFeedback::kAny}};
for (const BinaryOpExpectation& test_case : kTestCases) {
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
i::FeedbackVectorSpec feedback_spec(zone);
i::FeedbackSlot slot0 = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -828,7 +828,7 @@ TEST(InterpreterUnaryOpFeedback) {
{Token::Value::ADD, smi_one, smi_max, number, str},
{Token::Value::SUB, smi_one, smi_min, number, str}};
for (TestCase const& test_case : kTestCases) {
- BytecodeArrayBuilder builder(isolate, zone, 4, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 4, 0);
i::FeedbackVectorSpec feedback_spec(zone);
i::FeedbackSlot slot0 = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -892,7 +892,7 @@ TEST(InterpreterBitwiseTypeFeedback) {
Token::Value::SHL, Token::Value::SHR, Token::Value::SAR};
for (Token::Value op : kBitwiseBinaryOperators) {
- BytecodeArrayBuilder builder(isolate, zone, 4, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 4, 0);
i::FeedbackVectorSpec feedback_spec(zone);
i::FeedbackSlot slot0 = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -943,7 +943,7 @@ TEST(InterpreterParameter1Assign) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadLiteral(Smi::FromInt(5))
.StoreAccumulatorInRegister(builder.Receiver())
@@ -1074,7 +1074,7 @@ TEST(InterpreterLoadNamedProperty) {
const AstRawString* name = ast_factory.GetOneByteString("val");
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadNamedProperty(builder.Receiver(), name, GetIndex(slot)).Return();
ast_factory.Internalize(isolate);
@@ -1127,7 +1127,7 @@ TEST(InterpreterLoadKeyedProperty) {
const AstRawString* key = ast_factory.GetOneByteString("key");
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
builder.LoadLiteral(key)
.LoadKeyedProperty(builder.Receiver(), GetIndex(slot))
@@ -1170,7 +1170,7 @@ TEST(InterpreterStoreNamedProperty) {
const AstRawString* name = ast_factory.GetOneByteString("val");
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadLiteral(Smi::FromInt(999))
.StoreNamedProperty(builder.Receiver(), name, GetIndex(slot), STRICT)
@@ -1233,7 +1233,7 @@ TEST(InterpreterStoreKeyedProperty) {
const AstRawString* name = ast_factory.GetOneByteString("val");
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
builder.LoadLiteral(name)
.StoreAccumulatorInRegister(Register(0))
@@ -1291,7 +1291,7 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
// Check with no args.
{
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
Register reg = builder.register_allocator()->NewRegister();
RegisterList args = builder.register_allocator()->NewRegisterList(1);
builder.LoadNamedProperty(builder.Receiver(), name, slot_index)
@@ -1319,7 +1319,7 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
// Check that receiver is passed properly.
{
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
Register reg = builder.register_allocator()->NewRegister();
RegisterList args = builder.register_allocator()->NewRegisterList(1);
builder.LoadNamedProperty(builder.Receiver(), name, slot_index)
@@ -1348,7 +1348,7 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
// Check with two parameters (+ receiver).
{
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 4);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 4);
Register reg = builder.register_allocator()->NewRegister();
RegisterList args = builder.register_allocator()->NewRegisterList(3);
@@ -1385,7 +1385,7 @@ static void TestInterpreterCall(TailCallMode tail_call_mode) {
// Check with 10 parameters (+ receiver).
{
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 12);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 12);
Register reg = builder.register_allocator()->NewRegister();
RegisterList args = builder.register_allocator()->NewRegisterList(11);
@@ -1471,7 +1471,7 @@ TEST(InterpreterJumps) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 0, 0, 2);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 2);
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -1509,7 +1509,7 @@ TEST(InterpreterConditionalJumps) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 0, 0, 2);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 2);
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -1559,7 +1559,7 @@ TEST(InterpreterConditionalJumps2) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 0, 0, 2);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 2);
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -1610,7 +1610,7 @@ TEST(InterpreterJumpConstantWith16BitOperand) {
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 257);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 257);
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddInterpreterBinaryOpICSlot();
@@ -1671,7 +1671,7 @@ TEST(InterpreterJumpWith32BitOperand) {
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
Register reg(0);
BytecodeLabel done;
@@ -1763,7 +1763,7 @@ TEST(InterpreterSmiComparisons) {
HandleAndZoneScope handles;
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 0, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddInterpreterCompareICSlot();
@@ -1812,7 +1812,7 @@ TEST(InterpreterHeapNumberComparisons) {
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 0, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot slot = feedback_spec.AddInterpreterCompareICSlot();
@@ -1867,7 +1867,7 @@ TEST(InterpreterStringComparisons) {
Handle<i::FeedbackMetadata> metadata =
NewFeedbackMetadata(isolate, &feedback_spec);
- BytecodeArrayBuilder builder(isolate, zone, 0, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
Register r0(0);
builder.LoadLiteral(ast_factory.GetOneByteString(lhs))
.StoreAccumulatorInRegister(r0)
@@ -1942,7 +1942,7 @@ TEST(InterpreterMixedComparisons) {
Zone* zone = handles.main_zone();
AstValueFactory ast_factory(zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- BytecodeArrayBuilder builder(isolate, zone, 0, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
FeedbackVectorSpec feedback_spec(zone);
FeedbackSlot string_add_slot =
@@ -2122,7 +2122,7 @@ TEST(InterpreterCompareTypeOf) {
LiteralFlag literal_flag = kLiterals[l];
if (literal_flag == LiteralFlag::kOther) continue;
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
builder.LoadAccumulatorWithRegister(builder.Receiver())
.CompareTypeOf(kLiterals[l])
.Return();
@@ -2150,7 +2150,7 @@ TEST(InterpreterInstanceOf) {
Handle<i::Object> cases[] = {Handle<i::Object>::cast(instance), other};
for (size_t i = 0; i < arraysize(cases); i++) {
bool expected_value = (i == 0);
- BytecodeArrayBuilder builder(isolate, zone, 0, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
Register r0(0);
size_t case_entry = builder.AllocateDeferredConstantPoolEntry();
@@ -2187,7 +2187,7 @@ TEST(InterpreterTestIn) {
const char* properties[] = {"length", "fuzzle", "x", "0"};
for (size_t i = 0; i < arraysize(properties); i++) {
bool expected_value = (i == 0);
- BytecodeArrayBuilder builder(isolate, zone, 0, 0, 1);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 1);
Register r0(0);
builder.LoadLiteral(ast_factory.GetOneByteString(properties[i]))
@@ -2216,7 +2216,7 @@ TEST(InterpreterUnaryNot) {
Zone* zone = handles.main_zone();
for (size_t i = 1; i < 10; i++) {
bool expected_value = ((i & 1) == 1);
- BytecodeArrayBuilder builder(isolate, zone, 0, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
Register r0(0);
builder.LoadFalse();
@@ -2255,7 +2255,7 @@ TEST(InterpreterUnaryNotNonBoolean) {
};
for (size_t i = 0; i < arraysize(object_type_tuples); i++) {
- BytecodeArrayBuilder builder(isolate, zone, 0, 0, 0);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 0);
Register r0(0);
builder.LoadLiteral(object_type_tuples[i].first);
@@ -2305,7 +2305,7 @@ TEST(InterpreterCallRuntime) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 2);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 2);
RegisterList args = builder.register_allocator()->NewRegisterList(2);
builder.LoadLiteral(Smi::FromInt(15))
@@ -2328,7 +2328,7 @@ TEST(InterpreterInvokeIntrinsic) {
Isolate* isolate = handles.main_isolate();
Zone* zone = handles.main_zone();
- BytecodeArrayBuilder builder(isolate, zone, 1, 0, 2);
+ BytecodeArrayBuilder builder(isolate, zone, 1, 2);
builder.LoadLiteral(Smi::FromInt(15))
.StoreAccumulatorInRegister(Register(0))
diff --git a/deps/v8/test/cctest/parsing/test-preparser.cc b/deps/v8/test/cctest/parsing/test-preparser.cc
index 2fcff02d95..c28195cb01 100644
--- a/deps/v8/test/cctest/parsing/test-preparser.cc
+++ b/deps/v8/test/cctest/parsing/test-preparser.cc
@@ -28,7 +28,7 @@ enum SkipTests {
TEST(PreParserScopeAnalysis) {
i::FLAG_lazy_inner_functions = true;
- i::FLAG_preparser_scope_analysis = true;
+ i::FLAG_experimental_preparser_scope_analysis = true;
i::Isolate* isolate = CcTest::i_isolate();
i::Factory* factory = isolate->factory();
i::HandleScope scope(isolate);
@@ -76,45 +76,111 @@ TEST(PreParserScopeAnalysis) {
bool strict_outer;
bool strict_test_function;
bool arrow;
+ std::vector<unsigned> location; // "Directions" to the relevant scope.
} outers[] = {
// Normal case (test function at the laziness boundary):
- {"(function outer() { ", "})();", " function test(%s) { %s }",
- "(function test(%s) { %s })()", false, false, false},
+ {"(function outer() { ",
+ "})();",
+ " function test(%s) { %s }",
+ "(function test(%s) { %s })()",
+ false,
+ false,
+ false,
+ {0, 0}},
// Test function deeper:
- {"(function outer() { ", "})();",
+ {"(function outer() { ",
+ "})();",
" function inner() { function test(%s) { %s } }",
- "(function inner() { function test(%s) { %s } })()", false, false,
- false},
+ "(function inner() { function test(%s) { %s } })()",
+ false,
+ false,
+ false,
+ {0, 0}},
// Arrow functions (they can never be at the laziness boundary):
- {"(function outer() { ", "})();", " function inner() { (%s) => { %s } }",
- "(function inner() { (%s) => { %s } })()", false, false, true},
+ {"(function outer() { ",
+ "})();",
+ " function inner() { (%s) => { %s } }",
+ "(function inner() { (%s) => { %s } })()",
+ false,
+ false,
+ true,
+ {0, 0}},
// Repeat the above mentioned cases w/ outer function declaring itself
// strict:
- {"(function outer() { 'use strict'; ", "})();",
- " function test(%s) { %s }", "(function test(%s) { %s })()", true, false,
- false},
- {"(function outer() { 'use strict'; ", "})();",
+ {"(function outer() { 'use strict'; ",
+ "})();",
+ " function test(%s) { %s }",
+ "(function test(%s) { %s })()",
+ true,
+ false,
+ false,
+ {0, 0}},
+ {"(function outer() { 'use strict'; ",
+ "})();",
" function inner() { function test(%s) { %s } }",
- "(function inner() { function test(%s) { %s } })()", true, false, false},
- {"(function outer() { 'use strict'; ", "})();",
+ "(function inner() { function test(%s) { %s } })()",
+ true,
+ false,
+ false,
+ {0, 0}},
+ {"(function outer() { 'use strict'; ",
+ "})();",
" function inner() { (%s) => { %s } }",
- "(function inner() { (%s) => { %s } })()", true, false, true},
+ "(function inner() { (%s) => { %s } })()",
+ true,
+ false,
+ true,
+ {0, 0}},
// ... and with the test function declaring itself strict:
- {"(function outer() { ", "})();",
+ {"(function outer() { ",
+ "})();",
" function test(%s) { 'use strict'; %s }",
- "(function test(%s) { 'use strict'; %s })()", false, true, false},
- {"(function outer() { ", "})();",
+ "(function test(%s) { 'use strict'; %s })()",
+ false,
+ true,
+ false,
+ {0, 0}},
+ {"(function outer() { ",
+ "})();",
" function inner() { function test(%s) { 'use strict'; %s } }",
- "(function inner() { function test(%s) { 'use strict'; %s } })()", false,
- true, false},
- {"(function outer() { ", "})();",
+ "(function inner() { function test(%s) { 'use strict'; %s } })()",
+ false,
+ true,
+ false,
+ {0, 0}},
+ {"(function outer() { ",
+ "})();",
" function inner() { (%s) => { 'use strict'; %s } }",
- "(function inner() { (%s) => { 'use strict'; %s } })()", false, true,
- true},
+ "(function inner() { (%s) => { 'use strict'; %s } })()",
+ false,
+ true,
+ true,
+ {0, 0}},
+
+ // Methods containing skippable functions. Cannot test at the laziness
+ // boundary, since there's no way to force eager parsing of a method.
+ {"class MyClass { constructor() {",
+ "} }",
+ " function test(%s) { %s }",
+ "(function test(%s) { %s })()",
+ true,
+ true,
+ false,
+ {0, 0, 0}},
+
+ {"class MyClass { mymethod() {",
+ "} }",
+ " function test(%s) { %s }",
+ "(function test(%s) { %s })()",
+ true,
+ true,
+ false,
+ // The default constructor is scope 0 inside the class.
+ {0, 1, 0}},
// FIXME(marja): Generators and async functions
};
@@ -166,6 +232,15 @@ TEST(PreParserScopeAnalysis) {
{"if (true) { const var1 = 5; }"},
{"const var1 = 5; function f() { var1; }"},
+ // Functions.
+ {"function f1() { let var2; }"},
+ {"var var1 = function f1() { let var2; }"},
+ {"let var1 = function f1() { let var2; }"},
+ {"const var1 = function f1() { let var2; }"},
+ {"var var1 = function() { let var2; }"},
+ {"let var1 = function() { let var2; }"},
+ {"const var1 = function() { let var2; }"},
+
// Redeclarations.
{"var var1; var var1;"},
{"var var1; var var1; var1 = 5;"},
@@ -491,6 +566,9 @@ TEST(PreParserScopeAnalysis) {
"{name9: var9, name10: var10}, ...var11",
"", SKIP_STRICT_FUNCTION, false},
+ // Complicated cases from bugs.
+ {"var1 = {} = {}", "", SKIP_STRICT_FUNCTION, false},
+
// Destructuring rest. Because we can.
{"var1, ...[var2]", "", SKIP_STRICT_FUNCTION},
{"var1, ...[var2]", "() => { var2; }", SKIP_STRICT_FUNCTION},
@@ -547,6 +625,55 @@ TEST(PreParserScopeAnalysis) {
// Shadowing the catch variable
{"try { } catch(var1) { var var1 = 3; }"},
{"try { } catch(var1) { var var1 = 3; function f() { var1 = 3; } }"},
+
+ // Classes
+ {"class MyClass {}"},
+ {"var1 = class MyClass {}"},
+ {"var var1 = class MyClass {}"},
+ {"let var1 = class MyClass {}"},
+ {"const var1 = class MyClass {}"},
+ {"var var1 = class {}"},
+ {"let var1 = class {}"},
+ {"const var1 = class {}"},
+
+ {"class MyClass { constructor() {} }"},
+ {"class MyClass { constructor() { var var1; } }"},
+ {"class MyClass { constructor() { var var1 = 11; } }"},
+ {"class MyClass { constructor() { var var1; function foo() { var1 = 11; "
+ "} } }"},
+
+ {"class MyClass { m() {} }"},
+ {"class MyClass { m() { var var1; } }"},
+ {"class MyClass { m() { var var1 = 11; } }"},
+ {"class MyClass { m() { var var1; function foo() { var1 = 11; } } }"},
+
+ {"class MyClass { static m() {} }"},
+ {"class MyClass { static m() { var var1; } }"},
+ {"class MyClass { static m() { var var1 = 11; } }"},
+ {"class MyClass { static m() { var var1; function foo() { var1 = 11; } } "
+ "}"},
+
+ {"class MyBase {} class MyClass extends MyBase {}"},
+ {"class MyClass extends MyBase { constructor() {} }"},
+ {"class MyClass extends MyBase { constructor() { super(); } }"},
+ {"class MyClass extends MyBase { constructor() { var var1; } }"},
+ {"class MyClass extends MyBase { constructor() { var var1 = 11; } }"},
+ {"class MyClass extends MyBase { constructor() { var var1; function "
+ "foo() { var1 = 11; } } }"},
+
+ {"class MyClass extends MyBase { m() {} }"},
+ {"class MyClass extends MyBase { m() { super.foo; } }"},
+ {"class MyClass extends MyBase { m() { var var1; } }"},
+ {"class MyClass extends MyBase { m() { var var1 = 11; } }"},
+ {"class MyClass extends MyBase { m() { var var1; function foo() { var1 = "
+ "11; } } }"},
+
+ {"class MyClass extends MyBase { static m() {} }"},
+ {"class MyClass extends MyBase { static m() { super.foo; } }"},
+ {"class MyClass extends MyBase { static m() { var var1; } }"},
+ {"class MyClass extends MyBase { static m() { var var1 = 11; } }"},
+ {"class MyClass extends MyBase { static m() { var var1; function foo() { "
+ "var1 = 11; } } }"},
};
for (unsigned outer_ix = 0; outer_ix < arraysize(outers); ++outer_ix) {
@@ -626,9 +753,8 @@ TEST(PreParserScopeAnalysis) {
CHECK(i::parsing::ParseProgram(&eager_normal, isolate));
CHECK(i::Compiler::Analyze(&eager_normal, isolate));
- i::Scope* normal_scope =
- eager_normal.literal()->scope()->inner_scope()->inner_scope();
- CHECK_NOT_NULL(normal_scope);
+ i::Scope* normal_scope = i::ScopeTestHelper::FindScope(
+ eager_normal.literal()->scope(), outers[outer_ix].location);
CHECK_NULL(normal_scope->sibling());
CHECK(normal_scope->is_function_scope());
@@ -639,15 +765,12 @@ TEST(PreParserScopeAnalysis) {
// Don't run scope analysis (that would obviously decide the correct
// allocation for the variables).
- i::Scope* unallocated_scope = eager_using_scope_data.literal()
- ->scope()
- ->inner_scope()
- ->inner_scope();
- CHECK_NOT_NULL(unallocated_scope);
+ i::Scope* unallocated_scope = i::ScopeTestHelper::FindScope(
+ eager_using_scope_data.literal()->scope(), outers[outer_ix].location);
CHECK_NULL(unallocated_scope->sibling());
CHECK(unallocated_scope->is_function_scope());
- int index = 0;
+ uint32_t index = 0;
lazy_info.preparsed_scope_data()->RestoreData(unallocated_scope, &index);
i::ScopeTestHelper::AllocateWithoutVariableResolution(unallocated_scope);
diff --git a/deps/v8/test/cctest/parsing/test-scanner-streams.cc b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
index 7b621114fe..d76dea3245 100644
--- a/deps/v8/test/cctest/parsing/test-scanner-streams.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner-streams.cc
@@ -22,6 +22,13 @@ class ChunkSource : public v8::ScriptCompiler::ExternalSourceStream {
chunks++;
} while (chunks_.back().len > 0);
}
+ explicit ChunkSource(const char* chunks) : current_(0) {
+ do {
+ chunks_.push_back(
+ {reinterpret_cast<const uint8_t*>(chunks), strlen(chunks)});
+ chunks += strlen(chunks) + 1;
+ } while (chunks_.back().len > 0);
+ }
ChunkSource(const uint8_t* data, size_t len, bool extra_chunky)
: current_(0) {
// If extra_chunky, we'll use increasingly large chunk sizes.
@@ -450,3 +457,42 @@ TEST(Regress651333) {
CHECK_EQ(i::Utf16CharacterStream::kEndOfInput, stream->Advance());
}
}
+
+TEST(Regress6377) {
+ const char* cases[] = {
+ "\xf0\x90\0" // first chunk - start of 4-byte seq
+ "\x80\x80" // second chunk - end of 4-byte seq
+ "a\0", // and an 'a'
+
+ "\xe0\xbf\0" // first chunk - start of 3-byte seq
+ "\xbf" // second chunk - one-byte end of 3-byte seq
+ "a\0", // and an 'a'
+
+ "\xc3\0" // first chunk - start of 2-byte seq
+ "\xbf" // second chunk - end of 2-byte seq
+ "a\0", // and an 'a'
+
+ "\xf0\x90\x80\0" // first chunk - start of 4-byte seq
+ "\x80" // second chunk - one-byte end of 4-byte seq
+ "a\xc3\0" // and an 'a' + start of 2-byte seq
+ "\xbf\0", // third chunk - end of 2-byte seq
+ };
+ const std::vector<std::vector<uint16_t>> unicode = {
+ {0xd800, 0xdc00, 97}, {0xfff, 97}, {0xff, 97}, {0xd800, 0xdc00, 97, 0xff},
+ };
+ CHECK_EQ(unicode.size(), sizeof(cases) / sizeof(cases[0]));
+ for (size_t c = 0; c < unicode.size(); ++c) {
+ ChunkSource chunk_source(cases[c]);
+ std::unique_ptr<i::Utf16CharacterStream> stream(i::ScannerStream::For(
+ &chunk_source, v8::ScriptCompiler::StreamedSource::UTF8, nullptr));
+ for (size_t i = 0; i < unicode[c].size(); i++) {
+ CHECK_EQ(unicode[c][i], stream->Advance());
+ }
+ CHECK_EQ(i::Utf16CharacterStream::kEndOfInput, stream->Advance());
+ stream->Seek(0);
+ for (size_t i = 0; i < unicode[c].size(); i++) {
+ CHECK_EQ(unicode[c][i], stream->Advance());
+ }
+ CHECK_EQ(i::Utf16CharacterStream::kEndOfInput, stream->Advance());
+ }
+}
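The expected uint16_t sequences are the UTF-16 encodings of the code points decoded from the split UTF-8 chunks. A worked sketch for the first case, assuming standard UTF-8/UTF-16 rules rather than anything V8-specific:

  // "\xf0\x90\x80\x80" decodes to U+10000; code points above U+FFFF are
  // emitted as a surrogate pair:
  uint32_t cp = 0x10000 - 0x10000;         // offset into the supplementary planes
  uint16_t lead = 0xd800 + (cp >> 10);     // 0xd800
  uint16_t trail = 0xdc00 + (cp & 0x3ff);  // 0xdc00
  // Likewise "\xe0\xbf\xbf" -> U+0FFF and "\xc3\xbf" -> U+00FF each fit in a
  // single code unit, matching {0xfff, 97} and {0xff, 97} above (97 == 'a').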
diff --git a/deps/v8/test/cctest/parsing/test-scanner.cc b/deps/v8/test/cctest/parsing/test-scanner.cc
index 2c27ca67ec..7ca02ba3d6 100644
--- a/deps/v8/test/cctest/parsing/test-scanner.cc
+++ b/deps/v8/test/cctest/parsing/test-scanner.cc
@@ -39,7 +39,7 @@ ScannerTestHelper make_scanner(const char* src) {
helper.stream = ScannerStream::ForTesting(src);
helper.scanner =
std::unique_ptr<Scanner>(new Scanner(helper.unicode_cache.get()));
- helper.scanner->Initialize(helper.stream.get());
+ helper.scanner->Initialize(helper.stream.get(), false);
return helper;
}
diff --git a/deps/v8/test/cctest/scope-test-helper.h b/deps/v8/test/cctest/scope-test-helper.h
index 691a723981..61a5167854 100644
--- a/deps/v8/test/cctest/scope-test-helper.h
+++ b/deps/v8/test/cctest/scope-test-helper.h
@@ -23,31 +23,38 @@ class ScopeTestHelper {
static void CompareScopes(Scope* baseline, Scope* scope,
bool precise_maybe_assigned) {
- if (!scope->is_hidden()) {
- for (auto baseline_local = baseline->locals()->begin(),
- scope_local = scope->locals()->begin();
- baseline_local != baseline->locals()->end();
- ++baseline_local, ++scope_local) {
- if (scope_local->mode() == VAR || scope_local->mode() == LET ||
- scope_local->mode() == CONST) {
- // Sanity check the variable name. If this fails, the variable order
- // is not deterministic.
- CHECK_EQ(scope_local->raw_name()->length(),
- baseline_local->raw_name()->length());
- for (int i = 0; i < scope_local->raw_name()->length(); ++i) {
- CHECK_EQ(scope_local->raw_name()->raw_data()[i],
- baseline_local->raw_name()->raw_data()[i]);
- }
+ CHECK_EQ(baseline->scope_type(), scope->scope_type());
+ CHECK_IMPLIES(baseline->is_declaration_scope(),
+ baseline->AsDeclarationScope()->function_kind() ==
+ scope->AsDeclarationScope()->function_kind());
- CHECK_EQ(scope_local->location(), baseline_local->location());
- if (precise_maybe_assigned) {
- CHECK_EQ(scope_local->maybe_assigned(),
- baseline_local->maybe_assigned());
- } else {
- STATIC_ASSERT(kMaybeAssigned > kNotAssigned);
- CHECK_GE(scope_local->maybe_assigned(),
- baseline_local->maybe_assigned());
- }
+ if (!PreParsedScopeData::ScopeNeedsData(baseline)) {
+ return;
+ }
+
+ for (auto baseline_local = baseline->locals()->begin(),
+ scope_local = scope->locals()->begin();
+ baseline_local != baseline->locals()->end();
+ ++baseline_local, ++scope_local) {
+ if (scope_local->mode() == VAR || scope_local->mode() == LET ||
+ scope_local->mode() == CONST) {
+ // Sanity check the variable name. If this fails, the variable order
+ // is not deterministic.
+ CHECK_EQ(scope_local->raw_name()->length(),
+ baseline_local->raw_name()->length());
+ for (int i = 0; i < scope_local->raw_name()->length(); ++i) {
+ CHECK_EQ(scope_local->raw_name()->raw_data()[i],
+ baseline_local->raw_name()->raw_data()[i]);
+ }
+
+ CHECK_EQ(scope_local->location(), baseline_local->location());
+ if (precise_maybe_assigned) {
+ CHECK_EQ(scope_local->maybe_assigned(),
+ baseline_local->maybe_assigned());
+ } else {
+ STATIC_ASSERT(kMaybeAssigned > kNotAssigned);
+ CHECK_GE(scope_local->maybe_assigned(),
+ baseline_local->maybe_assigned());
}
}
}
@@ -59,6 +66,20 @@ class ScopeTestHelper {
CompareScopes(baseline_inner, scope_inner, precise_maybe_assigned);
}
}
+
+ // Finds a scope given a start point and directions to it (which inner scope
+ // to pick).
+ static Scope* FindScope(Scope* scope, const std::vector<unsigned>& location) {
+ for (auto n : location) {
+ scope = scope->inner_scope();
+ CHECK_NOT_NULL(scope);
+ while (n-- > 0) {
+ scope = scope->sibling();
+ CHECK_NOT_NULL(scope);
+ }
+ }
+ return scope;
+ }
};
} // namespace internal
} // namespace v8
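FindScope turns the hard-coded inner_scope()->inner_scope() chains in the test into data: each entry of `location` descends one level into the scope tree and then skips that many siblings. A hypothetical tree, assuming the sibling order matches the indices the test table uses:

  // script scope
  //   +-- scope X          <- location {0}
  //   +-- scope Y          <- location {1}
  //         +-- scope Z    <- location {1, 0}
  std::vector<unsigned> location = {1, 0};
  Scope* z = ScopeTestHelper::FindScope(script_scope, location);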
diff --git a/deps/v8/test/cctest/test-accessor-assembler.cc b/deps/v8/test/cctest/test-accessor-assembler.cc
index 629fe4226a..17617affee 100644
--- a/deps/v8/test/cctest/test-accessor-assembler.cc
+++ b/deps/v8/test/cctest/test-accessor-assembler.cc
@@ -140,18 +140,18 @@ TEST(TryProbeStubCache) {
m.TryProbeStubCache(&stub_cache, receiver, name, &if_handler, &var_handler,
&if_miss);
- m.Bind(&if_handler);
+ m.BIND(&if_handler);
m.Branch(m.WordEqual(expected_handler, var_handler.value()), &passed,
&failed);
- m.Bind(&if_miss);
+ m.BIND(&if_miss);
m.Branch(m.WordEqual(expected_handler, m.IntPtrConstant(0)), &passed,
&failed);
- m.Bind(&passed);
+ m.BIND(&passed);
m.Return(m.BooleanConstant(true));
- m.Bind(&failed);
+ m.BIND(&failed);
m.Return(m.BooleanConstant(false));
}
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index c38d77bd21..070a1a0817 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -319,7 +319,7 @@ static void CheckAccessorArgsCorrect(
CHECK(info.Data()
->Equals(info.GetIsolate()->GetCurrentContext(), v8_str("data"))
.FromJust());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(info.GetIsolate() == CcTest::isolate());
CHECK(info.This() == info.Holder());
CHECK(info.Data()
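The long run of mechanical CollectAllGarbage edits in the following files suggests the helper gained a default argument, so the previously ubiquitous finalize-marking flag no longer needs spelling out; only non-default flag sets (kReduceMemoryFootprintMask and kMakeHeapIterableMask, seen later in this patch) still pass one explicitly. Presumed shape of the helper, inferred from the call sites rather than taken from this diff:

  static void CollectAllGarbage(
      int flags = i::Heap::kFinalizeIncrementalMarkingMask) {
    CcTest::heap()->CollectAllGarbage(flags,
                                      i::GarbageCollectionReason::kTesting);
  }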
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 9e739687b4..f81d48eb58 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -378,7 +378,7 @@ void InterceptorHasOwnPropertyGetter(
void InterceptorHasOwnPropertyGetterGC(
Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
ApiTestFuzzer::Fuzz();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
int query_counter_int = 0;
@@ -4226,7 +4226,7 @@ THREADED_TEST(NamedPropertyHandlerGetterAttributes) {
THREADED_TEST(Regress256330) {
- if (!i::FLAG_crankshaft) return;
+ if (!i::FLAG_opt) return;
i::FLAG_allow_natives_syntax = true;
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index f2cbc7dabf..bebff5f172 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -456,7 +456,7 @@ THREADED_TEST(ScriptUsingStringResource) {
CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
source->GetExternalStringResourceBase(&encoding));
CHECK_EQ(String::TWO_BYTE_ENCODING, encoding);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(0, dispose_count);
}
CcTest::i_isolate()->compilation_cache()->Clear();
@@ -487,7 +487,7 @@ THREADED_TEST(ScriptUsingOneByteStringResource) {
Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(0, dispose_count);
}
CcTest::i_isolate()->compilation_cache()->Clear();
@@ -521,11 +521,11 @@ THREADED_TEST(ScriptMakingExternalString) {
Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(0, dispose_count);
}
CcTest::i_isolate()->compilation_cache()->Clear();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, dispose_count);
}
@@ -547,11 +547,11 @@ THREADED_TEST(ScriptMakingExternalOneByteString) {
Local<Value> value = script->Run(env.local()).ToLocalChecked();
CHECK(value->IsNumber());
CHECK_EQ(7, value->Int32Value(env.local()).FromJust());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(0, dispose_count);
}
CcTest::i_isolate()->compilation_cache()->Clear();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, dispose_count);
}
@@ -629,7 +629,7 @@ TEST(MakingExternalUnalignedOneByteString) {
CHECK(success);
// Trigger GCs and force evacuation.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CcTest::CollectAllGarbage(i::Heap::kReduceMemoryFootprintMask);
}
@@ -651,8 +651,8 @@ THREADED_TEST(UsingExternalString) {
factory->InternalizeString(istring);
CHECK(isymbol->IsInternalizedString());
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
}
@@ -674,8 +674,8 @@ THREADED_TEST(UsingExternalOneByteString) {
factory->InternalizeString(istring);
CHECK(isymbol->IsInternalizedString());
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
}
@@ -900,8 +900,8 @@ THREADED_TEST(StringConcat) {
CHECK_EQ(68, value->Int32Value(env.local()).FromJust());
}
CcTest::i_isolate()->compilation_cache()->Clear();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
}
@@ -2670,7 +2670,7 @@ static void CheckAlignedPointerInInternalField(Local<v8::Object> obj,
void* value) {
CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
obj->SetAlignedPointerInInternalField(0, value);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(value, obj->GetAlignedPointerFromInternalField(0));
}
@@ -2725,14 +2725,14 @@ THREADED_TEST(SetAlignedPointerInInternalFields) {
void* values[] = {heap_allocated_1, heap_allocated_2};
obj->SetAlignedPointerInInternalFields(2, indices, values);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(heap_allocated_1, obj->GetAlignedPointerFromInternalField(0));
CHECK_EQ(heap_allocated_2, obj->GetAlignedPointerFromInternalField(1));
indices[0] = 1;
indices[1] = 0;
obj->SetAlignedPointerInInternalFields(2, indices, values);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(heap_allocated_2, obj->GetAlignedPointerFromInternalField(0));
CHECK_EQ(heap_allocated_1, obj->GetAlignedPointerFromInternalField(1));
@@ -2744,7 +2744,7 @@ static void CheckAlignedPointerInEmbedderData(LocalContext* env, int index,
void* value) {
CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
(*env)->SetAlignedPointerInEmbedderData(index, value);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(value, (*env)->GetAlignedPointerFromEmbedderData(index));
}
@@ -2774,7 +2774,7 @@ THREADED_TEST(EmbedderDataAlignedPointers) {
for (int i = 0; i < 100; i++) {
env->SetAlignedPointerInEmbedderData(i, AlignedTestPointer(i));
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
for (int i = 0; i < 100; i++) {
CHECK_EQ(AlignedTestPointer(i), env->GetAlignedPointerFromEmbedderData(i));
}
@@ -2806,7 +2806,7 @@ THREADED_TEST(IdentityHash) {
// Ensure that the test starts with a fresh heap to test whether the hash
// code is based on the address.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
Local<v8::Object> obj = v8::Object::New(isolate);
int hash = obj->GetIdentityHash();
int hash1 = obj->GetIdentityHash();
@@ -2816,7 +2816,7 @@ THREADED_TEST(IdentityHash) {
// objects should not be assigned the same hash code. If the test below fails
// the random number generator should be evaluated.
CHECK_NE(hash, hash2);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
int hash3 = v8::Object::New(isolate)->GetIdentityHash();
// Make sure that the identity hash is not based on the initial address of
// the object alone. If the test below fails the random number generator
@@ -2892,7 +2892,7 @@ TEST(SymbolIdentityHash) {
int hash = symbol->GetIdentityHash();
int hash1 = symbol->GetIdentityHash();
CHECK_EQ(hash, hash1);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
int hash3 = symbol->GetIdentityHash();
CHECK_EQ(hash, hash3);
}
@@ -2903,7 +2903,7 @@ TEST(SymbolIdentityHash) {
int hash = js_symbol->GetIdentityHash();
int hash1 = js_symbol->GetIdentityHash();
CHECK_EQ(hash, hash1);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
int hash3 = js_symbol->GetIdentityHash();
CHECK_EQ(hash, hash3);
}
@@ -2919,7 +2919,7 @@ TEST(StringIdentityHash) {
int hash = str->GetIdentityHash();
int hash1 = str->GetIdentityHash();
CHECK_EQ(hash, hash1);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
int hash3 = str->GetIdentityHash();
CHECK_EQ(hash, hash3);
@@ -2938,8 +2938,9 @@ THREADED_TEST(SymbolProperties) {
v8::Local<v8::Symbol> sym1 = v8::Symbol::New(isolate);
v8::Local<v8::Symbol> sym2 = v8::Symbol::New(isolate, v8_str("my-symbol"));
v8::Local<v8::Symbol> sym3 = v8::Symbol::New(isolate, v8_str("sym3"));
+ v8::Local<v8::Symbol> sym4 = v8::Symbol::New(isolate, v8_str("native"));
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Check basic symbol functionality.
CHECK(sym1->IsSymbol());
@@ -3008,7 +3009,7 @@ THREADED_TEST(SymbolProperties) {
CHECK_EQ(num_props + 1,
obj->GetPropertyNames(env.local()).ToLocalChecked()->Length());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(obj->SetAccessor(env.local(), sym3, SymbolAccessorGetter,
SymbolAccessorSetter)
@@ -3024,6 +3025,23 @@ THREADED_TEST(SymbolProperties) {
->Equals(env.local(), v8::Integer::New(isolate, 42))
.FromJust());
+ CHECK(obj->SetNativeDataProperty(env.local(), sym4, SymbolAccessorGetter)
+ .FromJust());
+ CHECK(obj->Get(env.local(), sym4).ToLocalChecked()->IsUndefined());
+ CHECK(obj->Set(env.local(), v8_str("accessor_native"),
+ v8::Integer::New(isolate, 123))
+ .FromJust());
+ CHECK_EQ(123, obj->Get(env.local(), sym4)
+ .ToLocalChecked()
+ ->Int32Value(env.local())
+ .FromJust());
+ CHECK(obj->Set(env.local(), sym4, v8::Integer::New(isolate, 314)).FromJust());
+ CHECK(obj->Get(env.local(), sym4)
+ .ToLocalChecked()
+ ->Equals(env.local(), v8::Integer::New(isolate, 314))
+ .FromJust());
+ CHECK(obj->Delete(env.local(), v8_str("accessor_native")).FromJust());
+
// Add another property and delete it afterwards to force the object in
// slow case.
CHECK(
@@ -3118,7 +3136,7 @@ THREADED_TEST(PrivatePropertiesOnProxies) {
v8::Local<v8::Private> priv2 =
v8::Private::New(isolate, v8_str("my-private"));
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(priv2->Name()
->Equals(env.local(),
@@ -3160,7 +3178,7 @@ THREADED_TEST(PrivatePropertiesOnProxies) {
CHECK_EQ(num_props + 1,
proxy->GetPropertyNames(env.local()).ToLocalChecked()->Length());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Add another property and delete it afterwards to force the object in
// slow case.
@@ -3212,7 +3230,7 @@ THREADED_TEST(PrivateProperties) {
v8::Local<v8::Private> priv2 =
v8::Private::New(isolate, v8_str("my-private"));
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(priv2->Name()
->Equals(env.local(),
@@ -3254,7 +3272,7 @@ THREADED_TEST(PrivateProperties) {
CHECK_EQ(num_props + 1,
obj->GetPropertyNames(env.local()).ToLocalChecked()->Length());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Add another property and delete it afterwards to force the object in
// slow case.
@@ -3357,6 +3375,15 @@ static void CheckWellKnownSymbol(v8::Local<v8::Symbol>(*getter)(v8::Isolate*),
THREADED_TEST(WellKnownSymbols) {
CheckWellKnownSymbol(v8::Symbol::GetIterator, "Symbol.iterator");
CheckWellKnownSymbol(v8::Symbol::GetUnscopables, "Symbol.unscopables");
+ CheckWellKnownSymbol(v8::Symbol::GetHasInstance, "Symbol.hasInstance");
+ CheckWellKnownSymbol(v8::Symbol::GetIsConcatSpreadable,
+ "Symbol.isConcatSpreadable");
+ CheckWellKnownSymbol(v8::Symbol::GetMatch, "Symbol.match");
+ CheckWellKnownSymbol(v8::Symbol::GetReplace, "Symbol.replace");
+ CheckWellKnownSymbol(v8::Symbol::GetSearch, "Symbol.search");
+ CheckWellKnownSymbol(v8::Symbol::GetSplit, "Symbol.split");
+ CheckWellKnownSymbol(v8::Symbol::GetToPrimitive, "Symbol.toPrimitive");
+ CheckWellKnownSymbol(v8::Symbol::GetToStringTag, "Symbol.toStringTag");
}
@@ -3417,7 +3444,7 @@ THREADED_TEST(ArrayBuffer_ApiInternalToExternal) {
CheckInternalFieldsAreZero(ab);
CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
CHECK(!ab->IsExternal());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
ScopedArrayBufferContents ab_contents(ab->Externalize());
CHECK(ab->IsExternal());
@@ -3693,7 +3720,7 @@ THREADED_TEST(SharedArrayBuffer_ApiInternalToExternal) {
CheckInternalFieldsAreZero(ab);
CHECK_EQ(1024, static_cast<int>(ab->ByteLength()));
CHECK(!ab->IsExternal());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
ScopedSharedArrayBufferContents ab_contents(ab->Externalize());
CHECK(ab->IsExternal());
@@ -3810,7 +3837,7 @@ THREADED_TEST(HiddenProperties) {
v8::Local<v8::String> empty = v8_str("");
v8::Local<v8::String> prop_name = v8_str("prop_name");
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Make sure delete of a non-existent hidden value works
obj->DeletePrivate(env.local(), key).FromJust();
@@ -3828,7 +3855,7 @@ THREADED_TEST(HiddenProperties) {
->Int32Value(env.local())
.FromJust());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Make sure we do not find the hidden property.
CHECK(!obj->Has(env.local(), empty).FromJust());
@@ -3852,7 +3879,7 @@ THREADED_TEST(HiddenProperties) {
->Int32Value(env.local())
.FromJust());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Add another property and delete it afterwards to force the object in
// slow case.
@@ -3876,7 +3903,7 @@ THREADED_TEST(HiddenProperties) {
->Int32Value(env.local())
.FromJust());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK(obj->SetPrivate(env.local(), key, v8::Integer::New(isolate, 2002))
.FromJust());
@@ -4167,7 +4194,7 @@ void SecondPassCallback(const v8::WeakCallbackInfo<TwoPassCallbackData>& data) {
if (!trigger_gc) return;
auto data_2 = new TwoPassCallbackData(data.GetIsolate(), instance_counter);
data_2->SetWeak();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
@@ -4188,7 +4215,7 @@ TEST(TwoPassPhantomCallbacks) {
data->SetWeak();
}
CHECK_EQ(static_cast<int>(kLength), instance_counter);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
EmptyMessageQueues(isolate);
CHECK_EQ(0, instance_counter);
}
@@ -4207,7 +4234,7 @@ TEST(TwoPassPhantomCallbacksNestedGc) {
array[10]->MarkTriggerGc();
array[15]->MarkTriggerGc();
CHECK_EQ(static_cast<int>(kLength), instance_counter);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
EmptyMessageQueues(isolate);
CHECK_EQ(0, instance_counter);
}
@@ -4772,7 +4799,7 @@ TEST(NativeWeakMap) {
CHECK(value->Equals(env.local(), weak_map->Get(obj2)).FromJust());
CHECK(value->Equals(env.local(), weak_map->Get(sym1)).FromJust());
}
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
{
HandleScope scope(isolate);
CHECK(value->Equals(env.local(), weak_map->Get(local1)).FromJust());
@@ -4794,7 +4821,7 @@ TEST(NativeWeakMap) {
s1.handle.SetWeak(&s1, &WeakPointerCallback,
v8::WeakCallbackType::kParameter);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(3, counter.NumberOfWeakCalls());
CHECK(o1.handle.IsEmpty());
@@ -6224,6 +6251,63 @@ THREADED_TEST(TypeOf) {
.FromJust());
}
+THREADED_TEST(InstanceOf) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ CompileRun(
+ "var A = {};"
+ "var B = {};"
+ "var C = {};"
+ "B.__proto__ = A;"
+ "C.__proto__ = B;"
+ "function F() {}"
+ "F.prototype = A;"
+ "var G = { [Symbol.hasInstance] : null};"
+ "var H = { [Symbol.hasInstance] : () => { throw new Error(); } };"
+ "var J = { [Symbol.hasInstance] : () => true };"
+ "class K {}"
+ "var D = new K;"
+ "class L extends K {}"
+ "var E = new L");
+
+ v8::Local<v8::Object> f = v8::Local<v8::Object>::Cast(CompileRun("F"));
+ v8::Local<v8::Object> g = v8::Local<v8::Object>::Cast(CompileRun("G"));
+ v8::Local<v8::Object> h = v8::Local<v8::Object>::Cast(CompileRun("H"));
+ v8::Local<v8::Object> j = v8::Local<v8::Object>::Cast(CompileRun("J"));
+ v8::Local<v8::Object> k = v8::Local<v8::Object>::Cast(CompileRun("K"));
+ v8::Local<v8::Object> l = v8::Local<v8::Object>::Cast(CompileRun("L"));
+ v8::Local<v8::Value> a = v8::Local<v8::Value>::Cast(CompileRun("A"));
+ v8::Local<v8::Value> b = v8::Local<v8::Value>::Cast(CompileRun("B"));
+ v8::Local<v8::Value> c = v8::Local<v8::Value>::Cast(CompileRun("C"));
+ v8::Local<v8::Value> d = v8::Local<v8::Value>::Cast(CompileRun("D"));
+ v8::Local<v8::Value> e = v8::Local<v8::Value>::Cast(CompileRun("E"));
+
+ v8::TryCatch try_catch(env->GetIsolate());
+ CHECK(!a->InstanceOf(env.local(), f).ToChecked());
+ CHECK(b->InstanceOf(env.local(), f).ToChecked());
+ CHECK(c->InstanceOf(env.local(), f).ToChecked());
+ CHECK(!d->InstanceOf(env.local(), f).ToChecked());
+ CHECK(!e->InstanceOf(env.local(), f).ToChecked());
+ CHECK(!try_catch.HasCaught());
+
+ CHECK(a->InstanceOf(env.local(), g).IsNothing());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
+
+ CHECK(b->InstanceOf(env.local(), h).IsNothing());
+ CHECK(try_catch.HasCaught());
+ try_catch.Reset();
+
+ CHECK(v8_num(1)->InstanceOf(env.local(), j).ToChecked());
+ CHECK(!try_catch.HasCaught());
+
+ CHECK(d->InstanceOf(env.local(), k).ToChecked());
+ CHECK(e->InstanceOf(env.local(), k).ToChecked());
+ CHECK(!d->InstanceOf(env.local(), l).ToChecked());
+ CHECK(e->InstanceOf(env.local(), l).ToChecked());
+ CHECK(!try_catch.HasCaught());
+}
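The G/H/J cases exercise ES2015 Symbol.hasInstance dispatch, assuming standard semantics rather than anything V8-specific: `x instanceof C` calls a callable C[Symbol.hasInstance] and coerces its result to boolean; a null handler falls back to the ordinary prototype walk, which throws because G itself is not callable; H's handler throws directly. Both error paths surface as Nothing() from v8::Value::InstanceOf, with the exception captured by the enclosing TryCatch, while J's always-true handler makes even a plain number an instance:

  CHECK(v8_num(1)->InstanceOf(env.local(), j).ToChecked());  // handler returns true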
+
THREADED_TEST(MultiRun) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
@@ -7470,7 +7554,7 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
b->Set(context, v8_str("x"), a).FromJust();
}
if (global_gc) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
} else {
CcTest::CollectGarbage(i::NEW_SPACE);
}
@@ -7496,7 +7580,7 @@ static void IndependentWeakHandle(bool global_gc, bool interlinked) {
object_b.handle.MarkIndependent();
CHECK(object_b.handle.IsIndependent());
if (global_gc) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
} else {
CcTest::CollectGarbage(i::NEW_SPACE);
}
@@ -7594,7 +7678,7 @@ void InternalFieldCallback(bool global_gc) {
}
}
if (global_gc) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
} else {
CcTest::CollectGarbage(i::NEW_SPACE);
}
@@ -7674,9 +7758,7 @@ THREADED_HEAP_TEST(ResetWeakHandle) {
static void InvokeScavenge() { CcTest::CollectGarbage(i::NEW_SPACE); }
-static void InvokeMarkSweep() {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
-}
+static void InvokeMarkSweep() { CcTest::CollectAllGarbage(); }
static void ForceScavenge2(
const v8::WeakCallbackInfo<FlagAndPersistent>& data) {
@@ -7752,7 +7834,7 @@ static void ArgumentsTestCallback(
CHECK(v8::Integer::New(isolate, 3)->Equals(context, args[2]).FromJust());
CHECK(v8::Undefined(isolate)->Equals(context, args[3]).FromJust());
v8::HandleScope scope(args.GetIsolate());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
@@ -9082,7 +9164,7 @@ static bool security_check_with_gc_called;
static bool SecurityTestCallbackWithGC(Local<v8::Context> accessing_context,
Local<v8::Object> accessed_object,
Local<v8::Value> data) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
security_check_with_gc_called = true;
return true;
}
@@ -11179,47 +11261,78 @@ THREADED_TEST(ConstructorForObject) {
Local<Function> function =
function_template->GetFunction(context.local()).ToLocalChecked();
Local<Object> instance1 = function;
+ CHECK(instance1->IsObject());
+ CHECK(instance1->IsFunction());
CHECK(context->Global()
->Set(context.local(), v8_str("obj4"), instance1)
.FromJust());
v8::TryCatch try_catch(isolate);
- Local<Value> value;
CHECK(!try_catch.HasCaught());
- CHECK(instance1->IsObject());
- CHECK(instance1->IsFunction());
+ {
+ Local<Value> value = CompileRun("new obj4(28)");
+ CHECK(!try_catch.HasCaught());
+ CHECK(value->IsObject());
+
+ Local<Value> args[] = {v8_num(28)};
+ value = instance1->CallAsConstructor(context.local(), 1, args)
+ .ToLocalChecked();
+ CHECK(!try_catch.HasCaught());
+ CHECK(value->IsObject());
+ }
- value = CompileRun("new obj4(28)");
+ Local<Value> proxy = CompileRun("proxy = new Proxy({},{})");
CHECK(!try_catch.HasCaught());
- CHECK(value->IsObject());
+ CHECK(proxy->IsProxy());
- Local<Value> args1[] = {v8_num(28)};
- value = instance1->CallAsConstructor(context.local(), 1, args1)
- .ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- CHECK(value->IsObject());
+ {
+ Local<Value> value = CompileRun("new obj4(proxy)");
+ CHECK(!try_catch.HasCaught());
+ CHECK(value->IsProxy());
+ CHECK(value->SameValue(proxy));
+
+ Local<Value> args[] = {proxy};
+ value = instance1->CallAsConstructor(context.local(), 1, args)
+ .ToLocalChecked();
+ CHECK(!try_catch.HasCaught());
+ CHECK(value->SameValue(proxy));
+ }
Local<ObjectTemplate> instance_template = ObjectTemplate::New(isolate);
instance_template->SetCallAsFunctionHandler(FakeConstructorCallback);
Local<Object> instance2 =
instance_template->NewInstance(context.local()).ToLocalChecked();
+ CHECK(instance2->IsObject());
+ CHECK(instance2->IsFunction());
CHECK(context->Global()
->Set(context.local(), v8_str("obj5"), instance2)
.FromJust());
CHECK(!try_catch.HasCaught());
- CHECK(instance2->IsObject());
- CHECK(instance2->IsFunction());
-
- value = CompileRun("new obj5(28)");
- CHECK(!try_catch.HasCaught());
- CHECK(!value->IsObject());
+ {
+ Local<Value> value = CompileRun("new obj5(28)");
+ CHECK(!try_catch.HasCaught());
+ CHECK(!value->IsObject());
+
+ Local<Value> args[] = {v8_num(28)};
+ value = instance2->CallAsConstructor(context.local(), 1, args)
+ .ToLocalChecked();
+ CHECK(!try_catch.HasCaught());
+ CHECK(!value->IsObject());
+ }
- Local<Value> args2[] = {v8_num(28)};
- value = instance2->CallAsConstructor(context.local(), 1, args2)
- .ToLocalChecked();
- CHECK(!try_catch.HasCaught());
- CHECK(!value->IsObject());
+ {
+ Local<Value> value = CompileRun("new obj5(proxy)");
+ CHECK(!try_catch.HasCaught());
+ CHECK(value->IsProxy());
+ CHECK(value->SameValue(proxy));
+
+ Local<Value> args[] = {proxy};
+ value = instance2->CallAsConstructor(context.local(), 1, args)
+ .ToLocalChecked();
+ CHECK(!try_catch.HasCaught());
+ CHECK(value->SameValue(proxy));
+ }
}
}
@@ -11856,7 +11969,7 @@ static void InterceptorCallICFastApi(
reinterpret_cast<int*>(v8::External::Cast(*info.Data())->Value());
++(*call_count);
if ((*call_count) % 20 == 0) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
}
@@ -11913,7 +12026,7 @@ static void GenerateSomeGarbage() {
void DirectApiCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
static int count = 0;
if (count++ % 3 == 0) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// This should move the stub
GenerateSomeGarbage(); // This should ensure the old stub memory is flushed
}
@@ -11983,7 +12096,7 @@ static int p_getter_count_3;
static Local<Value> DoDirectGetter() {
if (++p_getter_count_3 % 3 == 0) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
GenerateSomeGarbage();
}
return v8_str("Direct Getter Result");
@@ -13692,7 +13805,7 @@ static void CheckSurvivingGlobalObjectsCount(int expected) {
// the first garbage collection but some of the maps have already
// been marked at that point. Therefore some of the maps are not
// collected until the second garbage collection.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CcTest::CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
int count = GetGlobalObjectsCount();
CHECK_EQ(expected, count);
@@ -13834,7 +13947,7 @@ THREADED_TEST(NewPersistentHandleFromWeakCallback) {
handle1.SetWeak(&handle1, NewPersistentHandleCallback1,
v8::WeakCallbackType::kParameter);
handle2.Reset();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
@@ -13844,7 +13957,7 @@ v8::Persistent<v8::Object> to_be_disposed;
void DisposeAndForceGcCallback2(
const v8::WeakCallbackInfo<v8::Persistent<v8::Object>>& data) {
to_be_disposed.Reset();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
@@ -13868,7 +13981,7 @@ THREADED_TEST(DoNotUseDeletedNodesInSecondLevelGc) {
handle1.SetWeak(&handle1, DisposeAndForceGcCallback1,
v8::WeakCallbackType::kParameter);
to_be_disposed.Reset(isolate, handle2);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
void DisposingCallback(
@@ -13906,7 +14019,7 @@ THREADED_TEST(NoGlobalHandlesOrphaningDueToWeakCallback) {
v8::WeakCallbackType::kParameter);
handle3.SetWeak(&handle3, HandleCreatingCallback1,
v8::WeakCallbackType::kParameter);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
EmptyMessageQueues(isolate);
}
@@ -14246,7 +14359,7 @@ void SetFunctionEntryHookTest::RunTest() {
CHECK_EQ(2, CountInvocations(NULL, "bar"));
CHECK_EQ(200, CountInvocations("bar", "foo"));
CHECK_EQ(200, CountInvocations(NULL, "foo"));
- } else if (i::FLAG_crankshaft) {
+ } else if (i::FLAG_opt) {
// For ignition we don't see the actual functions being called, instead
// we see the InterpreterEntryTrampoline at least 102 times
// (100 unoptimized calls to foo, and 2 calls to bar).
@@ -16137,7 +16250,7 @@ static void ObjectWithExternalArrayTestHelper(Local<Context> context,
"}"
"sum;");
// Force GC to trigger verification.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(28, result->Int32Value(context).FromJust());
// Make sure out-of-range loads do not throw.
@@ -16353,12 +16466,12 @@ static void FixedTypedArrayTestHelper(i::ExternalArrayType array_type,
CHECK_EQ(FixedTypedArrayClass::kInstanceType,
fixed_array->map()->instance_type());
CHECK_EQ(kElementCount, fixed_array->length());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
for (int i = 0; i < kElementCount; i++) {
fixed_array->set(i, static_cast<ElementType>(i));
}
// Force GC to trigger verification.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
for (int i = 0; i < kElementCount; i++) {
CHECK_EQ(static_cast<int64_t>(static_cast<ElementType>(i)),
static_cast<int64_t>(fixed_array->get_scalar(i)));
@@ -16550,8 +16663,8 @@ THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
// Should not crash
CcTest::CollectGarbage(i::NEW_SPACE); // in survivor space now
CcTest::CollectGarbage(i::NEW_SPACE); // in old gen now
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
// Should not move the pointer
CHECK_EQ(ab->GetContents().Data(), store_ptr);
@@ -17080,7 +17193,7 @@ TEST(ErrorLevelWarning) {
i::Handle<i::JSMessageObject> message =
i::MessageHandler::MakeMessageObject(
i_isolate, i::MessageTemplate::kAsmJsInvalid, &location, msg,
- i::Handle<i::JSArray>::null());
+ i::Handle<i::FixedArray>::null());
message->set_error_level(levels[i]);
expected_error_level = levels[i];
i::MessageHandler::ReportMessage(i_isolate, &location, message);
@@ -18802,7 +18915,7 @@ TEST(Regress528) {
other_context->Enter();
CompileRun(source_simple);
other_context->Exit();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
@@ -18824,7 +18937,7 @@ TEST(Regress528) {
other_context->Enter();
CompileRun(source_eval);
other_context->Exit();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
@@ -18851,7 +18964,7 @@ TEST(Regress528) {
other_context->Enter();
CompileRun(source_exception);
other_context->Exit();
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
if (GetGlobalObjectsCount() == 1) break;
}
CHECK_GE(2, gc_count);
@@ -19480,26 +19593,26 @@ TEST(GCCallbacksOld) {
context->GetIsolate()->AddGCEpilogueCallback(EpilogueCallback);
CHECK_EQ(0, prologue_call_count);
CHECK_EQ(0, epilogue_call_count);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, prologue_call_count);
CHECK_EQ(1, epilogue_call_count);
context->GetIsolate()->AddGCPrologueCallback(PrologueCallbackSecond);
context->GetIsolate()->AddGCEpilogueCallback(EpilogueCallbackSecond);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(1, prologue_call_count_second);
CHECK_EQ(1, epilogue_call_count_second);
context->GetIsolate()->RemoveGCPrologueCallback(PrologueCallback);
context->GetIsolate()->RemoveGCEpilogueCallback(EpilogueCallback);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
context->GetIsolate()->RemoveGCPrologueCallback(PrologueCallbackSecond);
context->GetIsolate()->RemoveGCEpilogueCallback(EpilogueCallbackSecond);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
@@ -19515,26 +19628,26 @@ TEST(GCCallbacks) {
isolate->AddGCEpilogueCallback(EpilogueCallback);
CHECK_EQ(0, prologue_call_count);
CHECK_EQ(0, epilogue_call_count);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(1, prologue_call_count);
CHECK_EQ(1, epilogue_call_count);
isolate->AddGCPrologueCallback(PrologueCallbackSecond);
isolate->AddGCEpilogueCallback(EpilogueCallbackSecond);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(1, prologue_call_count_second);
CHECK_EQ(1, epilogue_call_count_second);
isolate->RemoveGCPrologueCallback(PrologueCallback);
isolate->RemoveGCEpilogueCallback(EpilogueCallback);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
CHECK_EQ(2, epilogue_call_count_second);
isolate->RemoveGCPrologueCallback(PrologueCallbackSecond);
isolate->RemoveGCEpilogueCallback(EpilogueCallbackSecond);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(2, prologue_call_count);
CHECK_EQ(2, epilogue_call_count);
CHECK_EQ(2, prologue_call_count_second);
@@ -19722,7 +19835,7 @@ TEST(ContainsOnlyOneByte) {
void FailedAccessCheckCallbackGC(Local<v8::Object> target,
v8::AccessType type,
Local<v8::Value> data) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CcTest::isolate()->ThrowException(
v8::Exception::Error(v8_str("cross context")));
}
@@ -20345,7 +20458,7 @@ TEST(DontDeleteCellLoadIC) {
"})()",
"ReferenceError: cell is not defined");
CompileRun("cell = \"new_second\";");
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
ExpectString("readCell()", "new_second");
ExpectString("readCell()", "new_second");
}
@@ -20415,8 +20528,8 @@ TEST(PersistentHandleInNewSpaceVisitor) {
object1.SetWrapperClassId(42);
CHECK_EQ(42, object1.WrapperClassId());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
v8::Persistent<v8::Object> object2(isolate, v8::Object::New(isolate));
CHECK_EQ(0, object2.WrapperClassId());
@@ -20794,13 +20907,16 @@ void HasOwnPropertyNamedPropertyQuery2(
}
}
-
void HasOwnPropertyAccessorGetter(
Local<String> property,
const v8::PropertyCallbackInfo<v8::Value>& info) {
info.GetReturnValue().Set(v8_str("yes"));
}
+void HasOwnPropertyAccessorNameGetter(
+ Local<Name> property, const v8::PropertyCallbackInfo<v8::Value>& info) {
+ info.GetReturnValue().Set(v8_str("yes"));
+}
TEST(HasOwnProperty) {
LocalContext env;
@@ -20882,6 +20998,18 @@ TEST(HasOwnProperty) {
CHECK(!instance->HasOwnProperty(env.local(), v8_str("foo")).FromJust());
CHECK(instance->HasOwnProperty(env.local(), v8_str("bar")).FromJust());
}
+ { // Check that non-internalized keys are handled correctly.
+ Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+ templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+ HasOwnPropertyAccessorNameGetter));
+ Local<Object> instance = templ->NewInstance(env.local()).ToLocalChecked();
+ env->Global()->Set(env.local(), v8_str("obj"), instance).FromJust();
+ const char* src =
+ "var dyn_string = 'this string ';"
+ "dyn_string += 'does not exist elsewhere';"
+ "({}).hasOwnProperty.call(obj, dyn_string)";
+ CHECK(CompileRun(src)->BooleanValue(env.local()).FromJust());
+ }
}
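The point of the new case, as its comment says: `dyn_string` is assembled at runtime, so it is a fresh, non-internalized string, and a lookup path that only consulted internalized keys would miss the interceptor. A sketch of another key built the same way, assuming the same harness:

  CompileRun(
      "var key = 'a' + 'b'.repeat(3);"        // constructed, never a literal
      "({}).hasOwnProperty.call(obj, key)");  // must still reach the getter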
@@ -21888,7 +22016,7 @@ void TestStubCache(bool primary) {
} else {
FLAG_test_secondary_stub_cache = true;
}
- FLAG_crankshaft = false;
+ FLAG_opt = false;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -22533,6 +22661,43 @@ THREADED_TEST(JSONParseNumber) {
ExpectString("JSON.stringify(obj)", "42");
}
+namespace {
+void TestJSONParseArray(Local<Context> context, const char* input_str,
+ const char* expected_output_str,
+ i::ElementsKind expected_elements_kind) {
+ Local<Value> obj =
+ v8::JSON::Parse(context, v8_str(input_str)).ToLocalChecked();
+
+ i::Handle<i::JSArray> a =
+ i::Handle<i::JSArray>::cast(v8::Utils::OpenHandle(*obj));
+ CHECK_EQ(expected_elements_kind, a->GetElementsKind());
+
+ Local<Object> global = context->Global();
+ global->Set(context, v8_str("obj"), obj).FromJust();
+ ExpectString("JSON.stringify(obj)", expected_output_str);
+}
+} // namespace
+
+THREADED_TEST(JSONParseArray) {
+ LocalContext context;
+ HandleScope scope(context->GetIsolate());
+
+ TestJSONParseArray(context.local(), "[0, 1, 2]", "[0,1,2]",
+ i::FAST_SMI_ELEMENTS);
+ TestJSONParseArray(context.local(), "[0, 1.2, 2]", "[0,1.2,2]",
+ i::FAST_DOUBLE_ELEMENTS);
+ TestJSONParseArray(context.local(), "[0.2, 1, 2]", "[0.2,1,2]",
+ i::FAST_DOUBLE_ELEMENTS);
+ TestJSONParseArray(context.local(), "[0, \"a\", 2]", "[0,\"a\",2]",
+ i::FAST_ELEMENTS);
+ TestJSONParseArray(context.local(), "[\"a\", 1, 2]", "[\"a\",1,2]",
+ i::FAST_ELEMENTS);
+ TestJSONParseArray(context.local(), "[\"a\", 1.2, 2]", "[\"a\",1.2,2]",
+ i::FAST_ELEMENTS);
+ TestJSONParseArray(context.local(), "[0, 1.2, \"a\"]", "[0,1.2,\"a\"]",
+ i::FAST_ELEMENTS);
+}
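The expectations follow V8's elements-kind generalization: all-SMI input stays FAST_SMI_ELEMENTS, any fractional number widens the backing store to FAST_DOUBLE_ELEMENTS, and any non-number generalizes to FAST_ELEMENTS. A hypothetical extra case in the same style, not part of this patch:

  TestJSONParseArray(context.local(), "[2.5, 1, 2]", "[2.5,1,2]",
                     i::FAST_DOUBLE_ELEMENTS);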
+
THREADED_TEST(JSONStringifyObject) {
LocalContext context;
HandleScope scope(context->GetIsolate());
@@ -22780,10 +22945,12 @@ TEST(AccessCheckThrows) {
CheckCorrectThrow("%GetProperty(other, 'x')");
CheckCorrectThrow("%SetProperty(other, 'x', 'foo', 0)");
CheckCorrectThrow("%AddNamedProperty(other, 'x', 'foo', 1)");
- CheckCorrectThrow("%DeleteProperty_Sloppy(other, 'x')");
- CheckCorrectThrow("%DeleteProperty_Strict(other, 'x')");
- CheckCorrectThrow("%DeleteProperty_Sloppy(other, '1')");
- CheckCorrectThrow("%DeleteProperty_Strict(other, '1')");
+ STATIC_ASSERT(i::SLOPPY == 0);
+ STATIC_ASSERT(i::STRICT == 1);
+ CheckCorrectThrow("%DeleteProperty(other, 'x', 0)"); // 0 == SLOPPY
+ CheckCorrectThrow("%DeleteProperty(other, 'x', 1)"); // 1 == STRICT
+ CheckCorrectThrow("%DeleteProperty(other, '1', 0)");
+ CheckCorrectThrow("%DeleteProperty(other, '1', 1)");
CheckCorrectThrow("Object.prototype.hasOwnProperty.call(other, 'x')");
CheckCorrectThrow("%HasProperty(other, 'x')");
CheckCorrectThrow("Object.prototype.propertyIsEnumerable(other, 'x')");
@@ -22805,7 +22972,7 @@ TEST(AccessCheckInIC) {
if (FLAG_ignition || FLAG_turbo) return;
FLAG_native_code_counters = true;
- FLAG_crankshaft = false;
+ FLAG_opt = false;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
@@ -24362,12 +24529,13 @@ TEST(GetOwnPropertyDescriptor) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope scope(isolate);
CompileRun(
- "var x = { value : 13};"
- "Object.defineProperty(x, 'p0', {value : 12});"
- "Object.defineProperty(x, 'p1', {"
- " set : function(value) { this.value = value; },"
- " get : function() { return this.value; },"
- "});");
+ "var x = { value : 13};"
+ "Object.defineProperty(x, 'p0', {value : 12});"
+ "Object.defineProperty(x, Symbol.toStringTag, {value: 'foo'});"
+ "Object.defineProperty(x, 'p1', {"
+ " set : function(value) { this.value = value; },"
+ " get : function() { return this.value; },"
+ "});");
Local<Object> x = Local<Object>::Cast(
env->Global()->Get(env.local(), v8_str("x")).ToLocalChecked());
Local<Value> desc =
@@ -24401,6 +24569,14 @@ TEST(GetOwnPropertyDescriptor) {
->Equals(env.local(),
get->Call(env.local(), x, 0, NULL).ToLocalChecked())
.FromJust());
+ desc =
+ x->GetOwnPropertyDescriptor(env.local(), Symbol::GetToStringTag(isolate))
+ .ToLocalChecked();
+ CHECK(v8_str("foo")
+ ->Equals(env.local(), Local<Object>::Cast(desc)
+ ->Get(env.local(), v8_str("value"))
+ .ToLocalChecked())
+ .FromJust());
}
@@ -26471,3 +26647,84 @@ TEST(DeterministicRandomNumberGeneration) {
v8::internal::FLAG_random_seed = previous_seed;
}
+
+UNINITIALIZED_TEST(AllowAtomicsWait) {
+ using namespace i;
+ v8::Isolate::CreateParams create_params;
+ create_params.allow_atomics_wait = false;
+ create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+ v8::Isolate* isolate = v8::Isolate::New(create_params);
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+ {
+ CHECK_EQ(false, i_isolate->allow_atomics_wait());
+ isolate->SetAllowAtomicsWait(true);
+ CHECK_EQ(true, i_isolate->allow_atomics_wait());
+ }
+ isolate->Dispose();
+}
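What the flag gates, assuming the documented v8.h semantics: with allow_atomics_wait false, JavaScript `Atomics.wait(...)` throws instead of suspending the thread, which lets embedders keep, say, a UI thread responsive. Typical embedder usage:

  v8::Isolate::CreateParams params;
  params.allow_atomics_wait = false;  // deny blocking Atomics.wait on this isolate
  params.array_buffer_allocator =
      v8::ArrayBuffer::Allocator::NewDefaultAllocator();
  v8::Isolate* isolate = v8::Isolate::New(params);
  // Later, opt back in if a dedicated worker thread is allowed to block:
  isolate->SetAllowAtomicsWait(true);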
+
+enum ContextId { EnteredContext, CurrentContext };
+
+void CheckContexts(v8::Isolate* isolate) {
+ CHECK_EQ(CurrentContext, isolate->GetCurrentContext()
+ ->GetEmbedderData(1)
+ .As<v8::Integer>()
+ ->Value());
+ CHECK_EQ(EnteredContext, isolate->GetEnteredContext()
+ ->GetEmbedderData(1)
+ .As<v8::Integer>()
+ ->Value());
+}
+
+void ContextCheckGetter(Local<String> name,
+ const v8::PropertyCallbackInfo<v8::Value>& info) {
+ CheckContexts(info.GetIsolate());
+ info.GetReturnValue().Set(true);
+}
+
+void ContextCheckSetter(Local<String> name, Local<Value>,
+ const v8::PropertyCallbackInfo<void>& info) {
+ CheckContexts(info.GetIsolate());
+}
+
+void ContextCheckToString(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ CheckContexts(info.GetIsolate());
+ info.GetReturnValue().Set(v8_str("foo"));
+}
+
+TEST(CorrectEnteredContext) {
+ v8::HandleScope scope(CcTest::isolate());
+
+ LocalContext currentContext;
+ currentContext->SetEmbedderData(
+ 1, v8::Integer::New(currentContext->GetIsolate(), CurrentContext));
+ LocalContext enteredContext;
+ enteredContext->SetEmbedderData(
+ 1, v8::Integer::New(enteredContext->GetIsolate(), EnteredContext));
+
+ v8::Context::Scope contextScope(enteredContext.local());
+
+ v8::Local<v8::ObjectTemplate> object_template =
+ ObjectTemplate::New(currentContext->GetIsolate());
+ object_template->SetAccessor(v8_str("p"), &ContextCheckGetter,
+ &ContextCheckSetter);
+
+ v8::Local<v8::Object> object =
+ object_template->NewInstance(currentContext.local()).ToLocalChecked();
+
+ object->Get(currentContext.local(), v8_str("p")).ToLocalChecked();
+ object->Set(currentContext.local(), v8_str("p"), v8_int(0)).FromJust();
+
+ v8::Local<v8::Function> to_string =
+ v8::Function::New(currentContext.local(), ContextCheckToString)
+ .ToLocalChecked();
+
+ to_string->Call(currentContext.local(), object, 0, nullptr).ToLocalChecked();
+
+ object
+ ->CreateDataProperty(currentContext.local(), v8_str("toString"),
+ to_string)
+ .FromJust();
+
+ object->ToString(currentContext.local()).ToLocalChecked();
+}
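The two embedder-data tags distinguish the two context notions, assuming the usual API meaning: GetCurrentContext() is the context of the JavaScript currently executing (here, the creation context of the accessors and function, i.e. currentContext), while GetEnteredContext() is the context most recently entered through a Context::Scope (enteredContext). Inside any of the callbacks above:

  v8::Local<v8::Context> current = isolate->GetCurrentContext();  // tagged CurrentContext
  v8::Local<v8::Context> entered = isolate->GetEnteredContext();  // tagged EnteredContext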
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 056dd9ffa5..44b35f8d46 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -1297,9 +1297,11 @@ TEST(15) {
uint32_t vabs_s8[4], vabs_s16[4], vabs_s32[4];
uint32_t vneg_s8[4], vneg_s16[4], vneg_s32[4];
uint32_t veor[4], vand[4], vorr[4];
- float vdupf[4], vaddf[4], vsubf[4], vmulf[4];
+ float vdupf[4], vaddf[4], vpaddf[2], vsubf[4], vmulf[4];
+ uint32_t vdupf_16[2], vdupf_8[4];
uint32_t vmin_s8[4], vmin_u16[4], vmin_s32[4];
uint32_t vmax_s8[4], vmax_u16[4], vmax_s32[4];
+ uint32_t vpadd_i8[2], vpadd_i16[2], vpadd_i32[2];
uint32_t vpmin_s8[2], vpmin_u16[2], vpmin_s32[2];
uint32_t vpmax_s8[2], vpmax_u16[2], vpmax_s32[2];
uint32_t vadd8[4], vadd16[4], vadd32[4];
@@ -1309,6 +1311,7 @@ TEST(15) {
uint32_t vmul8[4], vmul16[4], vmul32[4];
uint32_t vshl8[4], vshl16[4], vshl32[5];
uint32_t vshr_s8[4], vshr_u16[4], vshr_s32[5];
+ uint32_t vsli_64[2], vsri_64[2], vsli_32[2], vsri_32[2];
uint32_t vceq[4], vceqf[4], vcgef[4], vcgtf[4];
uint32_t vcge_s8[4], vcge_u16[4], vcge_s32[4];
uint32_t vcgt_s8[4], vcgt_u16[4], vcgt_s32[4];
@@ -1439,7 +1442,7 @@ TEST(15) {
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcvt_f32_u32))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
- // vdup (integer).
+ // vdup (from register).
__ mov(r4, Operand(0xa));
__ vdup(Neon8, q0, r4);
__ vdup(Neon16, q1, r4);
@@ -1451,11 +1454,16 @@ TEST(15) {
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vdup32))));
__ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
- // vdup (float).
+ // vdup (from scalar).
__ vmov(s0, -1.0);
- __ vdup(q0, s0);
+ __ vdup(Neon32, q1, d0, 0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vdupf))));
- __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
+ __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
+ __ vdup(Neon16, d2, d0, 1);
+ __ vstr(d2, r0, offsetof(T, vdupf_16));
+ __ vdup(Neon8, q1, d0, 3);
+ __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vdupf_8))));
+ __ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vabs (float).
__ vmov(s0, -1.0);
@@ -1524,74 +1532,81 @@ TEST(15) {
// vmin (float).
__ vmov(s4, 2.0);
- __ vdup(q0, s4);
+ __ vdup(Neon32, q0, d2, 0);
__ vmov(s4, 1.0);
- __ vdup(q1, s4);
+ __ vdup(Neon32, q1, d2, 0);
__ vmin(q1, q1, q0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vminf))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vmax (float).
__ vmov(s4, 2.0);
- __ vdup(q0, s4);
+ __ vdup(Neon32, q0, d2, 0);
__ vmov(s4, 1.0);
- __ vdup(q1, s4);
+ __ vdup(Neon32, q1, d2, 0);
__ vmax(q1, q1, q0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmaxf))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vadd (float).
__ vmov(s4, 1.0);
- __ vdup(q0, s4);
- __ vdup(q1, s4);
+ __ vdup(Neon32, q0, d2, 0);
+ __ vdup(Neon32, q1, d2, 0);
__ vadd(q1, q1, q0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vaddf))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
+ // vpadd (float).
+ __ vmov(s0, 1.0);
+ __ vmov(s1, 2.0);
+ __ vmov(s2, 3.0);
+ __ vmov(s3, 4.0);
+ __ vpadd(d2, d0, d1);
+ __ vstr(d2, r0, offsetof(T, vpaddf));
// vsub (float).
__ vmov(s4, 2.0);
- __ vdup(q0, s4);
+ __ vdup(Neon32, q0, d2, 0);
__ vmov(s4, 1.0);
- __ vdup(q1, s4);
+ __ vdup(Neon32, q1, d2, 0);
__ vsub(q1, q1, q0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vsubf))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vmul (float).
__ vmov(s4, 2.0);
- __ vdup(q0, s4);
- __ vdup(q1, s4);
+ __ vdup(Neon32, q0, d2, 0);
+ __ vdup(Neon32, q1, d2, 0);
__ vmul(q1, q1, q0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmulf))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vrecpe.
__ vmov(s4, 2.0);
- __ vdup(q0, s4);
+ __ vdup(Neon32, q0, d2, 0);
__ vrecpe(q1, q0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrecpe))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vrecps.
__ vmov(s4, 2.0);
- __ vdup(q0, s4);
+ __ vdup(Neon32, q0, d2, 0);
__ vmov(s4, 1.5);
- __ vdup(q1, s4);
+ __ vdup(Neon32, q1, d2, 0);
__ vrecps(q1, q0, q1);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrecps))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vrsqrte.
__ vmov(s4, 4.0);
- __ vdup(q0, s4);
+ __ vdup(Neon32, q0, d2, 0);
__ vrsqrte(q1, q0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrsqrte))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vrsqrts.
__ vmov(s4, 2.0);
- __ vdup(q0, s4);
+ __ vdup(Neon32, q0, d2, 0);
__ vmov(s4, 2.5);
- __ vdup(q1, s4);
+ __ vdup(Neon32, q1, d2, 0);
__ vrsqrts(q1, q0, q1);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vrsqrts))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
// vceq (float).
__ vmov(s4, 1.0);
- __ vdup(q0, s4);
- __ vdup(q1, s4);
+ __ vdup(Neon32, q0, d2, 0);
+ __ vdup(Neon32, q1, d2, 0);
__ vceq(q1, q1, q0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vceqf))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
@@ -1600,7 +1615,7 @@ TEST(15) {
__ vmov(s1, -1.0);
__ vmov(s2, -0.0);
__ vmov(s3, 0.0);
- __ vdup(q1, s3);
+ __ vdup(Neon32, q1, d1, 1);
__ vcge(q2, q1, q0);
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vcgef))));
__ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
@@ -1637,6 +1652,17 @@ TEST(15) {
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vmax_s32))));
__ vst1(Neon8, NeonListOperand(q2), NeonMemOperand(r4));
+ // vpadd integer.
+ __ mov(r4, Operand(0x03));
+ __ vdup(Neon16, q0, r4);
+ __ vdup(Neon8, q1, r4);
+ __ vpadd(Neon8, d0, d0, d2);
+ __ vstr(d0, r0, offsetof(T, vpadd_i8));
+ __ vpadd(Neon16, d0, d0, d2);
+ __ vstr(d0, r0, offsetof(T, vpadd_i16));
+ __ vpadd(Neon32, d0, d0, d2);
+ __ vstr(d0, r0, offsetof(T, vpadd_i32));
+
// vpmin/vpmax integer.
__ mov(r4, Operand(0x03));
__ vdup(Neon16, q0, r4);
@@ -1795,6 +1821,26 @@ TEST(15) {
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, vshr_s32))));
__ vst1(Neon8, NeonListOperand(q1), NeonMemOperand(r4));
+ // vsli, vsri.
+ __ mov(r4, Operand(0xffffffff));
+ __ mov(r5, Operand(0x1));
+ __ vmov(d0, r4, r5);
+ __ vmov(d1, r5, r5);
+ __ vsli(Neon64, d1, d0, 32);
+ __ vstr(d1, r0, offsetof(T, vsli_64));
+ __ vmov(d0, r5, r4);
+ __ vmov(d1, r5, r5);
+ __ vsri(Neon64, d1, d0, 32);
+ __ vstr(d1, r0, offsetof(T, vsri_64));
+ __ vmov(d0, r4, r5);
+ __ vmov(d1, r5, r5);
+ __ vsli(Neon32, d1, d0, 16);
+ __ vstr(d1, r0, offsetof(T, vsli_32));
+ __ vmov(d0, r5, r4);
+ __ vmov(d1, r5, r5);
+ __ vsri(Neon32, d1, d0, 16);
+ __ vstr(d1, r0, offsetof(T, vsri_32));
+
// vceq.
__ mov(r4, Operand(0x03));
__ vdup(Neon8, q0, r4);
@@ -2088,7 +2134,9 @@ TEST(15) {
CHECK_EQ_SPLAT(vdup8, 0x0a0a0a0au);
CHECK_EQ_SPLAT(vdup16, 0x000a000au);
CHECK_EQ_SPLAT(vdup32, 0x0000000au);
- CHECK_EQ_SPLAT(vdupf, -1.0);
+ CHECK_EQ_SPLAT(vdupf, -1.0); // bit pattern is 0xbf800000.
+ CHECK_EQ_32X2(vdupf_16, 0xbf80bf80u, 0xbf80bf80u);
+ CHECK_EQ_SPLAT(vdupf_8, 0xbfbfbfbfu);
// src: [-1, -1, 1, 1]
CHECK_EQ_32X4(vcvt_s32_f32, -1, -1, 1, 1);
@@ -2115,6 +2163,7 @@ TEST(15) {
CHECK_EQ_SPLAT(vand, 0x00fe00feu);
CHECK_EQ_SPLAT(vorr, 0x00ff00ffu);
CHECK_EQ_SPLAT(vaddf, 2.0);
+ CHECK_EQ_32X2(vpaddf, 3.0, 7.0);
CHECK_EQ_SPLAT(vminf, 1.0);
CHECK_EQ_SPLAT(vmaxf, 2.0);
CHECK_EQ_SPLAT(vsubf, -1.0);
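The vpadd expectation is plain pairwise arithmetic: with s0..s3 = {1.0, 2.0, 3.0, 4.0} loaded earlier, the source doublewords are d0 = {1.0, 2.0} and d1 = {3.0, 4.0}, so:

  // vpadd(d2, d0, d1) => d2 = {1.0 + 2.0, 3.0 + 4.0} = {3.0, 7.0},
  // which is exactly what CHECK_EQ_32X2(vpaddf, 3.0, 7.0) asserts.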
@@ -2137,6 +2186,9 @@ TEST(15) {
CHECK_EQ_SPLAT(vmin_s32, 0xffffffffu);
CHECK_EQ_SPLAT(vmax_s32, 0xffu);
// [0, 3, 0, 3, ...] and [3, 3, 3, 3, ...]
+ CHECK_EQ_32X2(vpadd_i8, 0x03030303u, 0x06060606u);
+ CHECK_EQ_32X2(vpadd_i16, 0x0c0c0606u, 0x06060606u);
+ CHECK_EQ_32X2(vpadd_i32, 0x12120c0cu, 0x06060606u);
CHECK_EQ_32X2(vpmin_s8, 0x00000000u, 0x03030303u);
CHECK_EQ_32X2(vpmax_s8, 0x03030303u, 0x03030303u);
// [0, ffff, 0, ffff] and [ffff, ffff]
@@ -2166,6 +2218,10 @@ TEST(15) {
CHECK_EQ_SPLAT(vshr_s8, 0xc0c0c0c0u);
CHECK_EQ_SPLAT(vshr_u16, 0x00400040u);
CHECK_EQ_SPLAT(vshr_s32, 0xffffc040u);
+ CHECK_EQ_32X2(vsli_64, 0x01u, 0xffffffffu);
+ CHECK_EQ_32X2(vsri_64, 0xffffffffu, 0x01u);
+ CHECK_EQ_32X2(vsli_32, 0xffff0001u, 0x00010001u);
+ CHECK_EQ_32X2(vsri_32, 0x00000000u, 0x0000ffffu);
CHECK_EQ_SPLAT(vceq, 0x00ff00ffu);
// [0, 3, 0, 3, ...] >= [3, 3, 3, 3, ...]
CHECK_EQ_SPLAT(vcge_s8, 0x00ff00ffu);
@@ -3817,11 +3873,8 @@ TEST(vswp) {
const uint32_t test_2 = 0x89abcdef;
__ mov(r4, Operand(test_1));
__ mov(r5, Operand(test_2));
- // TODO(bbudge) replace with vdup when implemented.
- __ vmov(d8, r4, r4);
- __ vmov(d9, r4, r4); // q4 = [1.0, 1.0]
- __ vmov(d10, r5, r5);
- __ vmov(d11, r5, r5); // q5 = [-1.0, -1.0]
+ __ vdup(Neon32, q4, r4);
+ __ vdup(Neon32, q5, r5);
__ vswp(q4, q5);
__ add(r6, r0, Operand(static_cast<int32_t>(offsetof(T, vswp_q4))));
__ vst1(Neon8, NeonListOperand(q4), NeonMemOperand(r6));
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 1728a9c7e7..05650ed233 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -173,15 +173,15 @@ static void InitializeVM() {
#else // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
-#define SETUP_SIZE(buf_size) \
- Isolate* isolate = CcTest::i_isolate(); \
- HandleScope scope(isolate); \
- CHECK(isolate != NULL); \
- size_t actual_size; \
- byte* buf = static_cast<byte*>( \
- v8::base::OS::Allocate(buf_size, &actual_size, true)); \
- MacroAssembler masm(isolate, buf, actual_size, \
- v8::internal::CodeObjectRequired::kYes); \
+#define SETUP_SIZE(buf_size) \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ CHECK(isolate != NULL); \
+ size_t actual_size; \
+ byte* buf = static_cast<byte*>( \
+ v8::base::OS::Allocate(buf_size, &actual_size, true)); \
+ MacroAssembler masm(isolate, buf, static_cast<unsigned>(actual_size), \
+ v8::internal::CodeObjectRequired::kYes); \
RegisterDump core;
#define RESET() \
@@ -841,11 +841,13 @@ TEST(bic) {
// field.
// Use x20 to preserve csp. We check for the result via x21 because the
// test infrastructure requires that csp be restored to its original value.
+ __ SetStackPointer(jssp); // Change stack pointer to avoid consistency check.
__ Mov(x20, csp);
__ Mov(x0, 0xffffff);
__ Bic(csp, x0, Operand(0xabcdef));
__ Mov(x21, csp);
__ Mov(csp, x20);
+ __ SetStackPointer(csp); // Restore stack pointer.
END();
RUN();
@@ -3742,6 +3744,77 @@ TEST(add_sub_zero) {
TEARDOWN();
}
+TEST(preshift_immediates) {
+ INIT_V8();
+ SETUP();
+
+ START();
+ // Test operations involving immediates that could be generated using a
+ // pre-shifted encodable immediate followed by a post-shift applied to
+ // the arithmetic or logical operation.
+
+ // Save csp and change stack pointer to avoid consistency check.
+ __ SetStackPointer(jssp);
+ __ Mov(x29, csp);
+
+ // Set the registers to known values.
+ __ Mov(x0, 0x1000);
+ __ Mov(csp, 0x1000);
+
+ // Arithmetic ops.
+ __ Add(x1, x0, 0x1f7de);
+ __ Add(w2, w0, 0xffffff1);
+ __ Adds(x3, x0, 0x18001);
+ __ Adds(w4, w0, 0xffffff1);
+ __ Add(x5, x0, 0x10100);
+ __ Sub(w6, w0, 0xffffff1);
+ __ Subs(x7, x0, 0x18001);
+ __ Subs(w8, w0, 0xffffff1);
+
+ // Logical ops.
+ __ And(x9, x0, 0x1f7de);
+ __ Orr(w10, w0, 0xffffff1);
+ __ Eor(x11, x0, 0x18001);
+
+ // Ops using the stack pointer.
+ __ Add(csp, csp, 0x1f7f0);
+ __ Mov(x12, csp);
+ __ Mov(csp, 0x1000);
+
+ __ Adds(x13, csp, 0x1f7f0);
+
+ __ Orr(csp, x0, 0x1f7f0);
+ __ Mov(x14, csp);
+ __ Mov(csp, 0x1000);
+
+ __ Add(csp, csp, 0x10100);
+ __ Mov(x15, csp);
+
+ // Restore csp.
+ __ Mov(csp, x29);
+ __ SetStackPointer(csp);
+ END();
+
+ RUN();
+
+ CHECK_EQUAL_64(0x1000, x0);
+ CHECK_EQUAL_64(0x207de, x1);
+ CHECK_EQUAL_64(0x10000ff1, x2);
+ CHECK_EQUAL_64(0x19001, x3);
+ CHECK_EQUAL_64(0x10000ff1, x4);
+ CHECK_EQUAL_64(0x11100, x5);
+ CHECK_EQUAL_64(0xf000100f, x6);
+ CHECK_EQUAL_64(0xfffffffffffe8fff, x7);
+ CHECK_EQUAL_64(0xf000100f, x8);
+ CHECK_EQUAL_64(0x1000, x9);
+ CHECK_EQUAL_64(0xffffff1, x10);
+ CHECK_EQUAL_64(0x207f0, x12);
+ CHECK_EQUAL_64(0x207f0, x13);
+ CHECK_EQUAL_64(0x1f7f0, x14);
+ CHECK_EQUAL_64(0x11100, x15);
+
+ TEARDOWN();
+}
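Whichever instruction sequence the macro assembler ends up emitting, each CHECK_EQUAL_64 row above reduces to plain 64- or 32-bit two's-complement arithmetic on x0 = 0x1000. A host-side cross-check of a few rows (standalone C++, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t x0 = 0x1000;
      assert(x0 + 0x1f7de == 0x207de);                               // x1
      assert(static_cast<uint32_t>(x0 + 0xffffff1) == 0x10000ff1u);  // w2
      assert(static_cast<uint32_t>(x0 - 0xffffff1) == 0xf000100fu);  // w6
      assert(x0 - 0x18001 == 0xfffffffffffe8fffull);                 // x7
      assert((x0 & 0x1f7de) == 0x1000u);                             // x9
      assert((x0 | 0xffffff1) == 0xffffff1u);                        // w10
      assert(x0 + 0x1f7f0 == 0x207f0u);                              // x12, x13
      return 0;
    }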
TEST(claim_drop_zero) {
INIT_V8();
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 2283d4978c..0d3dd096d0 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -5513,4 +5513,78 @@ TEST(maddf_msubf_d) {
});
}
+uint32_t run_Subu(uint32_t imm, int32_t num_instr) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+
+ Label code_start;
+ __ bind(&code_start);
+ __ Subu(v0, zero_reg, imm);
+ CHECK_EQ(assm.SizeOfCodeGeneratedSince(&code_start),
+ num_instr * Assembler::kInstrSize);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+
+ uint32_t res = reinterpret_cast<uint32_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+
+ return res;
+}
+
+TEST(Subu) {
+ CcTest::InitializeVM();
+
+ // Test Subu macro-instruction for min_int16 and max_int16 border cases.
+ // For subtracting int16 immediate values we use addiu.
+
+ struct TestCaseSubu {
+ uint32_t imm;
+ uint32_t expected_res;
+ int32_t num_instr;
+ };
+
+ // We call Subu(v0, zero_reg, imm) to test cases listed below.
+ // 0 - imm = expected_res
+ struct TestCaseSubu tc[] = {
+ // imm, expected_res, num_instr
+ {0xffff8000, 0x00008000, 2}, // min_int16
+ // Generates ori + addu
+ // We can't have just addiu because -min_int16 > max_int16 so use
+ // register. We can load min_int16 to at register with addiu and then
+ // subtract at with subu, but now we use ori + addu because -min_int16 can
+ // be loaded using ori.
+ {0x8000, 0xffff8000, 1}, // max_int16 + 1
+ // Generates addiu
+ // max_int16 + 1 is not int16 but -(max_int16 + 1) is, just use addiu.
+ {0xffff7fff, 0x8001, 2}, // min_int16 - 1
+ // Generates ori + addu
+ // To load this value to at we need two instructions and another one to
+ // subtract, lui + ori + subu. But we can load -value to at using just
+ // ori and then add at register with addu.
+ {0x8001, 0xffff7fff, 2}, // max_int16 + 2
+ // Generates ori + subu
+ // Not int16 but is uint16, load value to at with ori and subtract with
+ // subu.
+ {0x00010000, 0xffff0000, 2},
+ // Generates lui + subu
+ // Load value using lui to at and subtract with subu.
+ {0x00010001, 0xfffeffff, 3},
+ // Generates lui + ori + subu
+ // We have to generate three instructions in this case.
+ };
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseSubu);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ CHECK_EQ(tc[i].expected_res, run_Subu(tc[i].imm, tc[i].num_instr));
+ }
+}
+
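The per-case comments above pin the selection logic down completely; restated as plain C++ (a sketch: is_int16/is_uint16 are assumed to behave like V8's helpers of the same names):

    #include <cstdint>

    static bool is_int16(int32_t v) { return v >= -32768 && v <= 32767; }
    static bool is_uint16(uint32_t v) { return v <= 0xffff; }

    // Instruction count Subu(rd, rs, imm) is expected to need, mirroring the
    // TestCaseSubu rows above (recall 0 - imm == expected_res in every row).
    static int32_t ExpectedSubuLength(uint32_t imm) {
      int32_t neg = -static_cast<int32_t>(imm);  // fine for the rows above
      if (is_int16(neg)) return 1;                          // addiu rd, rs, -imm
      if (is_uint16(static_cast<uint32_t>(neg))) return 2;  // ori + addu
      if (is_uint16(imm)) return 2;                         // ori + subu
      if ((imm & 0xffff) == 0) return 2;                    // lui + subu
      return 3;                                             // lui + ori + subu
    }

Running the six rows through ExpectedSubuLength reproduces the num_instr column: 2, 1, 2, 2, 2, 3.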
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-mips64.cc b/deps/v8/test/cctest/test-assembler-mips64.cc
index 5875f1e79d..d796b4faad 100644
--- a/deps/v8/test/cctest/test-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-assembler-mips64.cc
@@ -288,61 +288,61 @@ TEST(MIPS3) {
Label L, C;
// Double precision floating point instructions.
- __ ldc1(f4, MemOperand(a0, offsetof(T, a)) );
- __ ldc1(f6, MemOperand(a0, offsetof(T, b)) );
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
+ __ Ldc1(f6, MemOperand(a0, offsetof(T, b)));
__ add_d(f8, f4, f6);
- __ sdc1(f8, MemOperand(a0, offsetof(T, c)) ); // c = a + b.
+ __ Sdc1(f8, MemOperand(a0, offsetof(T, c))); // c = a + b.
__ mov_d(f10, f8); // c
__ neg_d(f12, f6); // -b
__ sub_d(f10, f10, f12);
- __ sdc1(f10, MemOperand(a0, offsetof(T, d)) ); // d = c - (-b).
+ __ Sdc1(f10, MemOperand(a0, offsetof(T, d))); // d = c - (-b).
- __ sdc1(f4, MemOperand(a0, offsetof(T, b)) ); // b = a.
+ __ Sdc1(f4, MemOperand(a0, offsetof(T, b))); // b = a.
__ li(a4, 120);
__ mtc1(a4, f14);
__ cvt_d_w(f14, f14); // f14 = 120.0.
__ mul_d(f10, f10, f14);
- __ sdc1(f10, MemOperand(a0, offsetof(T, e)) ); // e = d * 120 = 1.8066e16.
+ __ Sdc1(f10, MemOperand(a0, offsetof(T, e))); // e = d * 120 = 1.8066e16.
__ div_d(f12, f10, f4);
- __ sdc1(f12, MemOperand(a0, offsetof(T, f)) ); // f = e / a = 120.44.
+ __ Sdc1(f12, MemOperand(a0, offsetof(T, f))); // f = e / a = 120.44.
__ sqrt_d(f14, f12);
- __ sdc1(f14, MemOperand(a0, offsetof(T, g)) );
+ __ Sdc1(f14, MemOperand(a0, offsetof(T, g)));
// g = sqrt(f) = 10.97451593465515908537
if (kArchVariant == kMips64r2) {
- __ ldc1(f4, MemOperand(a0, offsetof(T, h)) );
- __ ldc1(f6, MemOperand(a0, offsetof(T, i)) );
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, h)));
+ __ Ldc1(f6, MemOperand(a0, offsetof(T, i)));
__ madd_d(f14, f6, f4, f6);
- __ sdc1(f14, MemOperand(a0, offsetof(T, h)) );
+ __ Sdc1(f14, MemOperand(a0, offsetof(T, h)));
}
// Single precision floating point instructions.
- __ lwc1(f4, MemOperand(a0, offsetof(T, fa)) );
- __ lwc1(f6, MemOperand(a0, offsetof(T, fb)) );
+ __ Lwc1(f4, MemOperand(a0, offsetof(T, fa)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(T, fb)));
__ add_s(f8, f4, f6);
- __ swc1(f8, MemOperand(a0, offsetof(T, fc)) ); // fc = fa + fb.
+ __ Swc1(f8, MemOperand(a0, offsetof(T, fc))); // fc = fa + fb.
__ neg_s(f10, f6); // -fb
__ sub_s(f10, f8, f10);
- __ swc1(f10, MemOperand(a0, offsetof(T, fd)) ); // fd = fc - (-fb).
+ __ Swc1(f10, MemOperand(a0, offsetof(T, fd))); // fd = fc - (-fb).
- __ swc1(f4, MemOperand(a0, offsetof(T, fb)) ); // fb = fa.
+ __ Swc1(f4, MemOperand(a0, offsetof(T, fb))); // fb = fa.
__ li(t0, 120);
__ mtc1(t0, f14);
__ cvt_s_w(f14, f14); // f14 = 120.0.
__ mul_s(f10, f10, f14);
- __ swc1(f10, MemOperand(a0, offsetof(T, fe)) ); // fe = fd * 120
+ __ Swc1(f10, MemOperand(a0, offsetof(T, fe))); // fe = fd * 120
__ div_s(f12, f10, f4);
- __ swc1(f12, MemOperand(a0, offsetof(T, ff)) ); // ff = fe / fa
+ __ Swc1(f12, MemOperand(a0, offsetof(T, ff))); // ff = fe / fa
__ sqrt_s(f14, f12);
- __ swc1(f14, MemOperand(a0, offsetof(T, fg)) );
+ __ Swc1(f14, MemOperand(a0, offsetof(T, fg)));
__ jr(ra);
__ nop();
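From this point on, the pattern in test-assembler-mips64.cc (and in test-assembler-mips.cc above) is uniform: raw load/store mnemonics (ldc1, sdc1, lw, sw, lh, sd, ...) become their capitalized MacroAssembler counterparts (Ldc1, Sdc1, Lw, Sw, Lh, Sd, ...), and plain Assembler instances become MacroAssembler so those macros are available. The assumed contract behind the spelling, sketched (large_offset is hypothetical):

    // Raw mnemonic: encodes exactly one instruction, so the displacement must
    // already be a legal int16 offset.
    __ ldc1(f4, MemOperand(a0, 8));

    // Macro: free to legalize the operand first (for instance, materialize an
    // out-of-range offset in a scratch register), so it may emit more than one
    // instruction.
    __ Ldc1(f4, MemOperand(a0, large_offset));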
@@ -408,11 +408,11 @@ TEST(MIPS4) {
} T;
T t;
- Assembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label L, C;
- __ ldc1(f4, MemOperand(a0, offsetof(T, a)));
- __ ldc1(f5, MemOperand(a0, offsetof(T, b)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
+ __ Ldc1(f5, MemOperand(a0, offsetof(T, b)));
// Swap f4 and f5, by using 3 integer registers, a4-a6,
// both two 32-bit chunks, and one 64-bit chunk.
@@ -427,16 +427,16 @@ TEST(MIPS4) {
__ dmtc1(a6, f4);
// Store the swapped f4 and f5 back to memory.
- __ sdc1(f4, MemOperand(a0, offsetof(T, a)));
- __ sdc1(f5, MemOperand(a0, offsetof(T, c)));
+ __ Sdc1(f4, MemOperand(a0, offsetof(T, a)));
+ __ Sdc1(f5, MemOperand(a0, offsetof(T, c)));
// Test sign extension of move operations from coprocessor.
- __ ldc1(f4, MemOperand(a0, offsetof(T, d)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, d)));
__ mfhc1(a4, f4);
__ mfc1(a5, f4);
- __ sd(a4, MemOperand(a0, offsetof(T, high)));
- __ sd(a5, MemOperand(a0, offsetof(T, low)));
+ __ Sd(a4, MemOperand(a0, offsetof(T, high)));
+ __ Sd(a5, MemOperand(a0, offsetof(T, low)));
__ jr(ra);
__ nop();
@@ -475,34 +475,34 @@ TEST(MIPS5) {
} T;
T t;
- Assembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Load all structure elements to registers.
- __ ldc1(f4, MemOperand(a0, offsetof(T, a)) );
- __ ldc1(f6, MemOperand(a0, offsetof(T, b)) );
- __ lw(a4, MemOperand(a0, offsetof(T, i)) );
- __ lw(a5, MemOperand(a0, offsetof(T, j)) );
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
+ __ Ldc1(f6, MemOperand(a0, offsetof(T, b)));
+ __ Lw(a4, MemOperand(a0, offsetof(T, i)));
+ __ Lw(a5, MemOperand(a0, offsetof(T, j)));
// Convert double in f4 to int in element i.
__ cvt_w_d(f8, f4);
__ mfc1(a6, f8);
- __ sw(a6, MemOperand(a0, offsetof(T, i)) );
+ __ Sw(a6, MemOperand(a0, offsetof(T, i)));
// Convert double in f6 to int in element j.
__ cvt_w_d(f10, f6);
__ mfc1(a7, f10);
- __ sw(a7, MemOperand(a0, offsetof(T, j)) );
+ __ Sw(a7, MemOperand(a0, offsetof(T, j)));
// Convert int in original i (a4) to double in a.
__ mtc1(a4, f12);
__ cvt_d_w(f0, f12);
- __ sdc1(f0, MemOperand(a0, offsetof(T, a)) );
+ __ Sdc1(f0, MemOperand(a0, offsetof(T, a)));
// Convert int in original j (a5) to double in b.
__ mtc1(a5, f14);
__ cvt_d_w(f2, f14);
- __ sdc1(f2, MemOperand(a0, offsetof(T, b)) );
+ __ Sdc1(f2, MemOperand(a0, offsetof(T, b)));
__ jr(ra);
__ nop();
@@ -544,35 +544,35 @@ TEST(MIPS6) {
} T;
T t;
- Assembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Basic word load/store.
- __ lw(a4, MemOperand(a0, offsetof(T, ui)) );
- __ sw(a4, MemOperand(a0, offsetof(T, r1)) );
+ __ Lw(a4, MemOperand(a0, offsetof(T, ui)));
+ __ Sw(a4, MemOperand(a0, offsetof(T, r1)));
// lh with positive data.
- __ lh(a5, MemOperand(a0, offsetof(T, ui)) );
- __ sw(a5, MemOperand(a0, offsetof(T, r2)) );
+ __ Lh(a5, MemOperand(a0, offsetof(T, ui)));
+ __ Sw(a5, MemOperand(a0, offsetof(T, r2)));
// lh with negative data.
- __ lh(a6, MemOperand(a0, offsetof(T, si)) );
- __ sw(a6, MemOperand(a0, offsetof(T, r3)) );
+ __ Lh(a6, MemOperand(a0, offsetof(T, si)));
+ __ Sw(a6, MemOperand(a0, offsetof(T, r3)));
// lhu with negative data.
- __ lhu(a7, MemOperand(a0, offsetof(T, si)) );
- __ sw(a7, MemOperand(a0, offsetof(T, r4)) );
+ __ Lhu(a7, MemOperand(a0, offsetof(T, si)));
+ __ Sw(a7, MemOperand(a0, offsetof(T, r4)));
- // lb with negative data.
- __ lb(t0, MemOperand(a0, offsetof(T, si)) );
- __ sw(t0, MemOperand(a0, offsetof(T, r5)) );
+ // Lb with negative data.
+ __ Lb(t0, MemOperand(a0, offsetof(T, si)));
+ __ Sw(t0, MemOperand(a0, offsetof(T, r5)));
// sh writes only 1/2 of word.
__ lui(t1, 0x3333);
__ ori(t1, t1, 0x3333);
- __ sw(t1, MemOperand(a0, offsetof(T, r6)) );
- __ lhu(t1, MemOperand(a0, offsetof(T, si)) );
- __ sh(t1, MemOperand(a0, offsetof(T, r6)) );
+ __ Sw(t1, MemOperand(a0, offsetof(T, r6)));
+ __ Lhu(t1, MemOperand(a0, offsetof(T, si)));
+ __ Sh(t1, MemOperand(a0, offsetof(T, r6)));
__ jr(ra);
__ nop();
@@ -626,8 +626,8 @@ TEST(MIPS7) {
MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label neither_is_nan, less_than, outa_here;
- __ ldc1(f4, MemOperand(a0, offsetof(T, a)) );
- __ ldc1(f6, MemOperand(a0, offsetof(T, b)) );
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, a)));
+ __ Ldc1(f6, MemOperand(a0, offsetof(T, b)));
if (kArchVariant != kMips64r6) {
__ c(UN, D, f4, f6);
__ bc1f(&neither_is_nan);
@@ -636,7 +636,7 @@ TEST(MIPS7) {
__ bc1eqz(&neither_is_nan, f2);
}
__ nop();
- __ sw(zero_reg, MemOperand(a0, offsetof(T, result)) );
+ __ Sw(zero_reg, MemOperand(a0, offsetof(T, result)));
__ Branch(&outa_here);
__ bind(&neither_is_nan);
@@ -650,13 +650,12 @@ TEST(MIPS7) {
}
__ nop();
- __ sw(zero_reg, MemOperand(a0, offsetof(T, result)) );
+ __ Sw(zero_reg, MemOperand(a0, offsetof(T, result)));
__ Branch(&outa_here);
__ bind(&less_than);
__ Addu(a4, zero_reg, Operand(1));
- __ sw(a4, MemOperand(a0, offsetof(T, result)) ); // Set true.
-
+ __ Sw(a4, MemOperand(a0, offsetof(T, result))); // Set true.
// This test-case should have additional tests.
@@ -715,7 +714,7 @@ TEST(MIPS8) {
v8::internal::CodeObjectRequired::kYes);
// Basic word load.
- __ lw(a4, MemOperand(a0, offsetof(T, input)) );
+ __ Lw(a4, MemOperand(a0, offsetof(T, input)));
// ROTR instruction (called through the Ror macro).
__ Ror(a5, a4, 0x0004);
@@ -727,13 +726,13 @@ TEST(MIPS8) {
__ Ror(t3, a4, 0x001c);
// Basic word store.
- __ sw(a5, MemOperand(a0, offsetof(T, result_rotr_4)) );
- __ sw(a6, MemOperand(a0, offsetof(T, result_rotr_8)) );
- __ sw(a7, MemOperand(a0, offsetof(T, result_rotr_12)) );
- __ sw(t0, MemOperand(a0, offsetof(T, result_rotr_16)) );
- __ sw(t1, MemOperand(a0, offsetof(T, result_rotr_20)) );
- __ sw(t2, MemOperand(a0, offsetof(T, result_rotr_24)) );
- __ sw(t3, MemOperand(a0, offsetof(T, result_rotr_28)) );
+ __ Sw(a5, MemOperand(a0, offsetof(T, result_rotr_4)));
+ __ Sw(a6, MemOperand(a0, offsetof(T, result_rotr_8)));
+ __ Sw(a7, MemOperand(a0, offsetof(T, result_rotr_12)));
+ __ Sw(t0, MemOperand(a0, offsetof(T, result_rotr_16)));
+ __ Sw(t1, MemOperand(a0, offsetof(T, result_rotr_20)));
+ __ Sw(t2, MemOperand(a0, offsetof(T, result_rotr_24)));
+ __ Sw(t3, MemOperand(a0, offsetof(T, result_rotr_28)));
// ROTRV instruction (called through the Ror macro).
__ li(t3, 0x0004);
@@ -752,13 +751,13 @@ TEST(MIPS8) {
__ Ror(t3, a4, t3);
// Basic word store.
- __ sw(a5, MemOperand(a0, offsetof(T, result_rotrv_4)) );
- __ sw(a6, MemOperand(a0, offsetof(T, result_rotrv_8)) );
- __ sw(a7, MemOperand(a0, offsetof(T, result_rotrv_12)) );
- __ sw(t0, MemOperand(a0, offsetof(T, result_rotrv_16)) );
- __ sw(t1, MemOperand(a0, offsetof(T, result_rotrv_20)) );
- __ sw(t2, MemOperand(a0, offsetof(T, result_rotrv_24)) );
- __ sw(t3, MemOperand(a0, offsetof(T, result_rotrv_28)) );
+ __ Sw(a5, MemOperand(a0, offsetof(T, result_rotrv_4)));
+ __ Sw(a6, MemOperand(a0, offsetof(T, result_rotrv_8)));
+ __ Sw(a7, MemOperand(a0, offsetof(T, result_rotrv_12)));
+ __ Sw(t0, MemOperand(a0, offsetof(T, result_rotrv_16)));
+ __ Sw(t1, MemOperand(a0, offsetof(T, result_rotrv_20)));
+ __ Sw(t2, MemOperand(a0, offsetof(T, result_rotrv_24)));
+ __ Sw(t3, MemOperand(a0, offsetof(T, result_rotrv_28)));
__ jr(ra);
__ nop();
@@ -838,7 +837,7 @@ TEST(MIPS10) {
} T;
T t;
- Assembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label L, C;
if (kArchVariant == kMips64r2) {
@@ -846,43 +845,41 @@ TEST(MIPS10) {
// - 32 FP regs of 64-bits each, no odd/even pairs.
// - Note that cvt_l_d/cvt_d_l ARE legal in FR=1 mode.
// Load all structure elements to registers.
- __ ldc1(f0, MemOperand(a0, offsetof(T, a)));
+ __ Ldc1(f0, MemOperand(a0, offsetof(T, a)));
// Save the raw bits of the double.
__ mfc1(a4, f0);
__ mfhc1(a5, f0);
- __ sw(a4, MemOperand(a0, offsetof(T, dbl_mant)));
- __ sw(a5, MemOperand(a0, offsetof(T, dbl_exp)));
+ __ Sw(a4, MemOperand(a0, offsetof(T, dbl_mant)));
+ __ Sw(a5, MemOperand(a0, offsetof(T, dbl_exp)));
// Convert double in f0 to long, save hi/lo parts.
__ cvt_l_d(f0, f0);
__ mfc1(a4, f0); // f0 LS 32 bits of long.
__ mfhc1(a5, f0); // f0 MS 32 bits of long.
- __ sw(a4, MemOperand(a0, offsetof(T, long_lo)));
- __ sw(a5, MemOperand(a0, offsetof(T, long_hi)));
+ __ Sw(a4, MemOperand(a0, offsetof(T, long_lo)));
+ __ Sw(a5, MemOperand(a0, offsetof(T, long_hi)));
// Combine the high/low ints, convert back to double.
__ dsll32(a6, a5, 0); // Move a5 to high bits of a6.
__ or_(a6, a6, a4);
__ dmtc1(a6, f1);
__ cvt_d_l(f1, f1);
- __ sdc1(f1, MemOperand(a0, offsetof(T, a_converted)));
-
+ __ Sdc1(f1, MemOperand(a0, offsetof(T, a_converted)));
// Convert the b long integers to double b.
- __ lw(a4, MemOperand(a0, offsetof(T, b_long_lo)));
- __ lw(a5, MemOperand(a0, offsetof(T, b_long_hi)));
+ __ Lw(a4, MemOperand(a0, offsetof(T, b_long_lo)));
+ __ Lw(a5, MemOperand(a0, offsetof(T, b_long_hi)));
__ mtc1(a4, f8); // f8 LS 32-bits.
__ mthc1(a5, f8); // f8 MS 32-bits.
__ cvt_d_l(f10, f8);
- __ sdc1(f10, MemOperand(a0, offsetof(T, b)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(T, b)));
// Convert double b back to long-int.
- __ ldc1(f31, MemOperand(a0, offsetof(T, b)));
+ __ Ldc1(f31, MemOperand(a0, offsetof(T, b)));
__ cvt_l_d(f31, f31);
__ dmfc1(a7, f31);
- __ sd(a7, MemOperand(a0, offsetof(T, b_long_as_int64)));
-
+ __ Sd(a7, MemOperand(a0, offsetof(T, b_long_as_int64)));
__ jr(ra);
__ nop();
@@ -941,82 +938,83 @@ TEST(MIPS11) {
} T;
T t;
- Assembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
// Test all combinations of LWL and vAddr.
- __ lw(a4, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a4, MemOperand(a0, offsetof(T, reg_init)));
__ lwl(a4, MemOperand(a0, offsetof(T, mem_init)));
- __ sw(a4, MemOperand(a0, offsetof(T, lwl_0)));
+ __ Sw(a4, MemOperand(a0, offsetof(T, lwl_0)));
- __ lw(a5, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a5, MemOperand(a0, offsetof(T, reg_init)));
__ lwl(a5, MemOperand(a0, offsetof(T, mem_init) + 1));
- __ sw(a5, MemOperand(a0, offsetof(T, lwl_1)));
+ __ Sw(a5, MemOperand(a0, offsetof(T, lwl_1)));
- __ lw(a6, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a6, MemOperand(a0, offsetof(T, reg_init)));
__ lwl(a6, MemOperand(a0, offsetof(T, mem_init) + 2));
- __ sw(a6, MemOperand(a0, offsetof(T, lwl_2)));
+ __ Sw(a6, MemOperand(a0, offsetof(T, lwl_2)));
- __ lw(a7, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a7, MemOperand(a0, offsetof(T, reg_init)));
__ lwl(a7, MemOperand(a0, offsetof(T, mem_init) + 3));
- __ sw(a7, MemOperand(a0, offsetof(T, lwl_3)));
+ __ Sw(a7, MemOperand(a0, offsetof(T, lwl_3)));
// Test all combinations of LWR and vAddr.
- __ lw(a4, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a4, MemOperand(a0, offsetof(T, reg_init)));
__ lwr(a4, MemOperand(a0, offsetof(T, mem_init)));
- __ sw(a4, MemOperand(a0, offsetof(T, lwr_0)));
+ __ Sw(a4, MemOperand(a0, offsetof(T, lwr_0)));
- __ lw(a5, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a5, MemOperand(a0, offsetof(T, reg_init)));
__ lwr(a5, MemOperand(a0, offsetof(T, mem_init) + 1));
- __ sw(a5, MemOperand(a0, offsetof(T, lwr_1)));
+ __ Sw(a5, MemOperand(a0, offsetof(T, lwr_1)));
- __ lw(a6, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a6, MemOperand(a0, offsetof(T, reg_init)));
__ lwr(a6, MemOperand(a0, offsetof(T, mem_init) + 2));
- __ sw(a6, MemOperand(a0, offsetof(T, lwr_2)) );
+ __ Sw(a6, MemOperand(a0, offsetof(T, lwr_2)));
- __ lw(a7, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a7, MemOperand(a0, offsetof(T, reg_init)));
__ lwr(a7, MemOperand(a0, offsetof(T, mem_init) + 3));
- __ sw(a7, MemOperand(a0, offsetof(T, lwr_3)) );
+ __ Sw(a7, MemOperand(a0, offsetof(T, lwr_3)));
// Test all combinations of SWL and vAddr.
- __ lw(a4, MemOperand(a0, offsetof(T, mem_init)));
- __ sw(a4, MemOperand(a0, offsetof(T, swl_0)));
- __ lw(a4, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a4, MemOperand(a0, offsetof(T, mem_init)));
+ __ Sw(a4, MemOperand(a0, offsetof(T, swl_0)));
+ __ Lw(a4, MemOperand(a0, offsetof(T, reg_init)));
__ swl(a4, MemOperand(a0, offsetof(T, swl_0)));
- __ lw(a5, MemOperand(a0, offsetof(T, mem_init)));
- __ sw(a5, MemOperand(a0, offsetof(T, swl_1)));
- __ lw(a5, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a5, MemOperand(a0, offsetof(T, mem_init)));
+ __ Sw(a5, MemOperand(a0, offsetof(T, swl_1)));
+ __ Lw(a5, MemOperand(a0, offsetof(T, reg_init)));
__ swl(a5, MemOperand(a0, offsetof(T, swl_1) + 1));
- __ lw(a6, MemOperand(a0, offsetof(T, mem_init)));
- __ sw(a6, MemOperand(a0, offsetof(T, swl_2)));
- __ lw(a6, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a6, MemOperand(a0, offsetof(T, mem_init)));
+ __ Sw(a6, MemOperand(a0, offsetof(T, swl_2)));
+ __ Lw(a6, MemOperand(a0, offsetof(T, reg_init)));
__ swl(a6, MemOperand(a0, offsetof(T, swl_2) + 2));
- __ lw(a7, MemOperand(a0, offsetof(T, mem_init)));
- __ sw(a7, MemOperand(a0, offsetof(T, swl_3)));
- __ lw(a7, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a7, MemOperand(a0, offsetof(T, mem_init)));
+ __ Sw(a7, MemOperand(a0, offsetof(T, swl_3)));
+ __ Lw(a7, MemOperand(a0, offsetof(T, reg_init)));
__ swl(a7, MemOperand(a0, offsetof(T, swl_3) + 3));
// Test all combinations of SWR and vAddr.
- __ lw(a4, MemOperand(a0, offsetof(T, mem_init)));
- __ sw(a4, MemOperand(a0, offsetof(T, swr_0)));
- __ lw(a4, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a4, MemOperand(a0, offsetof(T, mem_init)));
+ __ Sw(a4, MemOperand(a0, offsetof(T, swr_0)));
+ __ Lw(a4, MemOperand(a0, offsetof(T, reg_init)));
__ swr(a4, MemOperand(a0, offsetof(T, swr_0)));
- __ lw(a5, MemOperand(a0, offsetof(T, mem_init)));
- __ sw(a5, MemOperand(a0, offsetof(T, swr_1)));
- __ lw(a5, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a5, MemOperand(a0, offsetof(T, mem_init)));
+ __ Sw(a5, MemOperand(a0, offsetof(T, swr_1)));
+ __ Lw(a5, MemOperand(a0, offsetof(T, reg_init)));
__ swr(a5, MemOperand(a0, offsetof(T, swr_1) + 1));
- __ lw(a6, MemOperand(a0, offsetof(T, mem_init)));
- __ sw(a6, MemOperand(a0, offsetof(T, swr_2)));
- __ lw(a6, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a6, MemOperand(a0, offsetof(T, mem_init)));
+ __ Sw(a6, MemOperand(a0, offsetof(T, swr_2)));
+ __ Lw(a6, MemOperand(a0, offsetof(T, reg_init)));
__ swr(a6, MemOperand(a0, offsetof(T, swr_2) + 2));
- __ lw(a7, MemOperand(a0, offsetof(T, mem_init)));
- __ sw(a7, MemOperand(a0, offsetof(T, swr_3)));
- __ lw(a7, MemOperand(a0, offsetof(T, reg_init)));
+ __ Lw(a7, MemOperand(a0, offsetof(T, mem_init)));
+ __ Sw(a7, MemOperand(a0, offsetof(T, swr_3)));
+ __ Lw(a7, MemOperand(a0, offsetof(T, reg_init)));
__ swr(a7, MemOperand(a0, offsetof(T, swr_3) + 3));
__ jr(ra);
@@ -1097,8 +1095,8 @@ TEST(MIPS12) {
__ mov(t2, fp); // Save frame pointer.
__ mov(fp, a0); // Access struct T by fp.
- __ lw(a4, MemOperand(a0, offsetof(T, y)));
- __ lw(a7, MemOperand(a0, offsetof(T, y4)));
+ __ Lw(a4, MemOperand(a0, offsetof(T, y)));
+ __ Lw(a7, MemOperand(a0, offsetof(T, y4)));
__ addu(a5, a4, a7);
__ subu(t0, a4, a7);
@@ -1116,30 +1114,30 @@ TEST(MIPS12) {
__ push(a7);
__ pop(t0);
__ nop();
- __ sw(a4, MemOperand(fp, offsetof(T, y)));
- __ lw(a4, MemOperand(fp, offsetof(T, y)));
+ __ Sw(a4, MemOperand(fp, offsetof(T, y)));
+ __ Lw(a4, MemOperand(fp, offsetof(T, y)));
__ nop();
- __ sw(a4, MemOperand(fp, offsetof(T, y)));
- __ lw(a5, MemOperand(fp, offsetof(T, y)));
+ __ Sw(a4, MemOperand(fp, offsetof(T, y)));
+ __ Lw(a5, MemOperand(fp, offsetof(T, y)));
__ nop();
__ push(a5);
- __ lw(a5, MemOperand(fp, offsetof(T, y)));
+ __ Lw(a5, MemOperand(fp, offsetof(T, y)));
__ pop(a5);
__ nop();
__ push(a5);
- __ lw(a6, MemOperand(fp, offsetof(T, y)));
+ __ Lw(a6, MemOperand(fp, offsetof(T, y)));
__ pop(a5);
__ nop();
__ push(a5);
- __ lw(a6, MemOperand(fp, offsetof(T, y)));
+ __ Lw(a6, MemOperand(fp, offsetof(T, y)));
__ pop(a6);
__ nop();
__ push(a6);
- __ lw(a6, MemOperand(fp, offsetof(T, y)));
+ __ Lw(a6, MemOperand(fp, offsetof(T, y)));
__ pop(a5);
__ nop();
__ push(a5);
- __ lw(a6, MemOperand(fp, offsetof(T, y)));
+ __ Lw(a6, MemOperand(fp, offsetof(T, y)));
__ pop(a7);
__ nop();
@@ -1184,19 +1182,19 @@ TEST(MIPS13) {
MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
- __ sw(a4, MemOperand(a0, offsetof(T, cvt_small_in)));
+ __ Sw(a4, MemOperand(a0, offsetof(T, cvt_small_in)));
__ Cvt_d_uw(f10, a4);
- __ sdc1(f10, MemOperand(a0, offsetof(T, cvt_small_out)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(T, cvt_small_out)));
__ Trunc_uw_d(f10, f10, f4);
- __ swc1(f10, MemOperand(a0, offsetof(T, trunc_small_out)));
+ __ Swc1(f10, MemOperand(a0, offsetof(T, trunc_small_out)));
- __ sw(a4, MemOperand(a0, offsetof(T, cvt_big_in)));
+ __ Sw(a4, MemOperand(a0, offsetof(T, cvt_big_in)));
__ Cvt_d_uw(f8, a4);
- __ sdc1(f8, MemOperand(a0, offsetof(T, cvt_big_out)));
+ __ Sdc1(f8, MemOperand(a0, offsetof(T, cvt_big_out)));
__ Trunc_uw_d(f8, f8, f4);
- __ swc1(f8, MemOperand(a0, offsetof(T, trunc_big_out)));
+ __ Swc1(f8, MemOperand(a0, offsetof(T, trunc_big_out)));
__ jr(ra);
__ nop();
@@ -1266,49 +1264,49 @@ TEST(MIPS14) {
__ cfc1(a1, FCSR);
// Disable FPU exceptions.
__ ctc1(zero_reg, FCSR);
-#define RUN_ROUND_TEST(x) \
- __ cfc1(t0, FCSR);\
- __ sw(t0, MemOperand(a0, offsetof(T, x##_isNaN2008))); \
- __ ldc1(f0, MemOperand(a0, offsetof(T, round_up_in))); \
- __ x##_w_d(f0, f0); \
- __ swc1(f0, MemOperand(a0, offsetof(T, x##_up_out))); \
- \
- __ ldc1(f0, MemOperand(a0, offsetof(T, round_down_in))); \
- __ x##_w_d(f0, f0); \
- __ swc1(f0, MemOperand(a0, offsetof(T, x##_down_out))); \
- \
- __ ldc1(f0, MemOperand(a0, offsetof(T, neg_round_up_in))); \
- __ x##_w_d(f0, f0); \
- __ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_up_out))); \
- \
- __ ldc1(f0, MemOperand(a0, offsetof(T, neg_round_down_in))); \
- __ x##_w_d(f0, f0); \
- __ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_down_out))); \
- \
- __ ldc1(f0, MemOperand(a0, offsetof(T, err1_in))); \
- __ ctc1(zero_reg, FCSR); \
- __ x##_w_d(f0, f0); \
- __ cfc1(a2, FCSR); \
- __ sw(a2, MemOperand(a0, offsetof(T, x##_err1_out))); \
- \
- __ ldc1(f0, MemOperand(a0, offsetof(T, err2_in))); \
- __ ctc1(zero_reg, FCSR); \
- __ x##_w_d(f0, f0); \
- __ cfc1(a2, FCSR); \
- __ sw(a2, MemOperand(a0, offsetof(T, x##_err2_out))); \
- \
- __ ldc1(f0, MemOperand(a0, offsetof(T, err3_in))); \
- __ ctc1(zero_reg, FCSR); \
- __ x##_w_d(f0, f0); \
- __ cfc1(a2, FCSR); \
- __ sw(a2, MemOperand(a0, offsetof(T, x##_err3_out))); \
- \
- __ ldc1(f0, MemOperand(a0, offsetof(T, err4_in))); \
- __ ctc1(zero_reg, FCSR); \
- __ x##_w_d(f0, f0); \
- __ cfc1(a2, FCSR); \
- __ sw(a2, MemOperand(a0, offsetof(T, x##_err4_out))); \
- __ swc1(f0, MemOperand(a0, offsetof(T, x##_invalid_result)));
+#define RUN_ROUND_TEST(x) \
+ __ cfc1(t0, FCSR); \
+ __ Sw(t0, MemOperand(a0, offsetof(T, x##_isNaN2008))); \
+ __ Ldc1(f0, MemOperand(a0, offsetof(T, round_up_in))); \
+ __ x##_w_d(f0, f0); \
+ __ Swc1(f0, MemOperand(a0, offsetof(T, x##_up_out))); \
+ \
+ __ Ldc1(f0, MemOperand(a0, offsetof(T, round_down_in))); \
+ __ x##_w_d(f0, f0); \
+ __ Swc1(f0, MemOperand(a0, offsetof(T, x##_down_out))); \
+ \
+ __ Ldc1(f0, MemOperand(a0, offsetof(T, neg_round_up_in))); \
+ __ x##_w_d(f0, f0); \
+ __ Swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_up_out))); \
+ \
+ __ Ldc1(f0, MemOperand(a0, offsetof(T, neg_round_down_in))); \
+ __ x##_w_d(f0, f0); \
+ __ Swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_down_out))); \
+ \
+ __ Ldc1(f0, MemOperand(a0, offsetof(T, err1_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ Sw(a2, MemOperand(a0, offsetof(T, x##_err1_out))); \
+ \
+ __ Ldc1(f0, MemOperand(a0, offsetof(T, err2_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ Sw(a2, MemOperand(a0, offsetof(T, x##_err2_out))); \
+ \
+ __ Ldc1(f0, MemOperand(a0, offsetof(T, err3_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ Sw(a2, MemOperand(a0, offsetof(T, x##_err3_out))); \
+ \
+ __ Ldc1(f0, MemOperand(a0, offsetof(T, err4_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ Sw(a2, MemOperand(a0, offsetof(T, x##_err4_out))); \
+ __ Swc1(f0, MemOperand(a0, offsetof(T, x##_invalid_result)));
RUN_ROUND_TEST(round)
RUN_ROUND_TEST(floor)
@@ -1403,51 +1401,51 @@ TEST(MIPS16) {
};
T t;
- Assembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
Label L, C;
// Basic 32-bit word load/store, with un-signed data.
- __ lw(a4, MemOperand(a0, offsetof(T, ui)));
- __ sw(a4, MemOperand(a0, offsetof(T, r1)));
+ __ Lw(a4, MemOperand(a0, offsetof(T, ui)));
+ __ Sw(a4, MemOperand(a0, offsetof(T, r1)));
// Check that the data got zero-extended into 64-bit a4.
- __ sd(a4, MemOperand(a0, offsetof(T, r2)));
+ __ Sd(a4, MemOperand(a0, offsetof(T, r2)));
// Basic 32-bit word load/store, with SIGNED data.
- __ lw(a5, MemOperand(a0, offsetof(T, si)));
- __ sw(a5, MemOperand(a0, offsetof(T, r3)));
+ __ Lw(a5, MemOperand(a0, offsetof(T, si)));
+ __ Sw(a5, MemOperand(a0, offsetof(T, r3)));
// Check that the data got sign-extended into 64-bit a4.
- __ sd(a5, MemOperand(a0, offsetof(T, r4)));
+ __ Sd(a5, MemOperand(a0, offsetof(T, r4)));
// 32-bit UNSIGNED word load/store, with SIGNED data.
- __ lwu(a6, MemOperand(a0, offsetof(T, si)));
- __ sw(a6, MemOperand(a0, offsetof(T, r5)));
+ __ Lwu(a6, MemOperand(a0, offsetof(T, si)));
+ __ Sw(a6, MemOperand(a0, offsetof(T, r5)));
// Check that the data got zero-extended into 64-bit a4.
- __ sd(a6, MemOperand(a0, offsetof(T, r6)));
+ __ Sd(a6, MemOperand(a0, offsetof(T, r6)));
// lh with positive data.
- __ lh(a5, MemOperand(a0, offsetof(T, ui)));
- __ sw(a5, MemOperand(a0, offsetof(T, r7)));
+ __ Lh(a5, MemOperand(a0, offsetof(T, ui)));
+ __ Sw(a5, MemOperand(a0, offsetof(T, r7)));
// lh with negative data.
- __ lh(a6, MemOperand(a0, offsetof(T, si)));
- __ sw(a6, MemOperand(a0, offsetof(T, r8)));
+ __ Lh(a6, MemOperand(a0, offsetof(T, si)));
+ __ Sw(a6, MemOperand(a0, offsetof(T, r8)));
// lhu with negative data.
- __ lhu(a7, MemOperand(a0, offsetof(T, si)));
- __ sw(a7, MemOperand(a0, offsetof(T, r9)));
+ __ Lhu(a7, MemOperand(a0, offsetof(T, si)));
+ __ Sw(a7, MemOperand(a0, offsetof(T, r9)));
- // lb with negative data.
- __ lb(t0, MemOperand(a0, offsetof(T, si)));
- __ sw(t0, MemOperand(a0, offsetof(T, r10)));
+ // Lb with negative data.
+ __ Lb(t0, MemOperand(a0, offsetof(T, si)));
+ __ Sw(t0, MemOperand(a0, offsetof(T, r10)));
// sh writes only 1/2 of word.
- __ lw(a4, MemOperand(a0, offsetof(T, ui)));
- __ sh(a4, MemOperand(a0, offsetof(T, r11)));
- __ lw(a4, MemOperand(a0, offsetof(T, si)));
- __ sh(a4, MemOperand(a0, offsetof(T, r12)));
+ __ Lw(a4, MemOperand(a0, offsetof(T, ui)));
+ __ Sh(a4, MemOperand(a0, offsetof(T, r11)));
+ __ Lw(a4, MemOperand(a0, offsetof(T, si)));
+ __ Sh(a4, MemOperand(a0, offsetof(T, r12)));
__ jr(ra);
__ nop();
@@ -1559,26 +1557,26 @@ TEST(seleqz_selnez) {
// Integer part of test.
__ addiu(t1, zero_reg, 1); // t1 = 1
__ seleqz(t3, t1, zero_reg); // t3 = 1
- __ sw(t3, MemOperand(a0, offsetof(Test, a))); // a = 1
+ __ Sw(t3, MemOperand(a0, offsetof(Test, a))); // a = 1
__ seleqz(t2, t1, t1); // t2 = 0
- __ sw(t2, MemOperand(a0, offsetof(Test, b))); // b = 0
+ __ Sw(t2, MemOperand(a0, offsetof(Test, b))); // b = 0
__ selnez(t3, t1, zero_reg); // t3 = 1;
- __ sw(t3, MemOperand(a0, offsetof(Test, c))); // c = 0
+ __ Sw(t3, MemOperand(a0, offsetof(Test, c))); // c = 0
__ selnez(t3, t1, t1); // t3 = 1
- __ sw(t3, MemOperand(a0, offsetof(Test, d))); // d = 1
+ __ Sw(t3, MemOperand(a0, offsetof(Test, d))); // d = 1
// Floating point part of test.
- __ ldc1(f0, MemOperand(a0, offsetof(Test, e)) ); // src
- __ ldc1(f2, MemOperand(a0, offsetof(Test, f)) ); // test
- __ lwc1(f8, MemOperand(a0, offsetof(Test, i)) ); // src
- __ lwc1(f10, MemOperand(a0, offsetof(Test, j)) ); // test
+ __ Ldc1(f0, MemOperand(a0, offsetof(Test, e))); // src
+ __ Ldc1(f2, MemOperand(a0, offsetof(Test, f))); // test
+ __ Lwc1(f8, MemOperand(a0, offsetof(Test, i))); // src
+ __ Lwc1(f10, MemOperand(a0, offsetof(Test, j))); // test
__ seleqz_d(f4, f0, f2);
__ selnez_d(f6, f0, f2);
__ seleqz_s(f12, f8, f10);
__ selnez_s(f14, f8, f10);
- __ sdc1(f4, MemOperand(a0, offsetof(Test, g)) ); // src
- __ sdc1(f6, MemOperand(a0, offsetof(Test, h)) ); // src
- __ swc1(f12, MemOperand(a0, offsetof(Test, k)) ); // src
- __ swc1(f14, MemOperand(a0, offsetof(Test, l)) ); // src
+ __ Sdc1(f4, MemOperand(a0, offsetof(Test, g))); // src
+ __ Sdc1(f6, MemOperand(a0, offsetof(Test, h))); // src
+ __ Swc1(f12, MemOperand(a0, offsetof(Test, k))); // src
+ __ Swc1(f14, MemOperand(a0, offsetof(Test, l))); // src
__ jr(ra);
__ nop();
CodeDesc desc;
@@ -1682,18 +1680,18 @@ TEST(min_max) {
float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, finf,
finf, finf, finf, finf, finf, fnan};
- __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
- __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
- __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
- __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
__ min_d(f10, f4, f8);
__ max_d(f12, f4, f8);
__ min_s(f14, f2, f6);
__ max_s(f16, f2, f6);
- __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
- __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
- __ swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
- __ swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
+ __ Swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
+ __ Swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
__ jr(ra);
__ nop();
@@ -1798,11 +1796,11 @@ TEST(rint_d) {
int fcsr_inputs[4] =
{kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
- __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)) );
- __ lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr)) );
+ __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr)));
__ ctc1(t0, FCSR);
__ rint_d(f8, f4);
- __ sdc1(f8, MemOperand(a0, offsetof(TestFloat, b)) );
+ __ Sdc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
__ jr(ra);
__ nop();
@@ -1842,16 +1840,16 @@ TEST(sel) {
} Test;
Test test;
- __ ldc1(f0, MemOperand(a0, offsetof(Test, dd)) ); // test
- __ ldc1(f2, MemOperand(a0, offsetof(Test, ds)) ); // src1
- __ ldc1(f4, MemOperand(a0, offsetof(Test, dt)) ); // src2
- __ lwc1(f6, MemOperand(a0, offsetof(Test, fd)) ); // test
- __ lwc1(f8, MemOperand(a0, offsetof(Test, fs)) ); // src1
- __ lwc1(f10, MemOperand(a0, offsetof(Test, ft)) ); // src2
+ __ Ldc1(f0, MemOperand(a0, offsetof(Test, dd))); // test
+ __ Ldc1(f2, MemOperand(a0, offsetof(Test, ds))); // src1
+ __ Ldc1(f4, MemOperand(a0, offsetof(Test, dt))); // src2
+ __ Lwc1(f6, MemOperand(a0, offsetof(Test, fd))); // test
+ __ Lwc1(f8, MemOperand(a0, offsetof(Test, fs))); // src1
+ __ Lwc1(f10, MemOperand(a0, offsetof(Test, ft))); // src2
__ sel_d(f0, f2, f4);
__ sel_s(f6, f8, f10);
- __ sdc1(f0, MemOperand(a0, offsetof(Test, dd)) );
- __ swc1(f6, MemOperand(a0, offsetof(Test, fd)) );
+ __ Sdc1(f0, MemOperand(a0, offsetof(Test, dd)));
+ __ Swc1(f6, MemOperand(a0, offsetof(Test, fd)));
__ jr(ra);
__ nop();
CodeDesc desc;
@@ -1977,12 +1975,12 @@ TEST(rint_s) {
int fcsr_inputs[4] =
{kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
float* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
- __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, a)) );
- __ lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr)) );
+ __ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr)));
__ cfc1(t1, FCSR);
__ ctc1(t0, FCSR);
__ rint_s(f8, f4);
- __ swc1(f8, MemOperand(a0, offsetof(TestFloat, b)) );
+ __ Swc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
__ ctc1(t1, FCSR);
__ jr(ra);
__ nop();
@@ -2058,18 +2056,18 @@ TEST(mina_maxa) {
5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan};
- __ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
- __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, b)) );
- __ lwc1(f8, MemOperand(a0, offsetof(TestFloat, c)) );
- __ lwc1(f10, MemOperand(a0, offsetof(TestFloat, d)) );
+ __ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Lwc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Lwc1(f10, MemOperand(a0, offsetof(TestFloat, d)));
__ mina_d(f6, f2, f4);
__ mina_s(f12, f8, f10);
__ maxa_d(f14, f2, f4);
__ maxa_s(f16, f8, f10);
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, resf)) );
- __ sdc1(f6, MemOperand(a0, offsetof(TestFloat, resd)) );
- __ swc1(f16, MemOperand(a0, offsetof(TestFloat, resf1)) );
- __ sdc1(f14, MemOperand(a0, offsetof(TestFloat, resd1)) );
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, resf)));
+ __ Sdc1(f6, MemOperand(a0, offsetof(TestFloat, resd)));
+ __ Swc1(f16, MemOperand(a0, offsetof(TestFloat, resf1)));
+ __ Sdc1(f14, MemOperand(a0, offsetof(TestFloat, resd1)));
__ jr(ra);
__ nop();
@@ -2145,13 +2143,13 @@ TEST(trunc_l) {
dFPU64InvalidResult};
__ cfc1(t1, FCSR);
- __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
- __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
- __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
+ __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(Test, b)));
__ trunc_l_d(f8, f4);
__ trunc_l_s(f10, f6);
- __ sdc1(f8, MemOperand(a0, offsetof(Test, c)) );
- __ sdc1(f10, MemOperand(a0, offsetof(Test, d)) );
+ __ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
__ jr(ra);
__ nop();
Test test;
@@ -2214,25 +2212,25 @@ TEST(movz_movn) {
5.3, -5.3, 5.3, -2.9
};
- __ ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
- __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
- __ ld(t0, MemOperand(a0, offsetof(TestFloat, rt)));
+ __ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Ld(t0, MemOperand(a0, offsetof(TestFloat, rt)));
__ Move(f12, 0.0);
__ Move(f10, 0.0);
__ Move(f16, 0.0);
__ Move(f14, 0.0);
- __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, bold)) );
- __ swc1(f10, MemOperand(a0, offsetof(TestFloat, dold)) );
- __ sdc1(f16, MemOperand(a0, offsetof(TestFloat, bold1)) );
- __ swc1(f14, MemOperand(a0, offsetof(TestFloat, dold1)) );
+ __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, bold)));
+ __ Swc1(f10, MemOperand(a0, offsetof(TestFloat, dold)));
+ __ Sdc1(f16, MemOperand(a0, offsetof(TestFloat, bold1)));
+ __ Swc1(f14, MemOperand(a0, offsetof(TestFloat, dold1)));
__ movz_s(f10, f6, t0);
__ movz_d(f12, f2, t0);
__ movn_s(f14, f6, t0);
__ movn_d(f16, f2, t0);
- __ swc1(f10, MemOperand(a0, offsetof(TestFloat, d)) );
- __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, b)) );
- __ swc1(f14, MemOperand(a0, offsetof(TestFloat, d1)) );
- __ sdc1(f16, MemOperand(a0, offsetof(TestFloat, b1)) );
+ __ Swc1(f10, MemOperand(a0, offsetof(TestFloat, d)));
+ __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Swc1(f14, MemOperand(a0, offsetof(TestFloat, d1)));
+ __ Sdc1(f16, MemOperand(a0, offsetof(TestFloat, b1)));
__ jr(ra);
__ nop();
@@ -2313,26 +2311,26 @@ TEST(movt_movd) {
HandleScope scope(isolate);
MacroAssembler assm(isolate, NULL, 0,
v8::internal::CodeObjectRequired::kYes);
- __ ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)) );
- __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)) );
- __ lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr)) );
+ __ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)));
+ __ Lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr)));
__ cfc1(t0, FCSR);
__ ctc1(t1, FCSR);
__ li(t2, 0x0);
__ mtc1(t2, f12);
__ mtc1(t2, f10);
- __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold)) );
+ __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold)));
__ movt_s(f12, f4, test.cc);
__ movt_d(f10, f2, test.cc);
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf)) );
- __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd)) );
- __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold1)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold1)) );
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold1)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold1)));
__ movf_s(f12, f4, test.cc);
__ movf_d(f10, f2, test.cc);
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf1)) );
- __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd1)) );
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf1)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd1)));
__ ctc1(t0, FCSR);
__ jr(ra);
__ nop();
@@ -2413,12 +2411,12 @@ TEST(cvt_w_d) {
int fcsr_inputs[4] =
{kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
- __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
- __ lw(t0, MemOperand(a0, offsetof(Test, fcsr)) );
+ __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
+ __ Lw(t0, MemOperand(a0, offsetof(Test, fcsr)));
__ cfc1(t1, FCSR);
__ ctc1(t0, FCSR);
__ cvt_w_d(f8, f4);
- __ swc1(f8, MemOperand(a0, offsetof(Test, b)) );
+ __ Swc1(f8, MemOperand(a0, offsetof(Test, b)));
__ ctc1(t1, FCSR);
__ jr(ra);
__ nop();
@@ -2480,13 +2478,13 @@ TEST(trunc_w) {
kFPUInvalidResult};
__ cfc1(t1, FCSR);
- __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
- __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
- __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
+ __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(Test, b)));
__ trunc_w_d(f8, f4);
__ trunc_w_s(f10, f6);
- __ swc1(f8, MemOperand(a0, offsetof(Test, c)) );
- __ swc1(f10, MemOperand(a0, offsetof(Test, d)) );
+ __ Swc1(f8, MemOperand(a0, offsetof(Test, c)));
+ __ Swc1(f10, MemOperand(a0, offsetof(Test, d)));
__ jr(ra);
__ nop();
Test test;
@@ -2549,13 +2547,13 @@ TEST(round_w) {
kFPUInvalidResult};
__ cfc1(t1, FCSR);
- __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
- __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
- __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
+ __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(Test, b)));
__ round_w_d(f8, f4);
__ round_w_s(f10, f6);
- __ swc1(f8, MemOperand(a0, offsetof(Test, c)) );
- __ swc1(f10, MemOperand(a0, offsetof(Test, d)) );
+ __ Swc1(f8, MemOperand(a0, offsetof(Test, c)));
+ __ Swc1(f10, MemOperand(a0, offsetof(Test, d)));
__ jr(ra);
__ nop();
Test test;
@@ -2620,13 +2618,13 @@ TEST(round_l) {
dFPU64InvalidResult};
__ cfc1(t1, FCSR);
- __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
- __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
- __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
+ __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(Test, b)));
__ round_l_d(f8, f4);
__ round_l_s(f10, f6);
- __ sdc1(f8, MemOperand(a0, offsetof(Test, c)) );
- __ sdc1(f10, MemOperand(a0, offsetof(Test, d)) );
+ __ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
__ jr(ra);
__ nop();
Test test;
@@ -2691,14 +2689,14 @@ TEST(sub) {
0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
10.1, 10.1, 5.8, -0.5, 0.5, 0.0
};
- __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
- __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)) );
- __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)) );
- __ ldc1(f10, MemOperand(a0, offsetof(TestFloat, d)) );
+ __ Lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Ldc1(f10, MemOperand(a0, offsetof(TestFloat, d)));
__ sub_s(f6, f2, f4);
__ sub_d(f12, f8, f10);
- __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
- __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) );
+ __ Swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)));
+ __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
__ jr(ra);
__ nop();
@@ -2757,21 +2755,20 @@ TEST(sqrt_rsqrt_recip) {
0.0, 2.0, sqrt2_s, 2e-14
};
-
- __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
- __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)) );
+ __ Lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
__ sqrt_s(f6, f2);
__ sqrt_d(f12, f8);
__ rsqrt_d(f14, f8);
__ rsqrt_s(f16, f2);
__ recip_d(f18, f8);
__ recip_s(f4, f2);
- __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
- __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) );
- __ swc1(f16, MemOperand(a0, offsetof(TestFloat, resultS1)) );
- __ sdc1(f14, MemOperand(a0, offsetof(TestFloat, resultD1)) );
- __ swc1(f4, MemOperand(a0, offsetof(TestFloat, resultS2)) );
- __ sdc1(f18, MemOperand(a0, offsetof(TestFloat, resultD2)) );
+ __ Swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)));
+ __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
+ __ Swc1(f16, MemOperand(a0, offsetof(TestFloat, resultS1)));
+ __ Sdc1(f14, MemOperand(a0, offsetof(TestFloat, resultD1)));
+ __ Swc1(f4, MemOperand(a0, offsetof(TestFloat, resultS2)));
+ __ Sdc1(f18, MemOperand(a0, offsetof(TestFloat, resultD2)));
__ jr(ra);
__ nop();
@@ -2844,12 +2841,12 @@ TEST(neg) {
float outputs_S[kTableLength] = {
-4.0, 2.0
};
- __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
- __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)) );
+ __ Lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
__ neg_s(f6, f2);
__ neg_d(f12, f8);
- __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
- __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) );
+ __ Swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)));
+ __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
__ jr(ra);
__ nop();
@@ -2900,14 +2897,14 @@ TEST(mul) {
4.8, 4.8, -4.8, -0.29
};
- __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
- __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)) );
- __ ldc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
- __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, d)) );
+ __ Lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, d)));
__ mul_s(f10, f2, f4);
__ mul_d(f12, f6, f8);
- __ swc1(f10, MemOperand(a0, offsetof(TestFloat, resultS)) );
- __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)) );
+ __ Swc1(f10, MemOperand(a0, offsetof(TestFloat, resultS)));
+ __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
__ jr(ra);
__ nop();
@@ -2957,12 +2954,12 @@ TEST(mov) {
5.3, -5.3, 5.3, -2.9
};
- __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)) );
- __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
+ __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)));
__ mov_s(f8, f6);
__ mov_d(f10, f4);
- __ swc1(f8, MemOperand(a0, offsetof(TestFloat, d)) );
- __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, b)) );
+ __ Swc1(f8, MemOperand(a0, offsetof(TestFloat, d)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, b)));
__ jr(ra);
__ nop();
@@ -3023,13 +3020,13 @@ TEST(floor_w) {
kFPUInvalidResult};
__ cfc1(t1, FCSR);
- __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
- __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
- __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
+ __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(Test, b)));
__ floor_w_d(f8, f4);
__ floor_w_s(f10, f6);
- __ swc1(f8, MemOperand(a0, offsetof(Test, c)) );
- __ swc1(f10, MemOperand(a0, offsetof(Test, d)) );
+ __ Swc1(f8, MemOperand(a0, offsetof(Test, c)));
+ __ Swc1(f10, MemOperand(a0, offsetof(Test, d)));
__ jr(ra);
__ nop();
Test test;
@@ -3094,13 +3091,13 @@ TEST(floor_l) {
dFPU64InvalidResult};
__ cfc1(t1, FCSR);
- __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
- __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
- __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
+ __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(Test, b)));
__ floor_l_d(f8, f4);
__ floor_l_s(f10, f6);
- __ sdc1(f8, MemOperand(a0, offsetof(Test, c)) );
- __ sdc1(f10, MemOperand(a0, offsetof(Test, d)) );
+ __ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
__ jr(ra);
__ nop();
Test test;
@@ -3165,13 +3162,13 @@ TEST(ceil_w) {
kFPUInvalidResult};
__ cfc1(t1, FCSR);
- __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
- __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
- __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
+ __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(Test, b)));
__ ceil_w_d(f8, f4);
__ ceil_w_s(f10, f6);
- __ swc1(f8, MemOperand(a0, offsetof(Test, c)) );
- __ swc1(f10, MemOperand(a0, offsetof(Test, d)) );
+ __ Swc1(f8, MemOperand(a0, offsetof(Test, c)));
+ __ Swc1(f10, MemOperand(a0, offsetof(Test, d)));
__ jr(ra);
__ nop();
Test test;
@@ -3236,13 +3233,13 @@ TEST(ceil_l) {
dFPU64InvalidResult};
__ cfc1(t1, FCSR);
- __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
- __ ldc1(f4, MemOperand(a0, offsetof(Test, a)) );
- __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
+ __ Sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(Test, b)));
__ ceil_l_d(f8, f4);
__ ceil_l_s(f10, f6);
- __ sdc1(f8, MemOperand(a0, offsetof(Test, c)) );
- __ sdc1(f10, MemOperand(a0, offsetof(Test, d)) );
+ __ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
__ jr(ra);
__ nop();
Test test;
@@ -3271,7 +3268,7 @@ TEST(jump_tables1) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, nullptr, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
int values[kNumCases];
@@ -3279,7 +3276,7 @@ TEST(jump_tables1) {
Label labels[kNumCases];
__ daddiu(sp, sp, -8);
- __ sd(ra, MemOperand(sp));
+ __ Sd(ra, MemOperand(sp));
__ Align(8);
Label done;
@@ -3293,7 +3290,7 @@ TEST(jump_tables1) {
__ dsll(at, a0, 3); // In delay slot.
__ bind(&here);
__ daddu(at, at, ra);
- __ ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
+ __ Ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -3310,7 +3307,7 @@ TEST(jump_tables1) {
}
__ bind(&done);
- __ ld(ra, MemOperand(sp));
+ __ Ld(ra, MemOperand(sp));
__ daddiu(sp, sp, 8);
__ jr(ra);
__ nop();
@@ -3339,7 +3336,7 @@ TEST(jump_tables2) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, nullptr, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
int values[kNumCases];
@@ -3347,7 +3344,7 @@ TEST(jump_tables2) {
Label labels[kNumCases];
__ daddiu(sp, sp, -8);
- __ sd(ra, MemOperand(sp));
+ __ Sd(ra, MemOperand(sp));
Label done, dispatch;
__ b(&dispatch);
@@ -3373,7 +3370,7 @@ TEST(jump_tables2) {
__ dsll(at, a0, 3); // In delay slot.
__ bind(&here);
__ daddu(at, at, ra);
- __ ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
+ __ Ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -3382,7 +3379,7 @@ TEST(jump_tables2) {
}
__ bind(&done);
- __ ld(ra, MemOperand(sp));
+ __ Ld(ra, MemOperand(sp));
__ daddiu(sp, sp, 8);
__ jr(ra);
__ nop();
@@ -3409,7 +3406,7 @@ TEST(jump_tables3) {
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope scope(isolate);
- Assembler assm(isolate, nullptr, 0);
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
const int kNumCases = 512;
Handle<Object> values[kNumCases];
@@ -3422,7 +3419,7 @@ TEST(jump_tables3) {
int64_t imm64;
__ daddiu(sp, sp, -8);
- __ sd(ra, MemOperand(sp));
+ __ Sd(ra, MemOperand(sp));
Label done, dispatch;
__ b(&dispatch);
@@ -3453,7 +3450,7 @@ TEST(jump_tables3) {
__ dsll(at, a0, 3); // In delay slot.
__ bind(&here);
__ daddu(at, at, ra);
- __ ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
+ __ Ld(at, MemOperand(at, 4 * Assembler::kInstrSize));
__ jr(at);
__ nop();
for (int i = 0; i < kNumCases; ++i) {
@@ -3462,7 +3459,7 @@ TEST(jump_tables3) {
}
__ bind(&done);
- __ ld(ra, MemOperand(sp));
+ __ Ld(ra, MemOperand(sp));
__ daddiu(sp, sp, 8);
__ jr(ra);
__ nop();
@@ -3505,37 +3502,38 @@ TEST(BITSWAP) {
} T;
T t;
- Assembler assm(isolate, NULL, 0);
+ MacroAssembler assm(isolate, NULL, 0,
+ v8::internal::CodeObjectRequired::kYes);
- __ ld(a4, MemOperand(a0, offsetof(T, r1)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r1)));
__ nop();
__ bitswap(a6, a4);
- __ sd(a6, MemOperand(a0, offsetof(T, r1)));
+ __ Sd(a6, MemOperand(a0, offsetof(T, r1)));
- __ ld(a4, MemOperand(a0, offsetof(T, r2)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r2)));
__ nop();
__ bitswap(a6, a4);
- __ sd(a6, MemOperand(a0, offsetof(T, r2)));
+ __ Sd(a6, MemOperand(a0, offsetof(T, r2)));
- __ ld(a4, MemOperand(a0, offsetof(T, r3)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r3)));
__ nop();
__ bitswap(a6, a4);
- __ sd(a6, MemOperand(a0, offsetof(T, r3)));
+ __ Sd(a6, MemOperand(a0, offsetof(T, r3)));
- __ ld(a4, MemOperand(a0, offsetof(T, r4)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r4)));
__ nop();
__ bitswap(a6, a4);
- __ sd(a6, MemOperand(a0, offsetof(T, r4)));
+ __ Sd(a6, MemOperand(a0, offsetof(T, r4)));
- __ ld(a4, MemOperand(a0, offsetof(T, r5)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r5)));
__ nop();
__ dbitswap(a6, a4);
- __ sd(a6, MemOperand(a0, offsetof(T, r5)));
+ __ Sd(a6, MemOperand(a0, offsetof(T, r5)));
- __ ld(a4, MemOperand(a0, offsetof(T, r6)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r6)));
__ nop();
__ dbitswap(a6, a4);
- __ sd(a6, MemOperand(a0, offsetof(T, r6)));
+ __ Sd(a6, MemOperand(a0, offsetof(T, r6)));
__ jr(ra);
__ nop();
@@ -3599,86 +3597,86 @@ TEST(class_fmt) {
MacroAssembler assm(isolate, NULL, 0,
v8::internal::CodeObjectRequired::kYes);
- __ ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan)));
__ class_d(f6, f4);
- __ sdc1(f6, MemOperand(a0, offsetof(T, dSignalingNan)));
+ __ Sdc1(f6, MemOperand(a0, offsetof(T, dSignalingNan)));
- __ ldc1(f4, MemOperand(a0, offsetof(T, dQuietNan)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, dQuietNan)));
__ class_d(f6, f4);
- __ sdc1(f6, MemOperand(a0, offsetof(T, dQuietNan)));
+ __ Sdc1(f6, MemOperand(a0, offsetof(T, dQuietNan)));
- __ ldc1(f4, MemOperand(a0, offsetof(T, dNegInf)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegInf)));
__ class_d(f6, f4);
- __ sdc1(f6, MemOperand(a0, offsetof(T, dNegInf)));
+ __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegInf)));
- __ ldc1(f4, MemOperand(a0, offsetof(T, dNegNorm)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegNorm)));
__ class_d(f6, f4);
- __ sdc1(f6, MemOperand(a0, offsetof(T, dNegNorm)));
+ __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegNorm)));
- __ ldc1(f4, MemOperand(a0, offsetof(T, dNegSubnorm)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegSubnorm)));
__ class_d(f6, f4);
- __ sdc1(f6, MemOperand(a0, offsetof(T, dNegSubnorm)));
+ __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegSubnorm)));
- __ ldc1(f4, MemOperand(a0, offsetof(T, dNegZero)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegZero)));
__ class_d(f6, f4);
- __ sdc1(f6, MemOperand(a0, offsetof(T, dNegZero)));
+ __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegZero)));
- __ ldc1(f4, MemOperand(a0, offsetof(T, dPosInf)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosInf)));
__ class_d(f6, f4);
- __ sdc1(f6, MemOperand(a0, offsetof(T, dPosInf)));
+ __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosInf)));
- __ ldc1(f4, MemOperand(a0, offsetof(T, dPosNorm)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosNorm)));
__ class_d(f6, f4);
- __ sdc1(f6, MemOperand(a0, offsetof(T, dPosNorm)));
+ __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosNorm)));
- __ ldc1(f4, MemOperand(a0, offsetof(T, dPosSubnorm)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosSubnorm)));
__ class_d(f6, f4);
- __ sdc1(f6, MemOperand(a0, offsetof(T, dPosSubnorm)));
+ __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosSubnorm)));
- __ ldc1(f4, MemOperand(a0, offsetof(T, dPosZero)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosZero)));
__ class_d(f6, f4);
- __ sdc1(f6, MemOperand(a0, offsetof(T, dPosZero)));
+ __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosZero)));
// Testing instruction CLASS.S
- __ lwc1(f4, MemOperand(a0, offsetof(T, fSignalingNan)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(T, fSignalingNan)));
__ class_s(f6, f4);
- __ swc1(f6, MemOperand(a0, offsetof(T, fSignalingNan)));
+ __ Swc1(f6, MemOperand(a0, offsetof(T, fSignalingNan)));
- __ lwc1(f4, MemOperand(a0, offsetof(T, fQuietNan)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(T, fQuietNan)));
__ class_s(f6, f4);
- __ swc1(f6, MemOperand(a0, offsetof(T, fQuietNan)));
+ __ Swc1(f6, MemOperand(a0, offsetof(T, fQuietNan)));
- __ lwc1(f4, MemOperand(a0, offsetof(T, fNegInf)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(T, fNegInf)));
__ class_s(f6, f4);
- __ swc1(f6, MemOperand(a0, offsetof(T, fNegInf)));
+ __ Swc1(f6, MemOperand(a0, offsetof(T, fNegInf)));
- __ lwc1(f4, MemOperand(a0, offsetof(T, fNegNorm)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(T, fNegNorm)));
__ class_s(f6, f4);
- __ swc1(f6, MemOperand(a0, offsetof(T, fNegNorm)));
+ __ Swc1(f6, MemOperand(a0, offsetof(T, fNegNorm)));
- __ lwc1(f4, MemOperand(a0, offsetof(T, fNegSubnorm)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(T, fNegSubnorm)));
__ class_s(f6, f4);
- __ swc1(f6, MemOperand(a0, offsetof(T, fNegSubnorm)));
+ __ Swc1(f6, MemOperand(a0, offsetof(T, fNegSubnorm)));
- __ lwc1(f4, MemOperand(a0, offsetof(T, fNegZero)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(T, fNegZero)));
__ class_s(f6, f4);
- __ swc1(f6, MemOperand(a0, offsetof(T, fNegZero)));
+ __ Swc1(f6, MemOperand(a0, offsetof(T, fNegZero)));
- __ lwc1(f4, MemOperand(a0, offsetof(T, fPosInf)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(T, fPosInf)));
__ class_s(f6, f4);
- __ swc1(f6, MemOperand(a0, offsetof(T, fPosInf)));
+ __ Swc1(f6, MemOperand(a0, offsetof(T, fPosInf)));
- __ lwc1(f4, MemOperand(a0, offsetof(T, fPosNorm)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(T, fPosNorm)));
__ class_s(f6, f4);
- __ swc1(f6, MemOperand(a0, offsetof(T, fPosNorm)));
+ __ Swc1(f6, MemOperand(a0, offsetof(T, fPosNorm)));
- __ lwc1(f4, MemOperand(a0, offsetof(T, fPosSubnorm)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(T, fPosSubnorm)));
__ class_s(f6, f4);
- __ swc1(f6, MemOperand(a0, offsetof(T, fPosSubnorm)));
+ __ Swc1(f6, MemOperand(a0, offsetof(T, fPosSubnorm)));
- __ lwc1(f4, MemOperand(a0, offsetof(T, fPosZero)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(T, fPosZero)));
__ class_s(f6, f4);
- __ swc1(f6, MemOperand(a0, offsetof(T, fPosZero)));
+ __ Swc1(f6, MemOperand(a0, offsetof(T, fPosZero)));
__ jr(ra);
__ nop();
@@ -3759,17 +3757,17 @@ TEST(ABS) {
  // Save FCSR.
__ cfc1(a1, FCSR);
- __ sd(a1, MemOperand(a0, offsetof(TestFloat, fcsr)));
+ __ Sd(a1, MemOperand(a0, offsetof(TestFloat, fcsr)));
// Disable FPU exceptions.
__ ctc1(zero_reg, FCSR);
- __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
__ abs_d(f10, f4);
- __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, a)));
- __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)));
__ abs_s(f10, f4);
- __ swc1(f10, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Swc1(f10, MemOperand(a0, offsetof(TestFloat, b)));
// Restore FCSR.
__ ctc1(a1, FCSR);
@@ -3857,15 +3855,15 @@ TEST(ADD_FMT) {
TestFloat test;
- __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
- __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
__ add_d(f10, f8, f4);
- __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
- __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, fa)));
- __ lwc1(f8, MemOperand(a0, offsetof(TestFloat, fb)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(TestFloat, fa)));
+ __ Lwc1(f8, MemOperand(a0, offsetof(TestFloat, fb)));
__ add_s(f10, f8, f4);
- __ swc1(f10, MemOperand(a0, offsetof(TestFloat, fc)));
+ __ Swc1(f10, MemOperand(a0, offsetof(TestFloat, fc)));
__ jr(ra);
__ nop();
@@ -3944,11 +3942,11 @@ TEST(C_COND_FMT) {
__ li(t1, 1);
- __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1)));
- __ ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1)));
+ __ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2)));
- __ lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1)));
- __ lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2)));
+ __ Lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1)));
+ __ Lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2)));
__ mov(t2, zero_reg);
__ mov(t3, zero_reg);
@@ -3956,8 +3954,8 @@ TEST(C_COND_FMT) {
__ c_s(F, f14, f16, 2);
__ movt(t2, t1, 0);
__ movt(t3, t1, 2);
- __ sw(t2, MemOperand(a0, offsetof(TestFloat, dF)) );
- __ sw(t3, MemOperand(a0, offsetof(TestFloat, fF)) );
+ __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dF)));
+ __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fF)));
__ mov(t2, zero_reg);
__ mov(t3, zero_reg);
@@ -3965,8 +3963,8 @@ TEST(C_COND_FMT) {
__ c_s(UN, f14, f16, 4);
__ movt(t2, t1, 2);
__ movt(t3, t1, 4);
- __ sw(t2, MemOperand(a0, offsetof(TestFloat, dUn)) );
- __ sw(t3, MemOperand(a0, offsetof(TestFloat, fUn)) );
+ __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dUn)));
+ __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fUn)));
__ mov(t2, zero_reg);
__ mov(t3, zero_reg);
@@ -3974,8 +3972,8 @@ TEST(C_COND_FMT) {
__ c_s(EQ, f14, f16, 6);
__ movt(t2, t1, 4);
__ movt(t3, t1, 6);
- __ sw(t2, MemOperand(a0, offsetof(TestFloat, dEq)) );
- __ sw(t3, MemOperand(a0, offsetof(TestFloat, fEq)) );
+ __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dEq)));
+ __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fEq)));
__ mov(t2, zero_reg);
__ mov(t3, zero_reg);
@@ -3983,8 +3981,8 @@ TEST(C_COND_FMT) {
__ c_s(UEQ, f14, f16, 0);
__ movt(t2, t1, 6);
__ movt(t3, t1, 0);
- __ sw(t2, MemOperand(a0, offsetof(TestFloat, dUeq)) );
- __ sw(t3, MemOperand(a0, offsetof(TestFloat, fUeq)) );
+ __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dUeq)));
+ __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fUeq)));
__ mov(t2, zero_reg);
__ mov(t3, zero_reg);
@@ -3992,8 +3990,8 @@ TEST(C_COND_FMT) {
__ c_s(OLT, f14, f16, 2);
__ movt(t2, t1, 0);
__ movt(t3, t1, 2);
- __ sw(t2, MemOperand(a0, offsetof(TestFloat, dOlt)) );
- __ sw(t3, MemOperand(a0, offsetof(TestFloat, fOlt)) );
+ __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dOlt)));
+ __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fOlt)));
__ mov(t2, zero_reg);
__ mov(t3, zero_reg);
@@ -4001,8 +3999,8 @@ TEST(C_COND_FMT) {
__ c_s(ULT, f14, f16, 4);
__ movt(t2, t1, 2);
__ movt(t3, t1, 4);
- __ sw(t2, MemOperand(a0, offsetof(TestFloat, dUlt)) );
- __ sw(t3, MemOperand(a0, offsetof(TestFloat, fUlt)) );
+ __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dUlt)));
+ __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fUlt)));
__ mov(t2, zero_reg);
__ mov(t3, zero_reg);
@@ -4010,8 +4008,8 @@ TEST(C_COND_FMT) {
__ c_s(OLE, f14, f16, 6);
__ movt(t2, t1, 4);
__ movt(t3, t1, 6);
- __ sw(t2, MemOperand(a0, offsetof(TestFloat, dOle)) );
- __ sw(t3, MemOperand(a0, offsetof(TestFloat, fOle)) );
+ __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dOle)));
+ __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fOle)));
__ mov(t2, zero_reg);
__ mov(t3, zero_reg);
@@ -4019,8 +4017,8 @@ TEST(C_COND_FMT) {
__ c_s(ULE, f14, f16, 0);
__ movt(t2, t1, 6);
__ movt(t3, t1, 0);
- __ sw(t2, MemOperand(a0, offsetof(TestFloat, dUle)) );
- __ sw(t3, MemOperand(a0, offsetof(TestFloat, fUle)) );
+ __ Sw(t2, MemOperand(a0, offsetof(TestFloat, dUle)));
+ __ Sw(t3, MemOperand(a0, offsetof(TestFloat, fUle)));
__ jr(ra);
__ nop();
@@ -4162,66 +4160,66 @@ TEST(CMP_COND_FMT) {
__ li(t1, 1);
- __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1)));
- __ ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1)));
+ __ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2)));
- __ lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1)));
- __ lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2)));
+ __ Lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1)));
+ __ Lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2)));
__ cmp_d(F, f2, f4, f6);
__ cmp_s(F, f12, f14, f16);
- __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dF)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fF)) );
+ __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dF)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fF)));
__ cmp_d(UN, f2, f4, f6);
__ cmp_s(UN, f12, f14, f16);
- __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUn)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUn)) );
+ __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUn)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fUn)));
__ cmp_d(EQ, f2, f4, f6);
__ cmp_s(EQ, f12, f14, f16);
- __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dEq)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fEq)) );
+ __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dEq)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fEq)));
__ cmp_d(UEQ, f2, f4, f6);
__ cmp_s(UEQ, f12, f14, f16);
- __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUeq)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUeq)) );
+ __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUeq)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fUeq)));
__ cmp_d(LT, f2, f4, f6);
__ cmp_s(LT, f12, f14, f16);
- __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOlt)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOlt)) );
+ __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOlt)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fOlt)));
__ cmp_d(ULT, f2, f4, f6);
__ cmp_s(ULT, f12, f14, f16);
- __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUlt)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUlt)) );
+ __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUlt)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fUlt)));
__ cmp_d(LE, f2, f4, f6);
__ cmp_s(LE, f12, f14, f16);
- __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOle)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOle)) );
+ __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOle)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fOle)));
__ cmp_d(ULE, f2, f4, f6);
__ cmp_s(ULE, f12, f14, f16);
- __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUle)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUle)) );
+ __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUle)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fUle)));
__ cmp_d(ORD, f2, f4, f6);
__ cmp_s(ORD, f12, f14, f16);
- __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOr)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOr)) );
+ __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOr)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fOr)));
__ cmp_d(UNE, f2, f4, f6);
__ cmp_s(UNE, f12, f14, f16);
- __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUne)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUne)) );
+ __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUne)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fUne)));
__ cmp_d(NE, f2, f4, f6);
__ cmp_s(NE, f12, f14, f16);
- __ sdc1(f2, MemOperand(a0, offsetof(TestFloat, dNe)) );
- __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fNe)) );
+ __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dNe)));
+ __ Swc1(f12, MemOperand(a0, offsetof(TestFloat, fNe)));
__ jr(ra);
__ nop();
@@ -4558,19 +4556,19 @@ TEST(DIV_FMT) {
// Disable FPU exceptions.
__ ctc1(zero_reg, FCSR);
- __ ldc1(f4, MemOperand(a0, offsetof(Test, dOp1)) );
- __ ldc1(f2, MemOperand(a0, offsetof(Test, dOp2)) );
+ __ Ldc1(f4, MemOperand(a0, offsetof(Test, dOp1)));
+ __ Ldc1(f2, MemOperand(a0, offsetof(Test, dOp2)));
__ nop();
__ div_d(f6, f4, f2);
- __ sdc1(f6, MemOperand(a0, offsetof(Test, dRes)) );
+ __ Sdc1(f6, MemOperand(a0, offsetof(Test, dRes)));
- __ lwc1(f4, MemOperand(a0, offsetof(Test, fOp1)) );
- __ lwc1(f2, MemOperand(a0, offsetof(Test, fOp2)) );
+ __ Lwc1(f4, MemOperand(a0, offsetof(Test, fOp1)));
+ __ Lwc1(f2, MemOperand(a0, offsetof(Test, fOp2)));
__ nop();
__ div_s(f6, f4, f2);
- __ swc1(f6, MemOperand(a0, offsetof(Test, fRes)) );
+ __ Swc1(f6, MemOperand(a0, offsetof(Test, fRes)));
- // Restore FCSR.
+  // Restore FCSR.
__ ctc1(a1, FCSR);
__ jr(ra);
@@ -5971,15 +5969,15 @@ void helper_madd_msub_maddf_msubf(F func) {
};
if (std::is_same<T, float>::value) {
- __ lwc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
- __ lwc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs)));
- __ lwc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
- __ lwc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
+ __ Lwc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs)));
+ __ Lwc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
+ __ Lwc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
} else if (std::is_same<T, double>::value) {
- __ ldc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
- __ ldc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs)));
- __ ldc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
- __ ldc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
+ __ Ldc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fs)));
+ __ Ldc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, ft)));
+ __ Ldc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fr)));
} else {
UNREACHABLE();
}
@@ -6023,9 +6021,9 @@ TEST(madd_msub_s) {
if (kArchVariant == kMips64r6) return;
helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) {
__ madd_s(f10, f4, f6, f8);
- __ swc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add)));
+ __ Swc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add)));
__ msub_s(f16, f4, f6, f8);
- __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub)));
+ __ Swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub)));
});
}
@@ -6033,9 +6031,9 @@ TEST(madd_msub_d) {
if (kArchVariant == kMips64r6) return;
helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) {
__ madd_d(f10, f4, f6, f8);
- __ sdc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
__ msub_d(f16, f4, f6, f8);
- __ sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
+ __ Sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
});
}
@@ -6043,9 +6041,9 @@ TEST(maddf_msubf_s) {
if (kArchVariant != kMips64r6) return;
helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) {
__ maddf_s(f4, f6, f8);
- __ swc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add)));
+ __ Swc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add)));
__ msubf_s(f16, f6, f8);
- __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub)));
+ __ Swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub)));
});
}
@@ -6053,10 +6051,69 @@ TEST(maddf_msubf_d) {
if (kArchVariant != kMips64r6) return;
helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) {
__ maddf_d(f4, f6, f8);
- __ sdc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
+ __ Sdc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
__ msubf_d(f16, f6, f8);
- __ sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
+ __ Sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
});
}
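
The operand-count difference between the r2 and r6 variants above is semantic, not cosmetic: pre-r6 madd.fmt takes an explicit accumulator register (fd = fr + fs*ft), while r6's maddf.fmt accumulates into its destination (fd = fd + fs*ft), which is why the maddf tests reuse f4 as both the preloaded fr value and the result. A scalar reference model, hedged in that it ignores the rounding differences between the fused and unfused forms:

    // Sketch: reference semantics for the two MIPS multiply-add families.
    template <typename T>
    T madd_r2(T fr, T fs, T ft) { return fr + fs * ft; }   // madd.fmt, pre-r6

    template <typename T>
    T maddf_r6(T fd, T fs, T ft) { return fd + fs * ft; }  // maddf.fmt, r6;
                                                            // fd doubles as
                                                            // the accumulator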
+uint64_t run_Dins(uint64_t imm, uint64_t source, uint16_t pos, uint16_t size) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, NULL, 0, v8::internal::CodeObjectRequired::kYes);
+
+ __ li(v0, imm);
+ __ li(t0, source);
+ __ Dins(v0, t0, pos, size);
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Handle<Code> code = isolate->factory()->NewCode(
+ desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+ F2 f = FUNCTION_CAST<F2>(code->entry());
+
+ uint64_t res = reinterpret_cast<uint64_t>(
+ CALL_GENERATED_CODE(isolate, f, 0, 0, 0, 0, 0));
+
+ return res;
+}
+
+TEST(Dins) {
+ CcTest::InitializeVM();
+
+ // Test Dins macro-instruction.
+
+ struct TestCaseDins {
+ uint64_t imm;
+ uint64_t source;
+ uint16_t pos;
+ uint16_t size;
+ uint64_t expected_res;
+ };
+
+  // We load imm into v0 and source into t0, then call
+  // Dins(v0, t0, pos, size) to test the cases listed below.
+ struct TestCaseDins tc[] = {
+ // imm, source, pos, size, expected_res
+ {0x5555555555555555, 0x1ABCDEF01, 31, 1, 0x55555555D5555555},
+ {0x5555555555555555, 0x1ABCDEF02, 30, 2, 0x5555555595555555},
+ {0x201234567, 0x1FABCDEFF, 0, 32, 0x2FABCDEFF},
+ {0x201234567, 0x7FABCDEFF, 31, 2, 0x381234567},
+ {0x800000000, 0x7FABCDEFF, 0, 33, 0x9FABCDEFF},
+ {0x1234, 0xABCDABCDABCDABCD, 0, 64, 0xABCDABCDABCDABCD},
+ {0xABCD, 0xABCEABCF, 32, 1, 0x10000ABCD},
+ {0xABCD, 0xABCEABCF, 63, 1, 0x800000000000ABCD},
+ {0xABCD, 0xABC1ABC2ABC3ABC4, 32, 32, 0xABC3ABC40000ABCD},
+ };
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseDins);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ CHECK_EQ(tc[i].expected_res,
+ run_Dins(tc[i].imm, tc[i].source, tc[i].pos, tc[i].size));
+ }
+}
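
The expected_res column follows mechanically from Dins being a bitfield insert: the low size bits of source replace bits [pos + size - 1 : pos] of imm, and every other bit of imm is preserved. A self-contained reference model that reproduces the table (the size == 64 row needs the conditional mask, since shifting a 64-bit value by 64 is undefined):

    #include <cstdint>

    // Sketch: reference semantics for Dins(dst, src, pos, size).
    uint64_t dins_model(uint64_t dst, uint64_t src, unsigned pos,
                        unsigned size) {
      uint64_t mask = (size == 64) ? ~0ull : ((1ull << size) - 1);
      return (dst & ~(mask << pos)) | ((src & mask) << pos);
    }
    // First row: bit 0 of 0x1ABCDEF01 is 1; inserted at pos 31 it sets bit 31
    // of 0x5555555555555555, giving 0x55555555D5555555.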
+
#undef __
diff --git a/deps/v8/test/cctest/test-ast.cc b/deps/v8/test/cctest/test-ast.cc
index c027e88a52..75496e9f93 100644
--- a/deps/v8/test/cctest/test-ast.cc
+++ b/deps/v8/test/cctest/test-ast.cc
@@ -48,7 +48,7 @@ TEST(List) {
Zone zone(&allocator, ZONE_NAME);
AstValueFactory value_factory(&zone, isolate->ast_string_constants(),
isolate->heap()->HashSeed());
- AstNodeFactory factory(&value_factory);
+ AstNodeFactory factory(&value_factory, &zone);
AstNode* node = factory.NewEmptyStatement(kNoSourcePosition);
list->Add(node);
CHECK_EQ(1, list->length());
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index b8f1ccc19e..ca01f24b93 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -28,6 +28,45 @@ using compiler::CodeAssemblerVariableList;
namespace {
+int sum9(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7,
+ int a8) {
+ return a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
+}
+
+} // namespace
+
+TEST(CallCFunction9) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+
+ const int kNumParams = 0;
+ CodeAssemblerTester data(isolate, kNumParams);
+ CodeStubAssembler m(data.state());
+
+ {
+ Node* const fun_constant = m.ExternalConstant(
+ ExternalReference(reinterpret_cast<Address>(sum9), isolate));
+
+ MachineType type_intptr = MachineType::IntPtr();
+
+ Node* const result = m.CallCFunction9(
+ type_intptr, type_intptr, type_intptr, type_intptr, type_intptr,
+ type_intptr, type_intptr, type_intptr, type_intptr, type_intptr,
+ fun_constant, m.IntPtrConstant(0), m.IntPtrConstant(1),
+ m.IntPtrConstant(2), m.IntPtrConstant(3), m.IntPtrConstant(4),
+ m.IntPtrConstant(5), m.IntPtrConstant(6), m.IntPtrConstant(7),
+ m.IntPtrConstant(8));
+ m.Return(m.SmiTag(result));
+ }
+
+ Handle<Code> code = data.GenerateCode();
+ FunctionTester ft(code, kNumParams);
+
+ Handle<Object> result = ft.Call().ToHandleChecked();
+ CHECK_EQ(36, Handle<Smi>::cast(result)->value());
+}
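
The expected value is simply 0 + 1 + ... + 8 = 36; the interesting part is argument marshalling, since nine integer arguments overflow the register-passing window of common calling conventions and force stack traffic. The C side of such a call, as a plain sketch with a hypothetical helper name:

    // Sketch: what the generated code effectively invokes. With nine integer
    // parameters, some arguments travel on the stack under typical ABIs.
    using Sum9Fn = int (*)(int, int, int, int, int, int, int, int, int);
    int call_sum9(Sum9Fn f) {
      return f(0, 1, 2, 3, 4, 5, 6, 7, 8);  // == 36
    }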
+
+namespace {
+
void CheckToUint32Result(uint32_t expected, Handle<Object> result) {
const int64_t result_int64 = NumberToInt64(*result);
const uint32_t result_uint32 = NumberToUint32(*result);
@@ -312,29 +351,29 @@ TEST(TryToName) {
m.TryToName(key, &if_keyisindex, &var_index, &if_keyisunique, &var_unique,
&if_bailout);
- m.Bind(&if_keyisindex);
+ m.BIND(&if_keyisindex);
m.GotoIfNot(m.WordEqual(expected_result,
m.SmiConstant(Smi::FromInt(kKeyIsIndex))),
&failed);
m.Branch(m.WordEqual(m.SmiUntag(expected_arg), var_index.value()),
&passed, &failed);
- m.Bind(&if_keyisunique);
+ m.BIND(&if_keyisunique);
m.GotoIfNot(m.WordEqual(expected_result,
m.SmiConstant(Smi::FromInt(kKeyIsUnique))),
&failed);
m.Branch(m.WordEqual(expected_arg, var_unique.value()), &passed, &failed);
}
- m.Bind(&if_bailout);
+ m.BIND(&if_bailout);
m.Branch(
m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kBailout))),
&passed, &failed);
- m.Bind(&passed);
+ m.BIND(&passed);
m.Return(m.BooleanConstant(true));
- m.Bind(&failed);
+ m.BIND(&failed);
m.Return(m.BooleanConstant(false));
}
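
The Bind -> BIND switch repeated below is mechanical at the call sites; BIND is assumed to be a macro over CodeAssembler::Bind that can additionally record label metadata (for example a name and source location) in debugging configurations, which a plain Bind call cannot. The control-flow shape is unchanged, as in this pattern sketch ('m' is a CodeStubAssembler and 'cond' a hypothetical Node*, as in the surrounding tests):

    // Pattern sketch, not a new test: branch to labels, then BIND each label
    // before emitting its code.
    CodeAssemblerLabel if_true(&m), if_false(&m);
    m.Branch(cond, &if_true, &if_false);
    m.BIND(&if_true);
    m.Return(m.BooleanConstant(true));
    m.BIND(&if_false);
    m.Return(m.BooleanConstant(false));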
@@ -496,22 +535,22 @@ void TestNameDictionaryLookup() {
m.NameDictionaryLookup<Dictionary>(dictionary, unique_name, &if_found,
&var_name_index, &if_not_found);
- m.Bind(&if_found);
+ m.BIND(&if_found);
m.GotoIfNot(
m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
&failed);
m.Branch(m.WordEqual(m.SmiUntag(expected_arg), var_name_index.value()),
&passed, &failed);
- m.Bind(&if_not_found);
+ m.BIND(&if_not_found);
m.Branch(
m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
&passed, &failed);
- m.Bind(&passed);
+ m.BIND(&passed);
m.Return(m.BooleanConstant(true));
- m.Bind(&failed);
+ m.BIND(&failed);
m.Return(m.BooleanConstant(false));
}
@@ -603,22 +642,22 @@ void TestNumberDictionaryLookup() {
m.NumberDictionaryLookup<Dictionary>(dictionary, key, &if_found, &var_entry,
&if_not_found);
- m.Bind(&if_found);
+ m.BIND(&if_found);
m.GotoIfNot(
m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
&failed);
m.Branch(m.WordEqual(m.SmiUntag(expected_arg), var_entry.value()), &passed,
&failed);
- m.Bind(&if_not_found);
+ m.BIND(&if_not_found);
m.Branch(
m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
&passed, &failed);
- m.Bind(&passed);
+ m.BIND(&passed);
m.Return(m.BooleanConstant(true));
- m.Bind(&failed);
+ m.BIND(&failed);
m.Return(m.BooleanConstant(false));
}
@@ -743,24 +782,24 @@ TEST(TryHasOwnProperty) {
m.TryHasOwnProperty(object, map, instance_type, unique_name, &if_found,
&if_not_found, &if_bailout);
- m.Bind(&if_found);
+ m.BIND(&if_found);
m.Branch(m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
&passed, &failed);
- m.Bind(&if_not_found);
+ m.BIND(&if_not_found);
m.Branch(
m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
&passed, &failed);
- m.Bind(&if_bailout);
+ m.BIND(&if_bailout);
m.Branch(
m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kBailout))),
&passed, &failed);
- m.Bind(&passed);
+ m.BIND(&passed);
m.Return(m.BooleanConstant(true));
- m.Bind(&failed);
+ m.BIND(&failed);
m.Return(m.BooleanConstant(false));
}
@@ -932,13 +971,13 @@ TEST(TryGetOwnProperty) {
unique_name, &if_found, &var_value, &if_not_found,
&if_bailout);
- m.Bind(&if_found);
+ m.BIND(&if_found);
m.Return(var_value.value());
- m.Bind(&if_not_found);
+ m.BIND(&if_not_found);
m.Return(m.HeapConstant(not_found_symbol));
- m.Bind(&if_bailout);
+ m.BIND(&if_bailout);
m.Return(m.HeapConstant(bailout_symbol));
}
@@ -1147,28 +1186,28 @@ TEST(TryLookupElement) {
m.TryLookupElement(object, map, instance_type, index, &if_found, &if_absent,
&if_not_found, &if_bailout);
- m.Bind(&if_found);
+ m.BIND(&if_found);
m.Branch(m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kFound))),
&passed, &failed);
- m.Bind(&if_absent);
+ m.BIND(&if_absent);
m.Branch(m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kAbsent))),
&passed, &failed);
- m.Bind(&if_not_found);
+ m.BIND(&if_not_found);
m.Branch(
m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kNotFound))),
&passed, &failed);
- m.Bind(&if_bailout);
+ m.BIND(&if_bailout);
m.Branch(
m.WordEqual(expected_result, m.SmiConstant(Smi::FromInt(kBailout))),
&passed, &failed);
- m.Bind(&passed);
+ m.BIND(&passed);
m.Return(m.BooleanConstant(true));
- m.Bind(&failed);
+ m.BIND(&failed);
m.Return(m.BooleanConstant(false));
}
@@ -1718,9 +1757,9 @@ TEST(IsDebugActive) {
CodeAssemblerLabel if_active(&m), if_not_active(&m);
m.Branch(m.IsDebugActive(), &if_active, &if_not_active);
- m.Bind(&if_active);
+ m.BIND(&if_active);
m.Return(m.TrueConstant());
- m.Bind(&if_not_active);
+ m.BIND(&if_not_active);
m.Return(m.FalseConstant());
Handle<Code> code = data.GenerateCode();
@@ -1768,12 +1807,11 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
Variable arg_index(this, MachineType::PointerRepresentation());
Label bailout(this);
arg_index.Bind(IntPtrConstant(0));
- Node* length = BuildAppendJSArray(
- kind_, HeapConstant(Handle<HeapObject>(isolate->context(), isolate)),
- HeapConstant(array), args, arg_index, &bailout);
+ Node* length = BuildAppendJSArray(kind_, HeapConstant(array), args,
+ arg_index, &bailout);
Return(length);
- Bind(&bailout);
+ BIND(&bailout);
Return(SmiTag(IntPtrAdd(arg_index.value(), IntPtrConstant(2))));
Handle<Code> code = tester->GenerateCode();
@@ -2448,7 +2486,7 @@ TEST(DirectMemoryTest8BitWord32Immediate) {
m.Return(m.SmiConstant(1));
- m.Bind(&bad);
+ m.BIND(&bad);
m.Return(m.SmiConstant(0));
Handle<Code> code = data.GenerateCode();
@@ -2485,7 +2523,7 @@ TEST(DirectMemoryTest16BitWord32Immediate) {
m.Return(m.SmiConstant(1));
- m.Bind(&bad);
+ m.BIND(&bad);
m.Return(m.SmiConstant(0));
Handle<Code> code = data.GenerateCode();
@@ -2534,7 +2572,7 @@ TEST(DirectMemoryTest8BitWord32) {
m.Return(m.SmiConstant(1));
- m.Bind(&bad);
+ m.BIND(&bad);
m.Return(m.SmiConstant(0));
Handle<Code> code = data.GenerateCode();
@@ -2597,7 +2635,7 @@ TEST(DirectMemoryTest16BitWord32) {
m.Return(m.SmiConstant(1));
- m.Bind(&bad);
+ m.BIND(&bad);
m.Return(m.SmiConstant(0));
Handle<Code> code = data.GenerateCode();
diff --git a/deps/v8/test/cctest/test-code-stubs-mips64.cc b/deps/v8/test/cctest/test-code-stubs-mips64.cc
index bef21717ee..45d28f2c19 100644
--- a/deps/v8/test/cctest/test-code-stubs-mips64.cc
+++ b/deps/v8/test/cctest/test-code-stubs-mips64.cc
@@ -75,7 +75,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
}
// Push the double argument.
__ Dsubu(sp, sp, Operand(kDoubleSize));
- __ sdc1(f12, MemOperand(sp));
+ __ Sdc1(f12, MemOperand(sp));
__ Move(source_reg, sp);
  // Save registers to make sure they don't get clobbered.
@@ -92,11 +92,11 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
// Re-push the double argument.
__ Dsubu(sp, sp, Operand(kDoubleSize));
- __ sdc1(f12, MemOperand(sp));
+ __ Sdc1(f12, MemOperand(sp));
// Call through to the actual stub
if (inline_fastpath) {
- __ ldc1(f12, MemOperand(source_reg));
+ __ Ldc1(f12, MemOperand(source_reg));
__ TryInlineTruncateDoubleToI(destination_reg, f12, &done);
if (destination_reg.is(source_reg) && !source_reg.is(sp)) {
// Restore clobbered source_reg.
@@ -112,7 +112,7 @@ ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
for (--reg_num; reg_num >= 2; --reg_num) {
Register reg = Register::from_code(reg_num);
if (!reg.is(destination_reg)) {
- __ ld(at, MemOperand(sp, 0));
+ __ Ld(at, MemOperand(sp, 0));
__ Assert(eq, kRegisterWasClobbered, reg, Operand(at));
__ Daddu(sp, sp, Operand(kPointerSize));
}
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 2dc5d6b4cb..751e1dbe07 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -289,10 +289,10 @@ TEST(GetScriptLineNumber) {
TEST(FeedbackVectorPreservedAcrossRecompiles) {
- if (i::FLAG_always_opt || !i::FLAG_crankshaft) return;
+ if (i::FLAG_always_opt || !i::FLAG_opt) return;
i::FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
- if (!CcTest::i_isolate()->use_crankshaft()) return;
+ if (!CcTest::i_isolate()->use_optimizer()) return;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
@@ -402,8 +402,8 @@ TEST(OptimizedCodeSharing1) {
env->Global()
->Get(env.local(), v8_str("closure2"))
.ToLocalChecked())));
- CHECK(fun1->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
- CHECK(fun2->IsOptimized() || !CcTest::i_isolate()->use_crankshaft());
+ CHECK(fun1->IsOptimized() || !CcTest::i_isolate()->use_optimizer());
+ CHECK(fun2->IsOptimized() || !CcTest::i_isolate()->use_optimizer());
CHECK_EQ(fun1->code(), fun2->code());
}
}
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 08f262d0e7..9ccc93f0f5 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -1033,7 +1033,7 @@ TEST(BoundFunctionCall) {
// This tests checks distribution of the samples through the source lines.
static void TickLines(bool optimize) {
- if (!optimize) i::FLAG_crankshaft = false;
+ if (!optimize) i::FLAG_opt = false;
CcTest::InitializeVM();
LocalContext env;
i::FLAG_allow_natives_syntax = true;
@@ -1072,7 +1072,7 @@ static void TickLines(bool optimize) {
CHECK(func->shared());
CHECK(func->shared()->abstract_code());
CHECK(!optimize || func->IsOptimized() ||
- !CcTest::i_isolate()->use_crankshaft());
+ !CcTest::i_isolate()->use_optimizer());
i::AbstractCode* code = func->abstract_code();
CHECK(code);
i::Address code_address = code->instruction_start();
@@ -1179,7 +1179,7 @@ TEST(FunctionCallSample) {
  // Collect garbage that might have been generated while installing
  // extensions.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CompileRun(call_function_test_source);
v8::Local<v8::Function> function = GetFunction(env.local(), "start");
@@ -1792,7 +1792,7 @@ const char* GetBranchDeoptReason(v8::Local<v8::Context> context,
// deopt at top function
TEST(CollectDeoptEvents) {
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
@@ -1926,7 +1926,7 @@ static const char* inlined_source =
// deopt at the first level inlined function
TEST(DeoptAtFirstLevelInlinedSource) {
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
@@ -1996,7 +1996,7 @@ TEST(DeoptAtFirstLevelInlinedSource) {
// deopt at the second level inlined function
TEST(DeoptAtSecondLevelInlinedSource) {
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
@@ -2071,7 +2071,7 @@ TEST(DeoptAtSecondLevelInlinedSource) {
// deopt in untracked function
TEST(DeoptUntrackedFunction) {
- if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
+ if (!CcTest::i_isolate()->use_optimizer() || i::FLAG_always_opt) return;
i::FLAG_allow_natives_syntax = true;
v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
diff --git a/deps/v8/test/cctest/test-date.cc b/deps/v8/test/cctest/test-date.cc
index aa9f9f7ca1..14e606a7c3 100644
--- a/deps/v8/test/cctest/test-date.cc
+++ b/deps/v8/test/cctest/test-date.cc
@@ -194,7 +194,7 @@ TEST(DateParseLegacyUseCounter) {
CHECK_EQ(1, legacy_parse_count);
}
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
TEST(DateCacheVersion) {
FLAG_allow_natives_syntax = true;
v8::Isolate* isolate = CcTest::isolate();
@@ -215,4 +215,4 @@ TEST(DateCacheVersion) {
CHECK(date_cache_version->IsNumber());
CHECK_EQ(1.0, date_cache_version->NumberValue(context).FromMaybe(-1.0));
}
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 908b59bc20..e055b5f145 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -384,14 +384,13 @@ void CheckDebuggerUnloaded(bool check_functions) {
CHECK(!CcTest::i_isolate()->debug()->debug_info_list_);
// Collect garbage to ensure weak handles are cleared.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CcTest::CollectAllGarbage(Heap::kMakeHeapIterableMask);
- // Iterate the head and check that there are no debugger related objects left.
+ // Iterate the heap and check that there are no debugger related objects left.
HeapIterator iterator(CcTest::heap());
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
CHECK(!obj->IsDebugInfo());
- CHECK(!obj->IsBreakPointInfo());
  // If deep check of functions is requested, check that no debug break code
// is left in all functions.
@@ -813,7 +812,7 @@ static void DebugEventBreakPointCollectGarbage(
CcTest::CollectGarbage(v8::internal::NEW_SPACE);
} else {
// Mark sweep compact.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
}
}
@@ -1224,7 +1223,7 @@ static void CallAndGC(v8::Local<v8::Context> context,
CHECK_EQ(2 + i * 3, break_point_hit_count);
// Mark sweep (and perhaps compact) and call function.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
f->Call(context, recv, 0, NULL).ToLocalChecked();
CHECK_EQ(3 + i * 3, break_point_hit_count);
}
@@ -1948,7 +1947,7 @@ TEST(ScriptBreakPointLineTopLevel) {
->Get(context, v8_str(env->GetIsolate(), "f"))
.ToLocalChecked());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
SetScriptBreakPointByNameFromJS(env->GetIsolate(), "test.html", 3, -1);
@@ -6713,3 +6712,29 @@ TEST(DebugGetPossibleBreakpointsReturnLocations) {
CHECK(returns_count == 1);
}
}
+
+TEST(DebugEvaluateNoSideEffect) {
+ LocalContext env;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+ i::List<i::Handle<i::JSFunction>> list;
+ {
+ i::HeapIterator iterator(isolate->heap());
+ while (i::HeapObject* obj = iterator.next()) {
+ if (!obj->IsJSFunction()) continue;
+ i::JSFunction* fun = i::JSFunction::cast(obj);
+ list.Add(i::Handle<i::JSFunction>(fun));
+ }
+ }
+
+ // Perform side effect check on all built-in functions. The side effect check
+ // itself contains additional sanity checks.
+ for (i::Handle<i::JSFunction> fun : list) {
+ bool failed = false;
+ {
+ i::NoSideEffectScope scope(isolate, true);
+ failed = !isolate->debug()->PerformSideEffectCheck(fun);
+ }
+ if (failed) isolate->clear_pending_exception();
+ }
+}
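
Collecting the functions into a list before running any checks is deliberate: PerformSideEffectCheck can allocate and trigger GC, which must not happen while a HeapIterator is live. The same two-phase idiom in isolation, as a self-contained sketch:

    #include <iterator>
    #include <vector>

    // Sketch: snapshot-then-process. The read-only walk finishes before any
    // work that could mutate (or, for a heap, move) the underlying objects.
    template <typename Iter, typename Pred, typename Fn>
    void snapshot_then_process(Iter begin, Iter end, Pred keep, Fn process) {
      std::vector<typename std::iterator_traits<Iter>::value_type> snapshot;
      for (Iter it = begin; it != end; ++it)
        if (keep(*it)) snapshot.push_back(*it);   // phase 1: read-only
      for (auto& item : snapshot) process(item);  // phase 2: may allocate/GC
    }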
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index 06d746b2e6..7da027d03f 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -448,7 +448,7 @@ UNINITIALIZED_TEST(DeoptimizeBinaryOperationADDString) {
i::FLAG_always_opt = true;
CompileRun(f_source);
CompileRun("f('a+', new X());");
- CHECK(!i_isolate->use_crankshaft() ||
+ CHECK(!i_isolate->use_optimizer() ||
GetJSFunction(env.local(), "f")->IsOptimized());
// Call f and force deoptimization while processing the binary operation.
@@ -510,7 +510,7 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
i::FLAG_always_opt = true;
CompileRun(f_source);
CompileRun("f(7, new X());");
- CHECK(!i_isolate->use_crankshaft() ||
+ CHECK(!i_isolate->use_optimizer() ||
GetJSFunction((*env).local(), "f")->IsOptimized());
// Call f and force deoptimization while processing the binary operation.
@@ -707,7 +707,7 @@ UNINITIALIZED_TEST(DeoptimizeCompare) {
i::FLAG_always_opt = true;
CompileRun(f_source);
CompileRun("f('a', new X());");
- CHECK(!i_isolate->use_crankshaft() ||
+ CHECK(!i_isolate->use_optimizer() ||
GetJSFunction(env.local(), "f")->IsOptimized());
// Call f and force deoptimization while processing the comparison.
@@ -798,7 +798,7 @@ UNINITIALIZED_TEST(DeoptimizeLoadICStoreIC) {
CompileRun("g1(new X());");
CompileRun("f2(new X(), 'z');");
CompileRun("g2(new X(), 'z');");
- if (i_isolate->use_crankshaft()) {
+ if (i_isolate->use_optimizer()) {
CHECK(GetJSFunction(env.local(), "f1")->IsOptimized());
CHECK(GetJSFunction(env.local(), "g1")->IsOptimized());
CHECK(GetJSFunction(env.local(), "f2")->IsOptimized());
@@ -902,7 +902,7 @@ UNINITIALIZED_TEST(DeoptimizeLoadICStoreICNested) {
CompileRun("g1(new X());");
CompileRun("f2(new X(), 'z');");
CompileRun("g2(new X(), 'z');");
- if (i_isolate->use_crankshaft()) {
+ if (i_isolate->use_optimizer()) {
CHECK(GetJSFunction(env.local(), "f1")->IsOptimized());
CHECK(GetJSFunction(env.local(), "g1")->IsOptimized());
CHECK(GetJSFunction(env.local(), "f2")->IsOptimized());
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index b0d429f5c8..fd015639af 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -29,6 +29,7 @@
#include "test/cctest/cctest.h"
#include "src/api.h"
+#include "src/builtins/builtins-constructor.h"
#include "src/debug/debug.h"
#include "src/execution.h"
#include "src/factory.h"
@@ -311,4 +312,15 @@ TEST(SetRequiresCopyOnCapacityChange) {
CHECK_NE(*dict, *new_dict);
}
+TEST(MaximumClonedShallowObjectProperties) {
+ // Assert that a NameDictionary with kMaximumClonedShallowObjectProperties is
+ // not in large-object space.
+ const int max_capacity = NameDictionary::ComputeCapacity(
+ ConstructorBuiltins::kMaximumClonedShallowObjectProperties);
+ const int max_literal_entry = max_capacity / NameDictionary::kEntrySize;
+ const int max_literal_index = NameDictionary::EntryToIndex(max_literal_entry);
+ CHECK_LE(NameDictionary::OffsetOfElementAt(max_literal_index),
+ kMaxRegularHeapObjectSize);
+}
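
The test walks from a property count to a byte offset: ComputeCapacity pads the requested count with hash-table headroom, kEntrySize slots per entry turn entries into field indices, and OffsetOfElementAt turns the index into bytes, which must stay below kMaxRegularHeapObjectSize for the clone to avoid large-object space. A back-of-envelope model with assumed, illustrative constants (the authoritative values live in NameDictionary and the heap layout):

    // Sketch: the dominant term the CHECK_LE above is bounding.
    constexpr int kTaggedSizeBytes = 8;  // 64-bit tagged slot (assumption)
    constexpr int kEntrySlots = 3;       // key, value, details (assumption)
    constexpr int approx_dictionary_bytes(int capacity) {
      return capacity * kEntrySlots * kTaggedSizeBytes;  // ignores header
    }
    static_assert(approx_dictionary_bytes(16 * 1024) < 512 * 1024,
                  "a 16k-capacity dictionary fits well under a 512KB bound");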
+
} // namespace
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 450986d3d2..89f8819a25 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -994,10 +994,14 @@ TEST(Neon) {
"eea24b30 vdup.16 q1, r4");
COMPARE(vdup(Neon32, q15, r1),
"eeae1b90 vdup.32 q15, r1");
- COMPARE(vdup(q0, s3),
- "f3bc0c41 vdup q0, d1[1]");
- COMPARE(vdup(q15, s2),
- "f3f4ec41 vdup q15, d1[0]");
+ COMPARE(vdup(Neon32, q0, d1, 1),
+ "f3bc0c41 vdup.32 q0, d1[1]");
+ COMPARE(vdup(Neon32, q15, d1, 0),
+ "f3f4ec41 vdup.32 q15, d1[0]");
+ COMPARE(vdup(Neon16, q7, d8, 3),
+ "f3beec48 vdup.16 q7, d8[3]");
+ COMPARE(vdup(Neon32, d0, d30, 0),
+ "f3b40c2e vdup.32 d0, d30[0]");
COMPARE(vcvt_f32_s32(q15, q1),
"f3fbe642 vcvt.f32.s32 q15, q1");
COMPARE(vcvt_f32_u32(q8, q9),
@@ -1044,6 +1048,14 @@ TEST(Neon) {
"f3142670 vmin.u16 q1, q2, q8");
COMPARE(vmax(NeonS32, q15, q0, q8),
"f260e660 vmax.s32 q15, q0, q8");
+ COMPARE(vpadd(d0, d1, d2),
+ "f3010d02 vpadd.f32 d0, d1, d2");
+ COMPARE(vpadd(Neon8, d0, d1, d2),
+ "f2010b12 vpadd.i8 d0, d1, d2");
+ COMPARE(vpadd(Neon16, d0, d1, d2),
+ "f2110b12 vpadd.i16 d0, d1, d2");
+ COMPARE(vpadd(Neon32, d0, d1, d2),
+ "f2210b12 vpadd.i32 d0, d1, d2");
COMPARE(vpmax(NeonS8, d0, d1, d2),
"f2010a02 vpmax.s8 d0, d1, d2");
COMPARE(vpmin(NeonU16, d1, d2, d8),
@@ -1098,6 +1110,14 @@ TEST(Neon) {
"f3d6e050 vshr.u16 q15, q0, #10");
COMPARE(vshr(NeonS32, q15, q0, 17),
"f2efe050 vshr.s32 q15, q0, #17");
+ COMPARE(vsli(Neon64, d2, d0, 32),
+ "f3a02590 vsli.64 d2, d0, #32");
+ COMPARE(vsli(Neon32, d7, d8, 17),
+ "f3b17518 vsli.32 d7, d8, #17");
+ COMPARE(vsri(Neon64, d2, d0, 32),
+ "f3a02490 vsri.64 d2, d0, #32");
+ COMPARE(vsri(Neon16, d7, d8, 8),
+ "f3987418 vsri.16 d7, d8, #8");
COMPARE(vrecpe(q15, q0),
"f3fbe540 vrecpe.f32 q15, q0");
COMPARE(vrecps(q15, q0, q8),
@@ -1385,6 +1405,39 @@ TEST(LoadStore) {
}
+static void TestLoadLiteral(byte* buffer, Assembler* assm, bool* failure,
+ int offset) {
+ int pc_offset = assm->pc_offset();
+ byte *progcounter = &buffer[pc_offset];
+ assm->ldr(r0, MemOperand(pc, offset));
+
+ const char *expected_string_template =
+ (offset >= 0) ?
+ "e59f0%03x ldr r0, [pc, #+%d] (addr %p)" :
+ "e51f0%03x ldr r0, [pc, #%d] (addr %p)";
+ char expected_string[80];
+ snprintf(expected_string, sizeof(expected_string), expected_string_template,
+ abs(offset), offset,
+ progcounter + Instruction::kPCReadOffset + offset);
+ if (!DisassembleAndCompare(progcounter, expected_string)) *failure = true;
+}
+
+
+TEST(LoadLiteral) {
+ SET_UP();
+
+ TestLoadLiteral(buffer, &assm, &failure, 0);
+ TestLoadLiteral(buffer, &assm, &failure, 1);
+ TestLoadLiteral(buffer, &assm, &failure, 4);
+ TestLoadLiteral(buffer, &assm, &failure, 4095);
+ TestLoadLiteral(buffer, &assm, &failure, -1);
+ TestLoadLiteral(buffer, &assm, &failure, -4);
+ TestLoadLiteral(buffer, &assm, &failure, -4095);
+
+ VERIFY_RUN();
+}
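
The two format strings in TestLoadLiteral encode the ARM U-bit: a non-negative literal-pool offset assembles with U = 1 (base word 0xe59f0000, printed as #+off) and a negative one with U = 0 (0xe51f0000), with abs(offset) in the low 12 bits either way. A self-contained encoder matching that reading (a worked model, not output from running the test):

    #include <cstdint>
    #include <cstdlib>

    // Sketch: assemble "ldr r0, [pc, #offset]" as the expected strings assume.
    uint32_t encode_ldr_r0_pc(int offset) {
      uint32_t base = (offset >= 0) ? 0xe59f0000u : 0xe51f0000u;  // U-bit
      return base | (static_cast<uint32_t>(std::abs(offset)) & 0xfffu);
    }
    // encode_ldr_r0_pc(4095) == 0xe59f0fff; encode_ldr_r0_pc(-4) == 0xe51f0004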
+
+
TEST(Barrier) {
SET_UP();
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 3331521df9..5a560df8c3 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -428,6 +428,19 @@ TEST(DisasmIa320) {
__ minps(xmm1, Operand(ebx, ecx, times_4, 10000));
__ maxps(xmm1, xmm0);
__ maxps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ rcpps(xmm1, xmm0);
+ __ rcpps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ rsqrtps(xmm1, xmm0);
+ __ rsqrtps(xmm1, Operand(ebx, ecx, times_4, 10000));
+
+ __ cmpeqps(xmm5, xmm1);
+ __ cmpeqps(xmm5, Operand(ebx, ecx, times_4, 10000));
+ __ cmpltps(xmm5, xmm1);
+ __ cmpltps(xmm5, Operand(ebx, ecx, times_4, 10000));
+ __ cmpleps(xmm5, xmm1);
+ __ cmpleps(xmm5, Operand(ebx, ecx, times_4, 10000));
+ __ cmpneqps(xmm5, xmm1);
+ __ cmpneqps(xmm5, Operand(ebx, ecx, times_4, 10000));
__ ucomiss(xmm0, xmm1);
__ ucomiss(xmm0, Operand(ebx, ecx, times_4, 10000));
@@ -437,6 +450,10 @@ TEST(DisasmIa320) {
__ cvtsi2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ cvtss2sd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ cvtss2sd(xmm1, xmm0);
+ __ cvtdq2ps(xmm1, xmm0);
+ __ cvtdq2ps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ cvttps2dq(xmm1, xmm0);
+ __ cvttps2dq(xmm1, Operand(ebx, ecx, times_4, 10000));
__ movsd(xmm1, Operand(ebx, ecx, times_4, 10000));
__ movsd(Operand(ebx, ecx, times_4, 10000), xmm1);
// 128 bit move instructions.
@@ -566,6 +583,19 @@ TEST(DisasmIa320) {
__ vdivps(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
__ vmaxps(xmm0, xmm1, xmm2);
__ vmaxps(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vrcpps(xmm1, xmm0);
+ __ vrcpps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vrsqrtps(xmm1, xmm0);
+ __ vrsqrtps(xmm1, Operand(ebx, ecx, times_4, 10000));
+
+ __ vcmpeqps(xmm5, xmm4, xmm1);
+ __ vcmpeqps(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
+ __ vcmpltps(xmm5, xmm4, xmm1);
+ __ vcmpltps(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
+ __ vcmpleps(xmm5, xmm4, xmm1);
+ __ vcmpleps(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
+ __ vcmpneqps(xmm5, xmm4, xmm1);
+ __ vcmpneqps(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
__ vandpd(xmm0, xmm1, xmm2);
__ vandpd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
@@ -590,6 +620,12 @@ TEST(DisasmIa320) {
__ vpsrld(xmm0, xmm7, 21);
__ vpsraw(xmm0, xmm7, 21);
__ vpsrad(xmm0, xmm7, 21);
+
+ __ vcvtdq2ps(xmm1, xmm0);
+ __ vcvtdq2ps(xmm1, Operand(ebx, ecx, times_4, 10000));
+ __ vcvttps2dq(xmm1, xmm0);
+ __ vcvttps2dq(xmm1, Operand(ebx, ecx, times_4, 10000));
+
#define EMIT_SSE2_AVXINSTR(instruction, notUsed1, notUsed2, notUsed3) \
__ v##instruction(xmm7, xmm5, xmm1); \
__ v##instruction(xmm7, xmm5, Operand(edx, 4));
diff --git a/deps/v8/test/cctest/test-disasm-mips64.cc b/deps/v8/test/cctest/test-disasm-mips64.cc
index eca9e1ffdd..5046527d4e 100644
--- a/deps/v8/test/cctest/test-disasm-mips64.cc
+++ b/deps/v8/test/cctest/test-disasm-mips64.cc
@@ -711,26 +711,41 @@ TEST(Type0) {
COMPARE(dsbh(s6, s7), "7c17b0a4 dsbh s6, s7");
COMPARE(dsbh(v0, v1), "7c0310a4 dsbh v0, v1");
+ COMPARE(dshd(a0, a1), "7c052164 dshd a0, a1");
+ COMPARE(dshd(s6, s7), "7c17b164 dshd s6, s7");
+ COMPARE(dshd(v0, v1), "7c031164 dshd v0, v1");
+
+ COMPARE(ext_(a0, a1, 31, 1), "7ca407c0 ext a0, a1, 31, 1");
+ COMPARE(ext_(s6, s7, 30, 2), "7ef60f80 ext s6, s7, 30, 2");
+ COMPARE(ext_(v0, v1, 0, 32), "7c62f800 ext v0, v1, 0, 32");
+
+ COMPARE(dext_(a0, a1, 31, 1), "7ca407c3 dext a0, a1, 31, 1");
+ COMPARE(dext_(s6, s7, 30, 2), "7ef60f83 dext s6, s7, 30, 2");
+ COMPARE(dext_(v0, v1, 0, 32), "7c62f803 dext v0, v1, 0, 32");
+
+ COMPARE(dextm_(a0, a1, 31, 33), "7ca407c1 dextm a0, a1, 31, 33");
+ COMPARE(dextm_(s6, s7, 0, 33), "7ef60001 dextm s6, s7, 0, 33");
+ COMPARE(dextm_(v0, v1, 0, 64), "7c62f801 dextm v0, v1, 0, 64");
+
+ COMPARE(dextu_(a0, a1, 32, 1), "7ca40002 dextu a0, a1, 32, 1");
+ COMPARE(dextu_(s6, s7, 63, 1), "7ef607c2 dextu s6, s7, 63, 1");
+ COMPARE(dextu_(v0, v1, 32, 32), "7c62f802 dextu v0, v1, 32, 32");
+
+ COMPARE(ins_(a0, a1, 31, 1), "7ca4ffc4 ins a0, a1, 31, 1");
+ COMPARE(ins_(s6, s7, 30, 2), "7ef6ff84 ins s6, s7, 30, 2");
+ COMPARE(ins_(v0, v1, 0, 32), "7c62f804 ins v0, v1, 0, 32");
+
COMPARE(dins_(a0, a1, 31, 1), "7ca4ffc7 dins a0, a1, 31, 1");
COMPARE(dins_(s6, s7, 30, 2), "7ef6ff87 dins s6, s7, 30, 2");
COMPARE(dins_(v0, v1, 0, 32), "7c62f807 dins v0, v1, 0, 32");
- COMPARE(dshd(a0, a1), "7c052164 dshd a0, a1");
- COMPARE(dshd(s6, s7), "7c17b164 dshd s6, s7");
- COMPARE(dshd(v0, v1), "7c031164 dshd v0, v1");
+ COMPARE(dinsm_(a0, a1, 31, 2), "7ca407c5 dinsm a0, a1, 31, 2");
+ COMPARE(dinsm_(s6, s7, 0, 33), "7ef60005 dinsm s6, s7, 0, 33");
+ COMPARE(dinsm_(v0, v1, 0, 64), "7c62f805 dinsm v0, v1, 0, 64");
- COMPARE(ins_(a0, a1, 31, 1),
- "7ca4ffc4 ins a0, a1, 31, 1");
- COMPARE(ins_(s6, s7, 30, 2),
- "7ef6ff84 ins s6, s7, 30, 2");
- COMPARE(ins_(v0, v1, 0, 32),
- "7c62f804 ins v0, v1, 0, 32");
- COMPARE(ext_(a0, a1, 31, 1),
- "7ca407c0 ext a0, a1, 31, 1");
- COMPARE(ext_(s6, s7, 30, 2),
- "7ef60f80 ext s6, s7, 30, 2");
- COMPARE(ext_(v0, v1, 0, 32),
- "7c62f800 ext v0, v1, 0, 32");
+ COMPARE(dinsu_(a0, a1, 32, 1), "7ca40006 dinsu a0, a1, 32, 1");
+ COMPARE(dinsu_(s6, s7, 63, 1), "7ef6ffc6 dinsu s6, s7, 63, 1");
+ COMPARE(dinsu_(v0, v1, 32, 32), "7c62f806 dinsu v0, v1, 32, 32");
COMPARE(add_s(f4, f6, f8), "46083100 add.s f4, f6, f8");
COMPARE(add_d(f12, f14, f16), "46307300 add.d f12, f14, f16");
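
Grouping the family this way also surfaces its structure: ext/ins cover pos and size within the low 32 bits, while dextm/dextu and dinsm/dinsu extend the encodable range when size or pos crosses 32 by biasing the 5-bit fields. A decoder for the plain ext case, assuming the standard SPECIAL3 field layout (rs at bits 25-21, rt at 20-16, msbd = size - 1 at 15-11, lsb = pos at 10-6):

    #include <cstdint>

    // Sketch: pull the operands back out of an "ext" encoding.
    struct ExtFields { unsigned rs, rt, size, pos; };
    ExtFields decode_ext(uint32_t insn) {
      return {(insn >> 21) & 31u, (insn >> 16) & 31u,
              ((insn >> 11) & 31u) + 1u,  // msbd field stores size - 1
              (insn >> 6) & 31u};         // lsb field stores pos
    }
    // decode_ext(0x7ca407c0) -> rs = 5 (a1), rt = 4 (a0), size = 1, pos = 31,
    // matching the expected string "ext a0, a1, 31, 1".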
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 8ab35365b8..41f06a61bb 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -469,6 +469,9 @@ TEST(DisasmX64) {
__ punpckldq(xmm5, Operand(rdx, 4));
__ punpckhdq(xmm8, xmm15);
+ __ pshuflw(xmm2, xmm4, 3);
+ __ pshufhw(xmm1, xmm9, 6);
+
#define EMIT_SSE2_INSTR(instruction, notUsed1, notUsed2, notUsed3) \
__ instruction(xmm5, xmm1); \
__ instruction(xmm5, Operand(rdx, 4));
@@ -521,6 +524,7 @@ TEST(DisasmX64) {
__ insertps(xmm5, xmm1, 123);
__ extractps(rax, xmm1, 0);
__ pextrw(rbx, xmm2, 1);
+ __ pinsrw(xmm2, rcx, 1);
__ pextrd(rbx, xmm15, 0);
__ pextrd(r12, xmm0, 1);
__ pinsrd(xmm9, r9, 0);
diff --git a/deps/v8/test/cctest/test-feedback-vector.cc b/deps/v8/test/cctest/test-feedback-vector.cc
index dc111e340f..2ae65197ef 100644
--- a/deps/v8/test/cctest/test-feedback-vector.cc
+++ b/deps/v8/test/cctest/test-feedback-vector.cc
@@ -220,7 +220,7 @@ TEST(VectorCallICStates) {
CHECK_EQ(GENERIC, nexus.StateFromFeedback());
// After a collection, state should remain GENERIC.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(GENERIC, nexus.StateFromFeedback());
}
@@ -245,7 +245,7 @@ TEST(VectorCallFeedbackForArray) {
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
CHECK(nexus.GetFeedback()->IsAllocationSite());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// It should stay monomorphic even after a GC.
CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
}
@@ -349,9 +349,9 @@ TEST(VectorLoadICStates) {
CompileRun("f({ blarg: 3, torino: 10, foo: 2 })");
CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
- MapHandleList maps;
- nexus.FindAllMaps(&maps);
- CHECK_EQ(4, maps.length());
+ MapHandles maps;
+ nexus.ExtractMaps(&maps);
+ CHECK_EQ(4, maps.size());
// Finally driven megamorphic.
CompileRun("f({ blarg: 3, gran: 3, torino: 10, foo: 2 })");
@@ -359,7 +359,7 @@ TEST(VectorLoadICStates) {
CHECK(!nexus.FindFirstMap());
// After a collection, state should not be reset to PREMONOMORPHIC.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
}
@@ -428,9 +428,9 @@ TEST(VectorLoadICOnSmi) {
CompileRun("f(o)");
CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
- MapHandleList maps;
- nexus.FindAllMaps(&maps);
- CHECK_EQ(2, maps.length());
+ MapHandles maps;
+ nexus.ExtractMaps(&maps);
+ CHECK_EQ(2, maps.size());
// One of the maps should be the o map.
v8::MaybeLocal<v8::Value> v8_o =
@@ -439,8 +439,7 @@ TEST(VectorLoadICOnSmi) {
Handle<JSObject>::cast(v8::Utils::OpenHandle(*v8_o.ToLocalChecked()));
bool number_map_found = false;
bool o_map_found = false;
- for (int i = 0; i < maps.length(); i++) {
- Handle<Map> current = maps[i];
+ for (Handle<Map> current : maps) {
if (*current == number_map)
number_map_found = true;
else if (*current == o->map())
@@ -451,9 +450,9 @@ TEST(VectorLoadICOnSmi) {
// The degree of polymorphism doesn't change.
CompileRun("f(100)");
CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
- MapHandleList maps2;
- nexus.FindAllMaps(&maps2);
- CHECK_EQ(2, maps2.length());
+ MapHandles maps2;
+ nexus.ExtractMaps(&maps2);
+ CHECK_EQ(2, maps2.size());
}
diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc
index 0ecf7911bd..79bb9b7860 100644
--- a/deps/v8/test/cctest/test-field-type-tracking.cc
+++ b/deps/v8/test/cctest/test-field-type-tracking.cc
@@ -322,8 +322,21 @@ class Expectations {
Handle<FieldType> heap_type) {
CHECK_EQ(number_of_properties_, map->NumberOfOwnDescriptors());
int property_index = number_of_properties_++;
- SetDataField(property_index, attributes, constness, representation,
- heap_type);
+ PropertyConstness expected_constness = constness;
+ Representation expected_representation = representation;
+ Handle<FieldType> expected_heap_type = heap_type;
+ if (IsTransitionableFastElementsKind(map->elements_kind())) {
+ // Maps with transitionable elements kinds must have non in-place
+ // generalizable fields.
+ if (FLAG_track_constant_fields && FLAG_modify_map_inplace) {
+ expected_constness = kMutable;
+ }
+ if (representation.IsHeapObject() && heap_type->IsClass()) {
+ expected_heap_type = FieldType::Any(isolate_);
+ }
+ }
+ SetDataField(property_index, attributes, expected_constness,
+ expected_representation, expected_heap_type);
Handle<String> name = MakeName("prop", property_index);
return Map::CopyWithField(map, name, heap_type, attributes, constness,
@@ -1768,9 +1781,9 @@ static void TestReconfigureElementsKind_GeneralizeField(
// Ensure Map::FindElementsKindTransitionedMap() is able to find the
// transitioned map.
{
- MapHandleList map_list;
- map_list.Add(updated_map);
- Map* transitioned_map = map2->FindElementsKindTransitionedMap(&map_list);
+ MapHandles map_list;
+ map_list.push_back(updated_map);
+ Map* transitioned_map = map2->FindElementsKindTransitionedMap(map_list);
CHECK_EQ(*updated_map, transitioned_map);
}
}
@@ -1788,8 +1801,7 @@ static void TestReconfigureElementsKind_GeneralizeField(
// where "p2A" and "p2B" differ only in the representation/field type.
//
static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
- const CRFTData& from, const CRFTData& to, const CRFTData& expected,
- bool expected_field_type_dependency = true) {
+ const CRFTData& from, const CRFTData& to, const CRFTData& expected) {
Isolate* isolate = CcTest::i_isolate();
Expectations expectations(isolate, FAST_SMI_ELEMENTS);
@@ -1851,7 +1863,7 @@ static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
expected.representation, expected.type);
CHECK(!map->is_deprecated());
CHECK_EQ(*map, *new_map);
- CHECK_EQ(expected_field_type_dependency, dependencies.HasAborted());
+ CHECK(!dependencies.HasAborted());
dependencies.Rollback(); // Properly cleanup compilation info.
CHECK(!new_map->is_deprecated());
@@ -1863,9 +1875,9 @@ static void TestReconfigureElementsKind_GeneralizeFieldTrivial(
// Ensure Map::FindElementsKindTransitionedMap() is able to find the
// transitioned map.
{
- MapHandleList map_list;
- map_list.Add(updated_map);
- Map* transitioned_map = map2->FindElementsKindTransitionedMap(&map_list);
+ MapHandles map_list;
+ map_list.push_back(updated_map);
+ Map* transitioned_map = map2->FindElementsKindTransitionedMap(map_list);
CHECK_EQ(*updated_map, transitioned_map);
}
}
@@ -2012,7 +2024,7 @@ TEST(ReconfigureElementsKind_GeneralizeHeapObjFieldToHeapObj) {
TestReconfigureElementsKind_GeneralizeFieldTrivial(
{kConst, Representation::HeapObject(), any_type},
{kConst, Representation::HeapObject(), new_type},
- {kConst, Representation::HeapObject(), any_type}, false);
+ {kConst, Representation::HeapObject(), any_type});
if (FLAG_modify_map_inplace) {
// kConst to kMutable migration does not create a new map, therefore
@@ -2033,12 +2045,12 @@ TEST(ReconfigureElementsKind_GeneralizeHeapObjFieldToHeapObj) {
TestReconfigureElementsKind_GeneralizeFieldTrivial(
{kMutable, Representation::HeapObject(), any_type},
{kConst, Representation::HeapObject(), new_type},
- {kMutable, Representation::HeapObject(), any_type}, false);
+ {kMutable, Representation::HeapObject(), any_type});
}
TestReconfigureElementsKind_GeneralizeFieldTrivial(
{kMutable, Representation::HeapObject(), any_type},
{kMutable, Representation::HeapObject(), new_type},
- {kMutable, Representation::HeapObject(), any_type}, false);
+ {kMutable, Representation::HeapObject(), any_type});
}
TEST(ReconfigureElementsKind_GeneralizeHeapObjectFieldToTagged) {
diff --git a/deps/v8/test/cctest/test-flags.cc b/deps/v8/test/cctest/test-flags.cc
index 0abbca6909..8c4865875b 100644
--- a/deps/v8/test/cctest/test-flags.cc
+++ b/deps/v8/test/cctest/test-flags.cc
@@ -252,7 +252,7 @@ TEST(FlagsRemoveIncomplete) {
// if the list of arguments ends unexpectedly.
SetFlagsToDefault();
int argc = 3;
- const char* argv[] = {"", "--crankshaft", "--expose-natives-as"};
+ const char* argv[] = {"", "--opt", "--expose-natives-as"};
CHECK_EQ(2, FlagList::SetFlagsFromCommandLine(&argc,
const_cast<char **>(argv),
true));
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 7855f75b38..b96e161566 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -689,7 +689,7 @@ TEST(HeapSnapshotAddressReuse) {
CompileRun(
"for (var i = 0; i < 10000; ++i)\n"
" a[i] = new A();\n");
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
const v8::HeapSnapshot* snapshot2 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot2));
@@ -731,7 +731,7 @@ TEST(HeapEntryIdsAndArrayShift) {
"for (var i = 0; i < 1; ++i)\n"
" a.shift();\n");
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
const v8::HeapSnapshot* snapshot2 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot2));
@@ -772,7 +772,7 @@ TEST(HeapEntryIdsAndGC) {
const v8::HeapSnapshot* snapshot1 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot1));
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
const v8::HeapSnapshot* snapshot2 = heap_profiler->TakeHeapSnapshot();
CHECK(ValidateSnapshot(snapshot2));
@@ -1101,7 +1101,7 @@ TEST(HeapSnapshotObjectsStats) {
  // We have to call GC 6 times. Otherwise the leftover garbage
  // would make the test flaky.
for (int i = 0; i < 6; ++i) {
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
v8::SnapshotObjectId initial_id;
@@ -2671,7 +2671,7 @@ TEST(WeakContainers) {
i::FLAG_allow_natives_syntax = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
- if (!CcTest::i_isolate()->use_crankshaft()) return;
+ if (!CcTest::i_isolate()->use_optimizer()) return;
v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
CompileRun(
"function foo(a) { return a.x; }\n"
@@ -2905,7 +2905,7 @@ TEST(SamplingHeapProfiler) {
" eval(\"new Array(100)\");\n"
"}\n");
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
std::unique_ptr<v8::AllocationProfile> profile(
heap_profiler->GetAllocationProfile());
diff --git a/deps/v8/test/cctest/test-identity-map.cc b/deps/v8/test/cctest/test-identity-map.cc
index f57036afc9..a16298b09b 100644
--- a/deps/v8/test/cctest/test-identity-map.cc
+++ b/deps/v8/test/cctest/test-identity-map.cc
@@ -187,7 +187,6 @@ class IdentityMapTester : public HandleAndZoneScope {
void Rehash() { map.Rehash(); }
};
-
TEST(Find_smi_not_found) {
IdentityMapTester t;
for (int i = 0; i < 100; i++) {
@@ -673,7 +672,6 @@ TEST(Collisions_7) { CollisionTest(7); }
TEST(Resize) { CollisionTest(9, false, true); }
TEST(Rehash) { CollisionTest(11, true, false); }
-
TEST(ExplicitGC) {
IdentityMapTester t;
Handle<Object> num_keys[] = {t.num(2.1), t.num(2.4), t.num(3.3), t.num(4.3),
@@ -695,7 +693,6 @@ TEST(ExplicitGC) {
}
}
-
TEST(CanonicalHandleScope) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = CcTest::heap();
@@ -738,7 +735,7 @@ TEST(CanonicalHandleScope) {
Handle<String> string2(*string1);
CHECK_EQ(number1.location(), number2.location());
CHECK_EQ(string1.location(), string2.location());
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
Handle<HeapNumber> number3(*number2);
Handle<String> string3(*string2);
CHECK_EQ(number1.location(), number3.location());
@@ -775,5 +772,54 @@ TEST(CanonicalHandleScope) {
}
}
+TEST(GCShortCutting) {
+ IdentityMapTester t;
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ const int kDummyValue = 0;
+
+ for (int i = 0; i < 16; i++) {
+ // Insert a varying number of Smis as padding to ensure some tests straddle
+ // a boundary where the thin string short-cutting will cause size_ to be
+ // greater than capacity_ if not corrected by IdentityMap
+ // (see crbug.com/704132).
+ for (int j = 0; j < i; j++) {
+ t.map.Set(t.smi(j), reinterpret_cast<void*>(kDummyValue));
+ }
+
+ Handle<String> thin_string =
+ factory->NewStringFromAsciiChecked("thin_string");
+ Handle<String> internalized_string =
+ factory->InternalizeString(thin_string);
+ DCHECK_IMPLIES(FLAG_thin_strings, thin_string->IsThinString());
+ DCHECK_NE(*thin_string, *internalized_string);
+
+ // Insert both keys into the map.
+ t.map.Set(thin_string, &thin_string);
+ t.map.Set(internalized_string, &internalized_string);
+
+ // Do an explicit, real GC; this should short-cut the thin string to point
+ // to the internalized string.
+ t.heap()->CollectGarbage(i::NEW_SPACE,
+ i::GarbageCollectionReason::kTesting);
+ DCHECK_IMPLIES(FLAG_thin_strings && !FLAG_optimize_for_size,
+ *thin_string == *internalized_string);
+
+ // Check that the entry for each key points to one of the handles.
+ void** thin_string_entry = t.map.Get(thin_string);
+ CHECK(*thin_string_entry == &thin_string ||
+ *thin_string_entry == &internalized_string);
+ void** internalized_string_entry = t.map.Get(internalized_string);
+ CHECK(*internalized_string_entry == &thin_string ||
+ *internalized_string_entry == &internalized_string);
+
+ // Trigger resize.
+ for (int j = 0; j < 16; j++) {
+ t.map.Set(t.smi(j + 16), reinterpret_cast<void*>(kDummyValue));
+ }
+ t.map.Clear();
+ }
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-macro-assembler-arm.cc b/deps/v8/test/cctest/test-macro-assembler-arm.cc
index 4de4647f98..d1748b0be3 100644
--- a/deps/v8/test/cctest/test-macro-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-arm.cc
@@ -169,7 +169,7 @@ TEST(ExtractLane) {
__ ExtractLane(r5, q1, NeonS32, i);
__ str(r5, MemOperand(r0, offsetof(T, i32x4_low) + 4 * i));
SwVfpRegister si = SwVfpRegister::from_code(i);
- __ ExtractLane(si, q1, r4, i);
+ __ ExtractLane(si, q1, i);
__ vstr(si, r0, offsetof(T, f32x4_low) + 4 * i);
}
@@ -203,7 +203,7 @@ TEST(ExtractLane) {
__ ExtractLane(r5, q15, NeonS32, i);
__ str(r5, MemOperand(r0, offsetof(T, i32x4_high) + 4 * i));
SwVfpRegister si = SwVfpRegister::from_code(i);
- __ ExtractLane(si, q15, r4, i);
+ __ ExtractLane(si, q15, i);
__ vstr(si, r0, offsetof(T, f32x4_high) + 4 * i);
}
@@ -304,8 +304,6 @@ TEST(ReplaceLane) {
__ stm(db_w, sp, r4.bit() | r5.bit() | r6.bit() | r7.bit() | lr.bit());
- const Register kScratch = r5;
-
__ veor(q0, q0, q0); // Zero
__ veor(q1, q1, q1); // Zero
for (int i = 0; i < 4; i++) {
@@ -313,7 +311,7 @@ TEST(ReplaceLane) {
__ ReplaceLane(q0, q0, r4, NeonS32, i);
SwVfpRegister si = SwVfpRegister::from_code(i);
__ vmov(si, r4);
- __ ReplaceLane(q1, q1, si, kScratch, i);
+ __ ReplaceLane(q1, q1, si, i);
}
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, i32x4_low))));
__ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
@@ -344,7 +342,7 @@ TEST(ReplaceLane) {
__ ReplaceLane(q14, q14, r4, NeonS32, i);
SwVfpRegister si = SwVfpRegister::from_code(i);
__ vmov(si, r4);
- __ ReplaceLane(q15, q15, si, kScratch, i);
+ __ ReplaceLane(q15, q15, si, i);
}
__ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, i32x4_high))));
__ vst1(Neon8, NeonListOperand(q14), NeonMemOperand(r4));
@@ -405,115 +403,4 @@ TEST(ReplaceLane) {
}
}
-#define CHECK_EQ_32X4(field, v0, v1, v2, v3) \
- CHECK_EQ(v0, t.field[0]); \
- CHECK_EQ(v1, t.field[1]); \
- CHECK_EQ(v2, t.field[2]); \
- CHECK_EQ(v3, t.field[3]);
-
-TEST(Swizzle) {
- if (!CpuFeatures::IsSupported(NEON)) return;
-
- // Allocate an executable page of memory.
- size_t actual_size;
- byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
- Assembler::kMinimalBufferSize, &actual_size, true));
- CHECK(buffer);
- Isolate* isolate = CcTest::i_isolate();
- HandleScope handles(isolate);
- MacroAssembler assembler(isolate, buffer, static_cast<int>(actual_size),
- v8::internal::CodeObjectRequired::kYes);
- MacroAssembler* masm = &assembler; // Create a pointer for the __ macro.
-
- typedef struct {
- int32_t _32x4_3210[4]; // identity
- int32_t _32x4_1032[4]; // high / low swap
- int32_t _32x4_0000[4]; // vdup's
- int32_t _32x4_1111[4];
- int32_t _32x4_2222[4];
- int32_t _32x4_3333[4];
- int32_t _32x4_2103[4]; // rotate left
- int32_t _32x4_0321[4]; // rotate right
- int32_t _32x4_1132[4]; // irregular
- int32_t _32x4_1132_in_place[4]; // irregular, in-place
- } T;
- T t;
-
- __ stm(db_w, sp, r4.bit() | r5.bit() | r6.bit() | r7.bit() | lr.bit());
-
- const Register kScratch = r5;
-
- // Make test vector [0, 1, 2, 3]
- __ veor(q1, q1, q1); // Zero
- for (int i = 0; i < 4; i++) {
- __ mov(r4, Operand(i));
- __ ReplaceLane(q1, q1, r4, NeonS32, i);
- }
- __ Swizzle(q0, q1, kScratch, Neon32, 0x3210);
- __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, _32x4_3210))));
- __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
-
- __ Swizzle(q0, q1, kScratch, Neon32, 0x1032);
- __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, _32x4_1032))));
- __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
-
- __ Swizzle(q0, q1, kScratch, Neon32, 0x0000);
- __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, _32x4_0000))));
- __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
-
- __ Swizzle(q0, q1, kScratch, Neon32, 0x1111);
- __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, _32x4_1111))));
- __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
-
- __ Swizzle(q0, q1, kScratch, Neon32, 0x2222);
- __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, _32x4_2222))));
- __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
-
- __ Swizzle(q0, q1, kScratch, Neon32, 0x3333);
- __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, _32x4_3333))));
- __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
-
- __ Swizzle(q0, q1, kScratch, Neon32, 0x2103);
- __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, _32x4_2103))));
- __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
-
- __ Swizzle(q0, q1, kScratch, Neon32, 0x0321);
- __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, _32x4_0321))));
- __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
-
- __ Swizzle(q0, q1, kScratch, Neon32, 0x1132);
- __ add(r4, r0, Operand(static_cast<int32_t>(offsetof(T, _32x4_1132))));
- __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
-
- __ vmov(q0, q1);
- __ Swizzle(q0, q0, kScratch, Neon32, 0x1132);
- __ add(r4, r0,
- Operand(static_cast<int32_t>(offsetof(T, _32x4_1132_in_place))));
- __ vst1(Neon8, NeonListOperand(q0), NeonMemOperand(r4));
-
- __ ldm(ia_w, sp, r4.bit() | r5.bit() | r6.bit() | r7.bit() | pc.bit());
-
- CodeDesc desc;
- masm->GetCode(&desc);
- Handle<Code> code = isolate->factory()->NewCode(
- desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
-#ifdef DEBUG
- OFStream os(stdout);
- code->Print(os);
-#endif
- F3 f = FUNCTION_CAST<F3>(code->entry());
- Object* dummy = CALL_GENERATED_CODE(isolate, f, &t, 0, 0, 0, 0);
- USE(dummy);
- CHECK_EQ_32X4(_32x4_3210, 0, 1, 2, 3);
- CHECK_EQ_32X4(_32x4_1032, 2, 3, 0, 1);
- CHECK_EQ_32X4(_32x4_0000, 0, 0, 0, 0);
- CHECK_EQ_32X4(_32x4_1111, 1, 1, 1, 1);
- CHECK_EQ_32X4(_32x4_2222, 2, 2, 2, 2);
- CHECK_EQ_32X4(_32x4_3333, 3, 3, 3, 3);
- CHECK_EQ_32X4(_32x4_2103, 3, 0, 1, 2);
- CHECK_EQ_32X4(_32x4_0321, 1, 2, 3, 0);
- CHECK_EQ_32X4(_32x4_1132, 2, 3, 1, 1);
- CHECK_EQ_32X4(_32x4_1132_in_place, 2, 3, 1, 1);
-}
-
#undef __
diff --git a/deps/v8/test/cctest/test-macro-assembler-mips64.cc b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
index 9527e620bd..37517d0d42 100644
--- a/deps/v8/test/cctest/test-macro-assembler-mips64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-mips64.cc
@@ -68,40 +68,40 @@ TEST(BYTESWAP) {
MacroAssembler* masm = &assembler;
- __ ld(a4, MemOperand(a0, offsetof(T, r1)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r1)));
__ nop();
__ ByteSwapSigned(a4, a4, 8);
- __ sd(a4, MemOperand(a0, offsetof(T, r1)));
+ __ Sd(a4, MemOperand(a0, offsetof(T, r1)));
- __ ld(a4, MemOperand(a0, offsetof(T, r2)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r2)));
__ nop();
__ ByteSwapSigned(a4, a4, 4);
- __ sd(a4, MemOperand(a0, offsetof(T, r2)));
+ __ Sd(a4, MemOperand(a0, offsetof(T, r2)));
- __ ld(a4, MemOperand(a0, offsetof(T, r3)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r3)));
__ nop();
__ ByteSwapSigned(a4, a4, 2);
- __ sd(a4, MemOperand(a0, offsetof(T, r3)));
+ __ Sd(a4, MemOperand(a0, offsetof(T, r3)));
- __ ld(a4, MemOperand(a0, offsetof(T, r4)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r4)));
__ nop();
__ ByteSwapSigned(a4, a4, 1);
- __ sd(a4, MemOperand(a0, offsetof(T, r4)));
+ __ Sd(a4, MemOperand(a0, offsetof(T, r4)));
- __ ld(a4, MemOperand(a0, offsetof(T, r5)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r5)));
__ nop();
__ ByteSwapUnsigned(a4, a4, 1);
- __ sd(a4, MemOperand(a0, offsetof(T, r5)));
+ __ Sd(a4, MemOperand(a0, offsetof(T, r5)));
- __ ld(a4, MemOperand(a0, offsetof(T, r6)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r6)));
__ nop();
__ ByteSwapUnsigned(a4, a4, 2);
- __ sd(a4, MemOperand(a0, offsetof(T, r6)));
+ __ Sd(a4, MemOperand(a0, offsetof(T, r6)));
- __ ld(a4, MemOperand(a0, offsetof(T, r7)));
+ __ Ld(a4, MemOperand(a0, offsetof(T, r7)));
__ nop();
__ ByteSwapUnsigned(a4, a4, 4);
- __ sd(a4, MemOperand(a0, offsetof(T, r7)));
+ __ Sd(a4, MemOperand(a0, offsetof(T, r7)));
__ jr(ra);
__ nop();
@@ -151,7 +151,7 @@ TEST(LoadConstants) {
for (int i = 0; i < 64; i++) {
// Load constant.
__ li(a5, Operand(refConstants[i]));
- __ sd(a5, MemOperand(a4));
+ __ Sd(a5, MemOperand(a4));
__ Daddu(a4, a4, Operand(kPointerSize));
}
@@ -311,7 +311,7 @@ TEST(jump_tables5) {
__ addiupc(at, 6 + 1);
__ Dlsa(at, at, a0, 3);
- __ ld(at, MemOperand(at));
+ __ Ld(at, MemOperand(at));
__ jalr(at);
__ nop(); // Branch delay slot nop.
__ bc(&done);
@@ -1502,7 +1502,7 @@ TEST(min_max_nan) {
auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) {
__ bind(nan);
__ LoadRoot(at, Heap::kNanValueRootIndex);
- __ ldc1(dst, FieldMemOperand(at, HeapNumber::kValueOffset));
+ __ Ldc1(dst, FieldMemOperand(at, HeapNumber::kValueOffset));
__ Branch(back);
};
@@ -1517,10 +1517,10 @@ TEST(min_max_nan) {
__ push(s6);
__ InitializeRootRegister();
- __ ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
- __ ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
- __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
- __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
+ __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Lwc1(f2, MemOperand(a0, offsetof(TestFloat, e)));
+ __ Lwc1(f6, MemOperand(a0, offsetof(TestFloat, f)));
__ Float64Min(f10, f4, f8, &handle_mind_nan);
__ bind(&back_mind_nan);
__ Float64Max(f12, f4, f8, &handle_maxd_nan);
@@ -1529,10 +1529,10 @@ TEST(min_max_nan) {
__ bind(&back_mins_nan);
__ Float32Max(f16, f2, f6, &handle_maxs_nan);
__ bind(&back_maxs_nan);
- __ sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
- __ sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
- __ swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
- __ swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
+ __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, d)));
+ __ Swc1(f14, MemOperand(a0, offsetof(TestFloat, g)));
+ __ Swc1(f16, MemOperand(a0, offsetof(TestFloat, h)));
__ pop(s6);
__ jr(ra);
__ nop();
@@ -1988,11 +1988,11 @@ static ::F4 GenerateMacroFloat32MinMax(MacroAssembler* masm) {
Label done_max_abc, done_max_aab, done_max_aba;
#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \
- __ lwc1(x, MemOperand(a0, offsetof(Inputs, src1_))); \
- __ lwc1(y, MemOperand(a0, offsetof(Inputs, src2_))); \
+ __ Lwc1(x, MemOperand(a0, offsetof(Inputs, src1_))); \
+ __ Lwc1(y, MemOperand(a0, offsetof(Inputs, src2_))); \
__ fminmax(res, x, y, &ool); \
__ bind(&done); \
- __ swc1(a, MemOperand(a1, offsetof(Results, res_field)))
+ __ Swc1(a, MemOperand(a1, offsetof(Results, res_field)))
// a = min(b, c);
FLOAT_MIN_MAX(Float32Min, a, b, c, done_min_abc, ool_min_abc, min_abc_);
@@ -2131,11 +2131,11 @@ static ::F4 GenerateMacroFloat64MinMax(MacroAssembler* masm) {
Label done_max_abc, done_max_aab, done_max_aba;
#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \
- __ ldc1(x, MemOperand(a0, offsetof(Inputs, src1_))); \
- __ ldc1(y, MemOperand(a0, offsetof(Inputs, src2_))); \
+ __ Ldc1(x, MemOperand(a0, offsetof(Inputs, src1_))); \
+ __ Ldc1(y, MemOperand(a0, offsetof(Inputs, src2_))); \
__ fminmax(res, x, y, &ool); \
__ bind(&done); \
- __ sdc1(a, MemOperand(a1, offsetof(Results, res_field)))
+ __ Sdc1(a, MemOperand(a1, offsetof(Results, res_field)))
// a = min(b, c);
FLOAT_MIN_MAX(Float64Min, a, b, c, done_min_abc, ool_min_abc, min_abc_);
diff --git a/deps/v8/test/cctest/test-mementos.cc b/deps/v8/test/cctest/test-mementos.cc
index 5c9ac2e327..af480877e2 100644
--- a/deps/v8/test/cctest/test-mementos.cc
+++ b/deps/v8/test/cctest/test-mementos.cc
@@ -46,7 +46,7 @@ static void SetUpNewSpaceWithPoisonedMementoAtTop() {
NewSpace* new_space = heap->new_space();
// Make sure we can allocate some objects without causing a GC later.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// Allocate a string; the GC may suspect a memento behind the string.
Handle<SeqOneByteString> string =
@@ -57,7 +57,8 @@ static void SetUpNewSpaceWithPoisonedMementoAtTop() {
// site pointer.
AllocationMemento* memento =
reinterpret_cast<AllocationMemento*>(new_space->top() + kHeapObjectTag);
- memento->set_map_no_write_barrier(heap->allocation_memento_map());
+ memento->set_map_after_allocation(heap->allocation_memento_map(),
+ SKIP_WRITE_BARRIER);
memento->set_allocation_site(
reinterpret_cast<AllocationSite*>(kHeapObjectTag), SKIP_WRITE_BARRIER);
}
diff --git a/deps/v8/test/cctest/test-modules.cc b/deps/v8/test/cctest/test-modules.cc
index 9e8bf100d5..f63a04d011 100644
--- a/deps/v8/test/cctest/test-modules.cc
+++ b/deps/v8/test/cctest/test-modules.cc
@@ -27,9 +27,11 @@ ScriptOrigin ModuleOrigin(Local<v8::Value> resource_name, Isolate* isolate) {
return origin;
}
-MaybeLocal<Module> AlwaysEmptyResolveCallback(Local<Context> context,
- Local<String> specifier,
- Local<Module> referrer) {
+MaybeLocal<Module> FailAlwaysResolveCallback(Local<Context> context,
+ Local<String> specifier,
+ Local<Module> referrer) {
+ Isolate* isolate = context->GetIsolate();
+ isolate->ThrowException(v8_str("boom"));
return MaybeLocal<Module>();
}
@@ -37,18 +39,22 @@ static int g_count = 0;
MaybeLocal<Module> FailOnSecondCallResolveCallback(Local<Context> context,
Local<String> specifier,
Local<Module> referrer) {
- if (g_count++ > 0) return MaybeLocal<Module>();
+ Isolate* isolate = CcTest::isolate();
+ if (g_count++ > 0) {
+ isolate->ThrowException(v8_str("booom"));
+ return MaybeLocal<Module>();
+ }
Local<String> source_text = v8_str("");
- ScriptOrigin origin = ModuleOrigin(v8_str("module.js"), CcTest::isolate());
+ ScriptOrigin origin = ModuleOrigin(v8_str("module.js"), isolate);
ScriptCompiler::Source source(source_text, origin);
- return ScriptCompiler::CompileModule(CcTest::isolate(), &source)
- .ToLocalChecked();
+ return ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
}
TEST(ModuleInstantiationFailures) {
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
LocalContext env;
+ v8::TryCatch try_catch(isolate);
Local<String> source_text = v8_str(
"import './foo.js';"
@@ -62,14 +68,26 @@ TEST(ModuleInstantiationFailures) {
CHECK(v8_str("./bar.js")->StrictEquals(module->GetModuleRequest(1)));
// Instantiation should fail.
- CHECK(!module->Instantiate(env.local(), AlwaysEmptyResolveCallback));
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ CHECK(!module->Instantiate(env.local(), FailAlwaysResolveCallback));
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("boom")));
+ }
// Start over again...
module = ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
// Instantiation should fail if a sub-module fails to resolve.
g_count = 0;
- CHECK(!module->Instantiate(env.local(), FailOnSecondCallResolveCallback));
+ {
+ v8::TryCatch inner_try_catch(isolate);
+ CHECK(!module->Instantiate(env.local(), FailOnSecondCallResolveCallback));
+ CHECK(inner_try_catch.HasCaught());
+ CHECK(inner_try_catch.Exception()->StrictEquals(v8_str("booom")));
+ }
+
+ CHECK(!try_catch.HasCaught());
}
static MaybeLocal<Module> CompileSpecifierAsModuleResolveCallback(
@@ -84,6 +102,7 @@ TEST(ModuleEvaluation) {
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
LocalContext env;
+ v8::TryCatch try_catch(isolate);
Local<String> source_text = v8_str(
"import 'Object.expando = 5';"
@@ -96,12 +115,15 @@ TEST(ModuleEvaluation) {
CompileSpecifierAsModuleResolveCallback));
CHECK(!module->Evaluate(env.local()).IsEmpty());
ExpectInt32("Object.expando", 10);
+
+ CHECK(!try_catch.HasCaught());
}
TEST(ModuleEvaluationCompletion1) {
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
LocalContext env;
+ v8::TryCatch try_catch(isolate);
const char* sources[] = {
"",
@@ -133,12 +155,15 @@ TEST(ModuleEvaluationCompletion1) {
CompileSpecifierAsModuleResolveCallback));
CHECK(module->Evaluate(env.local()).ToLocalChecked()->IsUndefined());
}
+
+ CHECK(!try_catch.HasCaught());
}
TEST(ModuleEvaluationCompletion2) {
Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
LocalContext env;
+ v8::TryCatch try_catch(isolate);
const char* sources[] = {
"'gaga'; ",
@@ -171,6 +196,8 @@ TEST(ModuleEvaluationCompletion2) {
.ToLocalChecked()
->StrictEquals(v8_str("gaga")));
}
+
+ CHECK(!try_catch.HasCaught());
}
} // anonymous namespace
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 196c154bae..c2ee2b2452 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -78,7 +78,7 @@ TEST(ScanKeywords) {
{
auto stream = i::ScannerStream::ForTesting(keyword, length);
i::Scanner scanner(&unicode_cache);
- scanner.Initialize(stream.get());
+ scanner.Initialize(stream.get(), false);
CHECK_EQ(key_token.token, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
@@ -86,7 +86,7 @@ TEST(ScanKeywords) {
{
auto stream = i::ScannerStream::ForTesting(keyword, length - 1);
i::Scanner scanner(&unicode_cache);
- scanner.Initialize(stream.get());
+ scanner.Initialize(stream.get(), false);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
@@ -97,7 +97,7 @@ TEST(ScanKeywords) {
buffer[length] = chars_to_append[j];
auto stream = i::ScannerStream::ForTesting(buffer, length + 1);
i::Scanner scanner(&unicode_cache);
- scanner.Initialize(stream.get());
+ scanner.Initialize(stream.get(), false);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
@@ -107,7 +107,7 @@ TEST(ScanKeywords) {
buffer[length - 1] = '_';
auto stream = i::ScannerStream::ForTesting(buffer, length);
i::Scanner scanner(&unicode_cache);
- scanner.Initialize(stream.get());
+ scanner.Initialize(stream.get(), false);
CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
CHECK_EQ(i::Token::EOS, scanner.Next());
}
@@ -173,7 +173,7 @@ TEST(ScanHTMLEndComments) {
const char* source = tests[i];
auto stream = i::ScannerStream::ForTesting(source);
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(stream.get());
+ scanner.Initialize(stream.get(), false);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->ast_string_constants(),
@@ -192,7 +192,7 @@ TEST(ScanHTMLEndComments) {
const char* source = fail_tests[i];
auto stream = i::ScannerStream::ForTesting(source);
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(stream.get());
+ scanner.Initialize(stream.get(), false);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->ast_string_constants(),
@@ -209,6 +209,28 @@ TEST(ScanHTMLEndComments) {
}
}
+TEST(ScanHtmlComments) {
+ const char* src = "a <!-- b --> c";
+ i::UnicodeCache unicode_cache;
+
+ // Disallow HTML comments.
+ {
+ auto stream = i::ScannerStream::ForTesting(src);
+ i::Scanner scanner(&unicode_cache);
+ scanner.Initialize(stream.get(), true);
+ CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
+ CHECK_EQ(i::Token::ILLEGAL, scanner.Next());
+ }
+
+ // Skip HTML comments:
+ {
+ auto stream = i::ScannerStream::ForTesting(src);
+ i::Scanner scanner(&unicode_cache);
+ scanner.Initialize(stream.get(), false);
+ CHECK_EQ(i::Token::IDENTIFIER, scanner.Next());
+ CHECK_EQ(i::Token::EOS, scanner.Next());
+ }
+}
class ScriptResource : public v8::String::ExternalOneByteStringResource {
public:
@@ -365,7 +387,7 @@ TEST(StandAlonePreParser) {
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(stream.get());
+ scanner.Initialize(stream.get(), false);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
@@ -401,7 +423,7 @@ TEST(StandAlonePreParserNoNatives) {
for (int i = 0; programs[i]; i++) {
auto stream = i::ScannerStream::ForTesting(programs[i]);
i::Scanner scanner(isolate->unicode_cache());
- scanner.Initialize(stream.get());
+ scanner.Initialize(stream.get(), false);
// Preparser defaults to disallowing natives syntax.
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
@@ -471,7 +493,7 @@ TEST(RegressChromium62639) {
auto stream = i::ScannerStream::ForTesting(program);
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(stream.get());
+ scanner.Initialize(stream.get(), false);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
&zone, CcTest::i_isolate()->ast_string_constants(),
@@ -548,7 +570,7 @@ TEST(PreParseOverflow) {
auto stream = i::ScannerStream::ForTesting(program.get(), kProgramSize);
i::Scanner scanner(isolate->unicode_cache());
- scanner.Initialize(stream.get());
+ scanner.Initialize(stream.get(), false);
i::Zone zone(CcTest::i_isolate()->allocator(), ZONE_NAME);
i::AstValueFactory ast_value_factory(
@@ -568,7 +590,7 @@ void TestStreamScanner(i::Utf16CharacterStream* stream,
int skip_pos = 0, // Zero means not skipping.
int skip_to = 0) {
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(stream);
+ scanner.Initialize(stream, false);
int i = 0;
do {
@@ -646,7 +668,7 @@ void TestScanRegExp(const char* re_source, const char* expected) {
auto stream = i::ScannerStream::ForTesting(re_source);
i::HandleScope scope(CcTest::i_isolate());
i::Scanner scanner(CcTest::i_isolate()->unicode_cache());
- scanner.Initialize(stream.get());
+ scanner.Initialize(stream.get(), false);
i::Token::Value start = scanner.peek();
CHECK(start == i::Token::DIV || start == i::Token::ASSIGN_DIV);
@@ -845,8 +867,7 @@ TEST(ScopeUsesArgumentsSuperThis) {
}
}
-
-static void CheckParsesToNumber(const char* source, bool with_dot) {
+static void CheckParsesToNumber(const char* source) {
v8::V8::Initialize();
HandleAndZoneScope handles;
@@ -877,40 +898,27 @@ static void CheckParsesToNumber(const char* source, bool with_dot) {
CHECK(fun->body()->at(0)->IsReturnStatement());
i::ReturnStatement* ret = fun->body()->at(0)->AsReturnStatement();
i::Literal* lit = ret->expression()->AsLiteral();
- if (lit != NULL) {
- const i::AstValue* val = lit->raw_value();
- CHECK(with_dot == val->ContainsDot());
- } else if (with_dot) {
- i::BinaryOperation* bin = ret->expression()->AsBinaryOperation();
- CHECK(bin != NULL);
- CHECK_EQ(i::Token::MUL, bin->op());
- i::Literal* rlit = bin->right()->AsLiteral();
- const i::AstValue* val = rlit->raw_value();
- CHECK(with_dot == val->ContainsDot());
- CHECK_EQ(1.0, val->AsNumber());
- }
+ CHECK(lit->IsNumberLiteral());
}
TEST(ParseNumbers) {
- CheckParsesToNumber("1.", true);
- CheckParsesToNumber("1.34", true);
- CheckParsesToNumber("134", false);
- CheckParsesToNumber("134e44", false);
- CheckParsesToNumber("134.e44", true);
- CheckParsesToNumber("134.44e44", true);
- CheckParsesToNumber(".44", true);
-
- CheckParsesToNumber("-1.", true);
- CheckParsesToNumber("-1.0", true);
- CheckParsesToNumber("-1.34", true);
- CheckParsesToNumber("-134", false);
- CheckParsesToNumber("-134e44", false);
- CheckParsesToNumber("-134.e44", true);
- CheckParsesToNumber("-134.44e44", true);
- CheckParsesToNumber("-.44", true);
+ CheckParsesToNumber("1.");
+ CheckParsesToNumber("1.34");
+ CheckParsesToNumber("134");
+ CheckParsesToNumber("134e44");
+ CheckParsesToNumber("134.e44");
+ CheckParsesToNumber("134.44e44");
+ CheckParsesToNumber(".44");
- CheckParsesToNumber("+x", true);
+ CheckParsesToNumber("-1.");
+ CheckParsesToNumber("-1.0");
+ CheckParsesToNumber("-1.34");
+ CheckParsesToNumber("-134");
+ CheckParsesToNumber("-134e44");
+ CheckParsesToNumber("-134.e44");
+ CheckParsesToNumber("-134.44e44");
+ CheckParsesToNumber("-.44");
}
@@ -1334,7 +1342,7 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
&pending_error_handler,
isolate->counters()->runtime_call_stats());
SetParserFlags(&preparser, flags);
- scanner.Initialize(stream.get());
+ scanner.Initialize(stream.get(), is_module);
i::PreParser::PreParseResult result = preparser.PreParseProgram(is_module);
CHECK_EQ(i::PreParser::kPreParseSuccess, result);
}
@@ -3521,14 +3529,7 @@ static void TestMaybeAssigned(Input input, const char* variable, bool module,
i::Variable* var;
{
// Find the variable.
- for (auto it = input.location.begin(); it != input.location.end(); ++it) {
- unsigned n = *it;
- scope = scope->inner_scope();
- while (n-- > 0) {
- scope = scope->sibling();
- }
- }
- CHECK_NOT_NULL(scope);
+ scope = i::ScopeTestHelper::FindScope(scope, input.location);
const i::AstRawString* var_name =
info->ast_value_factory()->GetOneByteString(variable);
var = scope->Lookup(var_name);
@@ -4240,6 +4241,7 @@ TEST(ErrorsArrowFunctions) {
"(c, a.b) => {}",
"(a['b'], c) => {}",
"(c, a['b']) => {}",
+ "(...a = b) => b",
// crbug.com/582626
"(...rest - a) => b",
@@ -8967,6 +8969,10 @@ TEST(AsyncAwaitErrors) {
// v8:5148: assert that errors are still thrown for calls that may have been
// async functions.
"async({ foo33 = 1 })",
+
+ "async(...a = b) => b",
+ "async(...a,) => b",
+ "async(...a, b) => b",
NULL
};
@@ -10161,3 +10167,188 @@ TEST(AsyncGeneratorErrors) {
RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
arraysize(always_flags));
}
+
+TEST(LexicalLoopVariable) {
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+ LocalContext env;
+ typedef std::function<void(const i::ParseInfo& info, i::DeclarationScope*)>
+ TestCB;
+ auto TestProgram = [isolate](const char* program, TestCB test) {
+ i::Factory* const factory = isolate->factory();
+ i::Handle<i::String> source =
+ factory->NewStringFromUtf8(i::CStrVector(program)).ToHandleChecked();
+ i::Handle<i::Script> script = factory->NewScript(source);
+ i::ParseInfo info(script);
+
+ info.set_allow_lazy_parsing(false);
+ CHECK(i::parsing::ParseProgram(&info, isolate));
+ CHECK(i::Rewriter::Rewrite(&info, isolate));
+ i::DeclarationScope::Analyze(&info, isolate, i::AnalyzeMode::kRegular);
+ CHECK(info.literal() != NULL);
+
+ i::DeclarationScope* script_scope = info.literal()->scope();
+ CHECK(script_scope->is_script_scope());
+
+ test(info, script_scope);
+ };
+
+ // Check that a `let` loop variable is a stack local when it is not captured
+ // by an eval or a closure within the loop body.
+ const char* local_bindings[] = {
+ "function loop() {"
+ " for (let loop_var = 0; loop_var < 10; ++loop_var) {"
+ " }"
+ " eval('0');"
+ "}",
+
+ "function loop() {"
+ " for (let loop_var = 0; loop_var < 10; ++loop_var) {"
+ " }"
+ " function foo() {}"
+ " foo();"
+ "}",
+ };
+ for (const char* source : local_bindings) {
+ TestProgram(source, [=](const i::ParseInfo& info, i::DeclarationScope* s) {
+ i::Scope* fn = s->inner_scope();
+ CHECK(fn->is_function_scope());
+
+ i::Scope* loop_block = fn->inner_scope();
+ if (loop_block->is_function_scope()) loop_block = loop_block->sibling();
+ CHECK(loop_block->is_block_scope());
+
+ const i::AstRawString* var_name =
+ info.ast_value_factory()->GetOneByteString("loop_var");
+ i::Variable* loop_var = loop_block->LookupLocal(var_name);
+ CHECK_NOT_NULL(loop_var);
+ CHECK(loop_var->IsStackLocal());
+ CHECK_EQ(loop_block->ContextLocalCount(), 0);
+ CHECK_EQ(loop_block->inner_scope()->ContextLocalCount(), 0);
+ });
+ }
+
+ // Check that the `let` loop variable is not a stack local, and that it is
+ // duplicated in the loop body so that capturing can work correctly.
+ // In this version of the test, the inner loop block's duplicate `loop_var`
+ // binding is not captured, and is a local.
+ const char* context_bindings1[] = {
+ "function loop() {"
+ " for (let loop_var = eval('0'); loop_var < 10; ++loop_var) {"
+ " }"
+ "}",
+
+ "function loop() {"
+ " for (let loop_var = (() => (loop_var, 0))(); loop_var < 10;"
+ " ++loop_var) {"
+ " }"
+ "}"};
+ for (const char* source : context_bindings1) {
+ TestProgram(source, [=](const i::ParseInfo& info, i::DeclarationScope* s) {
+ i::Scope* fn = s->inner_scope();
+ CHECK(fn->is_function_scope());
+
+ i::Scope* loop_block = fn->inner_scope();
+ CHECK(loop_block->is_block_scope());
+
+ const i::AstRawString* var_name =
+ info.ast_value_factory()->GetOneByteString("loop_var");
+ i::Variable* loop_var = loop_block->LookupLocal(var_name);
+ CHECK_NOT_NULL(loop_var);
+ CHECK(loop_var->IsContextSlot());
+ CHECK_EQ(loop_block->ContextLocalCount(), 1);
+
+ i::Variable* loop_var2 = loop_block->inner_scope()->LookupLocal(var_name);
+ CHECK_NE(loop_var, loop_var2);
+ CHECK(loop_var2->IsStackLocal());
+ CHECK_EQ(loop_block->inner_scope()->ContextLocalCount(), 0);
+ });
+ }
+
+ // Check that the `let` loop variable is not a stack local, and that it is
+ // duplicated in the loop body so that capturing can work correctly.
+ // In this version of the test, the inner loop block's duplicate `loop_var`
+ // binding is captured, and must be context allocated.
+ const char* context_bindings2[] = {
+ "function loop() {"
+ " for (let loop_var = 0; loop_var < 10; ++loop_var) {"
+ " eval('0');"
+ " }"
+ "}",
+
+ "function loop() {"
+ " for (let loop_var = 0; loop_var < eval('10'); ++loop_var) {"
+ " }"
+ "}",
+
+ "function loop() {"
+ " for (let loop_var = 0; loop_var < 10; eval('++loop_var')) {"
+ " }"
+ "}",
+ };
+
+ for (const char* source : context_bindings2) {
+ TestProgram(source, [=](const i::ParseInfo& info, i::DeclarationScope* s) {
+ i::Scope* fn = s->inner_scope();
+ CHECK(fn->is_function_scope());
+
+ i::Scope* loop_block = fn->inner_scope();
+ CHECK(loop_block->is_block_scope());
+
+ const i::AstRawString* var_name =
+ info.ast_value_factory()->GetOneByteString("loop_var");
+ i::Variable* loop_var = loop_block->LookupLocal(var_name);
+ CHECK_NOT_NULL(loop_var);
+ CHECK(loop_var->IsContextSlot());
+ CHECK_EQ(loop_block->ContextLocalCount(), 1);
+
+ i::Variable* loop_var2 = loop_block->inner_scope()->LookupLocal(var_name);
+ CHECK_NE(loop_var, loop_var2);
+ CHECK(loop_var2->IsContextSlot());
+ CHECK_EQ(loop_block->inner_scope()->ContextLocalCount(), 1);
+ });
+ }
+
+ // Similar to the above, but the first block scope's variables are not
+ // captured due to the closure occurring in a nested scope.
+ const char* context_bindings3[] = {
+ "function loop() {"
+ " for (let loop_var = 0; loop_var < 10; ++loop_var) {"
+ " (() => loop_var)();"
+ " }"
+ "}",
+
+ "function loop() {"
+ " for (let loop_var = 0; loop_var < (() => (loop_var, 10))();"
+ " ++loop_var) {"
+ " }"
+ "}",
+
+ "function loop() {"
+ " for (let loop_var = 0; loop_var < 10; (() => ++loop_var)()) {"
+ " }"
+ "}",
+ };
+
+ for (const char* source : context_bindings3) {
+ TestProgram(source, [=](const i::ParseInfo& info, i::DeclarationScope* s) {
+ i::Scope* fn = s->inner_scope();
+ CHECK(fn->is_function_scope());
+
+ i::Scope* loop_block = fn->inner_scope();
+ CHECK(loop_block->is_block_scope());
+
+ const i::AstRawString* var_name =
+ info.ast_value_factory()->GetOneByteString("loop_var");
+ i::Variable* loop_var = loop_block->LookupLocal(var_name);
+ CHECK_NOT_NULL(loop_var);
+ CHECK(loop_var->IsStackLocal());
+ CHECK_EQ(loop_block->ContextLocalCount(), 0);
+
+ i::Variable* loop_var2 = loop_block->inner_scope()->LookupLocal(var_name);
+ CHECK_NE(loop_var, loop_var2);
+ CHECK(loop_var2->IsContextSlot());
+ CHECK_EQ(loop_block->inner_scope()->ContextLocalCount(), 1);
+ });
+ }
+}
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index c9af4182e5..058dd55d3c 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -1090,7 +1090,7 @@ TEST(CodeSerializerLargeCodeObject) {
Vector<const uint8_t> source =
ConstructSource(STATIC_CHAR_VECTOR("var j=1; if (j == 0) {"),
STATIC_CHAR_VECTOR("for (let i of Object.prototype);"),
- STATIC_CHAR_VECTOR("} j=7; j"), 1050);
+ STATIC_CHAR_VECTOR("} j=7; j"), 1100);
Handle<String> source_str =
isolate->factory()->NewStringFromOneByte(source).ToHandleChecked();
@@ -1191,7 +1191,7 @@ TEST(CodeSerializerLargeCodeObjectWithIncrementalMarking) {
// We should have missed a write barrier. Complete incremental marking
// to flush out the bug.
heap::SimulateIncrementalMarking(heap, true);
- CcTest::CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
Handle<JSFunction> copy_fun =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
@@ -2547,51 +2547,6 @@ TEST(SnapshotCreatorIncludeGlobalProxy) {
delete[] blob.data;
}
-UNINITIALIZED_TEST(ReinitializeStringHashSeedNotRehashable) {
- DisableAlwaysOpt();
- i::FLAG_rehash_snapshot = true;
- i::FLAG_hash_seed = 42;
- i::FLAG_allow_natives_syntax = true;
- v8::StartupData blob;
- {
- v8::SnapshotCreator creator;
- v8::Isolate* isolate = creator.GetIsolate();
- {
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- v8::Context::Scope context_scope(context);
- // Create dictionary mode object.
- CompileRun(
- "var a = {};"
- "a.b = 1;"
- "a.c = 2;"
- "delete a.b;");
- ExpectInt32("a.c", 2);
- creator.SetDefaultContext(context);
- }
- blob =
- creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
- }
-
- i::FLAG_hash_seed = 1337;
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- create_params.snapshot_blob = &blob;
- v8::Isolate* isolate = v8::Isolate::New(create_params);
- {
- // Check that no rehashing has been performed.
- CHECK_EQ(42, reinterpret_cast<i::Isolate*>(isolate)->heap()->HashSeed());
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> context = v8::Context::New(isolate);
- CHECK(!context.IsEmpty());
- v8::Context::Scope context_scope(context);
- ExpectInt32("a.c", 2);
- }
- isolate->Dispose();
- delete[] blob.data;
-}
-
TEST(SerializationMemoryStats) {
FLAG_profile_deserialization = true;
FLAG_always_opt = false;
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 0829f25e97..615fdeb32e 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -1190,6 +1190,9 @@ class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
};
TEST(InternalizeExternal) {
+ // TODO(mlippautz): Remove once we add support for forwarding ThinStrings in
+ // minor MC.
+ if (FLAG_minor_mc) return;
FLAG_thin_strings = true;
CcTest::InitializeVM();
i::Isolate* isolate = CcTest::i_isolate();
diff --git a/deps/v8/test/cctest/test-symbols.cc b/deps/v8/test/cctest/test-symbols.cc
index 0d032e96ce..3f184de6a0 100644
--- a/deps/v8/test/cctest/test-symbols.cc
+++ b/deps/v8/test/cctest/test-symbols.cc
@@ -72,7 +72,7 @@ TEST(Create) {
}
CcTest::CollectGarbage(i::NEW_SPACE);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// All symbols should be distinct.
for (int i = 0; i < kNumSymbols; ++i) {
diff --git a/deps/v8/test/cctest/test-types.cc b/deps/v8/test/cctest/test-types.cc
index 3b5f1c4511..7b89c9f693 100644
--- a/deps/v8/test/cctest/test-types.cc
+++ b/deps/v8/test/cctest/test-types.cc
@@ -631,11 +631,12 @@ struct Tests {
CheckSub(T.Object, T.Receiver);
CheckSub(T.Proxy, T.Receiver);
+ CheckSub(T.Array, T.Object);
CheckSub(T.OtherObject, T.Object);
CheckSub(T.OtherUndetectable, T.Object);
- CheckSub(T.OtherObject, T.Object);
CheckUnordered(T.Object, T.Proxy);
+ CheckUnordered(T.Array, T.Undetectable);
CheckUnordered(T.OtherObject, T.Undetectable);
// Subtyping between concrete structural types
@@ -646,7 +647,7 @@ struct Tests {
CheckSub(T.ObjectConstant1, T.Object);
CheckSub(T.ObjectConstant2, T.Object);
CheckSub(T.ArrayConstant, T.Object);
- CheckSub(T.ArrayConstant, T.OtherObject);
+ CheckSub(T.ArrayConstant, T.Array);
CheckSub(T.ArrayConstant, T.Receiver);
CheckSub(T.UninitializedConstant, T.Internal);
CheckUnordered(T.ObjectConstant1, T.ObjectConstant2);
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
index a81fdfccd6..7911412c2d 100644
--- a/deps/v8/test/cctest/test-unboxed-doubles.cc
+++ b/deps/v8/test/cctest/test-unboxed-doubles.cc
@@ -916,6 +916,10 @@ TEST(Regress436816) {
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
+ // Force a GC to free up space before we allocate objects whose
+ // mid-test states would fail heap verification.
+ CcTest::CollectAllGarbage();
+
const int kPropsCount = kSmiValueSize * 3;
TestPropertyKind props[kPropsCount];
for (int i = 0; i < kPropsCount; i++) {
@@ -951,7 +955,7 @@ TEST(Regress436816) {
CHECK(object->map()->HasFastPointerLayout());
// Trigger GCs and heap verification.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
@@ -1010,7 +1014,7 @@ TEST(DescriptorArrayTrimming) {
// Call GC that should trim both |map|'s descriptor array and layout
// descriptor.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
// The unused tail of the layout descriptor is now "clean" again.
CHECK(map->layout_descriptor()->IsConsistentWithMap(*map, true));
diff --git a/deps/v8/test/cctest/test-weakmaps.cc b/deps/v8/test/cctest/test-weakmaps.cc
index 79edee5577..9c88456eb5 100644
--- a/deps/v8/test/cctest/test-weakmaps.cc
+++ b/deps/v8/test/cctest/test-weakmaps.cc
@@ -197,7 +197,7 @@ TEST(Regress2060a) {
// Force compacting garbage collection.
CHECK(FLAG_always_compact);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
@@ -239,9 +239,9 @@ TEST(Regress2060b) {
// Force compacting garbage collection. The subsequent collections are used
// to verify that key references were actually updated.
CHECK(FLAG_always_compact);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
}
@@ -259,5 +259,5 @@ TEST(Regress399527) {
// The weak map is marked black here but leaving the handle scope will make
// the object unreachable. Aborting incremental marking will clear all the
// marking bits which makes the weak map garbage.
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
diff --git a/deps/v8/test/cctest/test-weaksets.cc b/deps/v8/test/cctest/test-weaksets.cc
index 13e3b65886..f0d3354f4f 100644
--- a/deps/v8/test/cctest/test-weaksets.cc
+++ b/deps/v8/test/cctest/test-weaksets.cc
@@ -196,7 +196,7 @@ TEST(WeakSet_Regress2060a) {
// Force compacting garbage collection.
CHECK(FLAG_always_compact);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
}
@@ -238,7 +238,7 @@ TEST(WeakSet_Regress2060b) {
// Force compacting garbage collection. The subsequent collections are used
// to verify that key references were actually updated.
CHECK(FLAG_always_compact);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
- CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
+ CcTest::CollectAllGarbage();
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
index dff57e8f92..06c787dba1 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-64.cc
@@ -9,12 +9,12 @@
#include "src/assembler-inl.h"
#include "src/base/bits.h"
#include "src/objects-inl.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
// If the target architecture is 64-bit, enable all tests.
#if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
@@ -1614,9 +1614,6 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
// =========================================================================
std::vector<byte> code;
- // Load the offset for the store.
- ADD_CODE(code, WASM_ZERO);
-
// Load the arguments.
for (int i = 0; i < num_params; i++) {
int offset = (i + 1) * kElemSize;
@@ -1626,10 +1623,13 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
// Call the selector function.
ADD_CODE(code, WASM_CALL_FUNCTION0(t.function_index()));
+ // Store the result in a local.
+ byte local_index = r.AllocateLocal(WasmOpcodes::ValueTypeFor(result));
+ ADD_CODE(code, kExprSetLocal, local_index);
+
// Store the result in memory.
ADD_CODE(code,
- static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(result, true)),
- ZERO_ALIGNMENT, ZERO_OFFSET);
+ WASM_STORE_MEM(result, WASM_ZERO, WASM_GET_LOCAL(local_index)));
// Return the expected value.
ADD_CODE(code, WASM_I32V_2(kExpected));
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
index 4f7c9210f9..cf022ef91d 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-asmjs.cc
@@ -9,12 +9,11 @@
#include "src/assembler-inl.h"
#include "src/base/platform/elapsed-timer.h"
-#include "src/wasm/wasm-macro-gen.h"
-
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
using namespace v8::base;
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index ccc0bb5865..66fa2c48d0 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -10,11 +10,11 @@
#include "src/assembler-inl.h"
#include "src/wasm/wasm-interpreter.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
using namespace v8::base;
using namespace v8::internal;
@@ -325,19 +325,11 @@ TEST(GrowMemoryPreservesData) {
}
TEST(GrowMemoryInvalidSize) {
- {
- // Grow memory by an invalid amount without initial memory.
- WasmRunner<int32_t, uint32_t> r(kExecuteInterpreted);
- BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
- CHECK_EQ(-1, r.Call(1048575));
- }
- {
- // Grow memory by an invalid amount without initial memory.
- WasmRunner<int32_t, uint32_t> r(kExecuteInterpreted);
- r.module().AddMemory(WasmModule::kPageSize);
- BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
- CHECK_EQ(-1, r.Call(1048575));
- }
+ // Grow memory by an invalid amount, with initial memory present.
+ WasmRunner<int32_t, uint32_t> r(kExecuteInterpreted);
+ r.module().AddMemory(WasmModule::kPageSize);
+ BUILD(r, WASM_GROW_MEMORY(WASM_GET_LOCAL(0)));
+ CHECK_EQ(-1, r.Call(1048575));
}
TEST(TestPossibleNondeterminism) {
@@ -431,6 +423,7 @@ TEST(WasmInterpreterActivations) {
TEST(InterpreterLoadWithoutMemory) {
WasmRunner<int32_t, int32_t> r(kExecuteInterpreted);
+ r.module().AddMemory(0);
BUILD(r, WASM_LOAD_MEM(MachineType::Int32(), WASM_GET_LOCAL(0)));
CHECK_TRAP32(r.Call(0));
}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index add2b02fd4..1ae2afb4c7 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -8,11 +8,11 @@
#include <string.h>
#include "src/assembler-inl.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
using namespace v8::base;
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
index b023afb0b6..fac0e09f66 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-module.cc
@@ -9,7 +9,6 @@
#include "src/snapshot/code-serializer.h"
#include "src/version.h"
#include "src/wasm/module-decoder.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
@@ -17,6 +16,7 @@
#include "test/cctest/cctest.h"
#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
#include "test/common/wasm/wasm-module-runner.h"
using namespace v8::base;
@@ -62,7 +62,9 @@ void TestModuleException(Zone* zone, WasmModuleBuilder* builder) {
isolate->clear_pending_exception();
}
-void ExportAsMain(WasmFunctionBuilder* f) { f->ExportAs(CStrVector("main")); }
+void ExportAsMain(WasmFunctionBuilder* f) {
+ f->builder()->AddExport(CStrVector("main"), f);
+}
#define EMIT_CODE_WITH_END(f, code) \
do { \
@@ -226,7 +228,7 @@ class WasmSerializationTest {
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_i());
byte code[] = {WASM_GET_LOCAL(0), kExprI32Const, 1, kExprI32Add};
EMIT_CODE_WITH_END(f, code);
- f->ExportAs(CStrVector(kFunctionName));
+ builder->AddExport(CStrVector(kFunctionName), f);
builder->WriteTo(*buffer);
}
@@ -1098,39 +1100,38 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
ModuleOrigin::kWasmOrigin);
CHECK(!instance.is_null());
Handle<JSArrayBuffer> memory(instance->memory_buffer(), isolate);
+ void* const old_allocation_base = memory->allocation_base();
+ size_t const old_allocation_length = memory->allocation_length();
// Fake the embedder flow: create a memory object, externalize it, and grow.
Handle<WasmMemoryObject> mem_obj =
WasmMemoryObject::New(isolate, memory, 100);
- // TODO(eholk): Skipping calls to externalize when guard pages are enabled
- // for now. This will have to be dealt with when turning on guard pages as
- // currently gin assumes that it can take ownership of the ArrayBuffer.
- // Potential for crashes as this might lead to externalizing an already
- // externalized buffer.
- if (!memory->has_guard_region()) v8::Utils::ToLocal(memory)->Externalize();
- void* backing_store = memory->backing_store();
- uint64_t byte_length = NumberToSize(memory->byte_length());
+ v8::Utils::ToLocal(memory)->Externalize();
+
uint32_t result = WasmMemoryObject::Grow(isolate, mem_obj, 4);
- wasm::DetachWebAssemblyMemoryBuffer(isolate, memory, true);
+ const bool free_memory = true;
+ wasm::DetachWebAssemblyMemoryBuffer(isolate, memory, free_memory);
CHECK_EQ(16, result);
- if (!memory->has_guard_region()) {
- isolate->array_buffer_allocator()->Free(backing_store, byte_length);
- }
memory = handle(mem_obj->buffer());
- byte_length = NumberToSize(memory->byte_length());
instance->set_memory_buffer(*memory);
// Externalizing should make no difference without the JS API, as in this case
// the buffer is not detached.
- if (!memory->has_guard_region()) v8::Utils::ToLocal(memory)->Externalize();
+ v8::Utils::ToLocal(memory)->Externalize();
result = testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr,
ModuleOrigin::kWasmOrigin);
CHECK_EQ(kExpectedValue, result);
// Free the buffer as the tracker does not know about it.
- if (!memory->has_guard_region()) {
- isolate->array_buffer_allocator()->Free(
- memory->backing_store(), NumberToSize(memory->byte_length()));
- }
+ const v8::ArrayBuffer::Allocator::AllocationMode allocation_mode =
+ memory->allocation_mode();
+ CHECK_NOT_NULL(memory->allocation_base());
+ isolate->array_buffer_allocator()->Free(memory->allocation_base(),
+ memory->allocation_length(),
+ allocation_mode);
+ isolate->array_buffer_allocator()->Free(
+ old_allocation_base, old_allocation_length, allocation_mode);
+ memory->set_allocation_base(nullptr);
+ memory->set_allocation_length(0);
}
Cleanup();
}
@@ -1142,7 +1143,8 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMemMemSize) {
void* backing_store =
isolate->array_buffer_allocator()->Allocate(16 * WasmModule::kPageSize);
Handle<JSArrayBuffer> buffer = wasm::SetupArrayBuffer(
- isolate, backing_store, 16 * WasmModule::kPageSize, false, false);
+ isolate, backing_store, 16 * WasmModule::kPageSize, backing_store,
+ 16 * WasmModule::kPageSize, false, false);
Handle<WasmMemoryObject> mem_obj =
WasmMemoryObject::New(isolate, buffer, 100);
v8::Utils::ToLocal(buffer)->Externalize();
@@ -1155,3 +1157,22 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMemMemSize) {
}
Cleanup();
}
+
+TEST(Run_WasmModule_Buffer_Externalized_Detach) {
+ {
+ // Regression test for
+ // https://bugs.chromium.org/p/chromium/issues/detail?id=731046
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ HandleScope scope(isolate);
+ void* backing_store =
+ isolate->array_buffer_allocator()->Allocate(16 * WasmModule::kPageSize);
+ Handle<JSArrayBuffer> buffer = wasm::SetupArrayBuffer(
+ isolate, backing_store, 16 * WasmModule::kPageSize, backing_store,
+ 16 * WasmModule::kPageSize, false, false);
+ v8::Utils::ToLocal(buffer)->Externalize();
+ wasm::DetachWebAssemblyMemoryBuffer(isolate, buffer, true);
+ isolate->array_buffer_allocator()->Free(backing_store,
+ 16 * WasmModule::kPageSize);
+ }
+ Cleanup();
+}
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
index a0d8a07189..d10c88a584 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-relocation.cc
@@ -10,6 +10,7 @@
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/c-signature.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/wasm-macro-gen.h"
using namespace v8::internal;
using namespace v8::internal::compiler;
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index b69eefce5e..4a47248cea 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -3,10 +3,10 @@
// found in the LICENSE file.
#include "src/assembler-inl.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/wasm-macro-gen.h"
using namespace v8::base;
using namespace v8::internal;
@@ -75,7 +75,8 @@ T Maximum(T a, T b) {
}
// For float operands, Min and Max must return NaN if either operand is NaN.
-#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
template <>
float Minimum(float a, float b) {
if (std::isnan(a) || std::isnan(b))
@@ -89,7 +90,8 @@ float Maximum(float a, float b) {
return std::numeric_limits<float>::quiet_NaN();
return a >= b ? a : b;
}
-#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
template <typename T>
T UnsignedMinimum(T a, T b) {
@@ -264,16 +266,6 @@ T RecipSqrt(T a) {
return 1.0f / std::sqrt(a);
}
-template <typename T>
-T RecipRefine(T a, T b) {
- return 2.0f - a * b;
-}
-
-template <typename T>
-T RecipSqrtRefine(T a, T b) {
- return (3.0f - a * b) * 0.5f;
-}
-
} // namespace
#define WASM_SIMD_CHECK_LANE(TYPE, value, LANE_TYPE, lane_value, lane_index) \
@@ -366,6 +358,8 @@ T RecipSqrtRefine(T a, T b) {
#define WASM_SIMD_UNOP(op, x) x, WASM_SIMD_OP(op)
#define WASM_SIMD_BINOP(op, x, y) x, y, WASM_SIMD_OP(op)
#define WASM_SIMD_SHIFT_OP(op, shift, x) x, WASM_SIMD_OP(op), TO_BYTE(shift)
+#define WASM_SIMD_CONCAT_OP(op, bytes, x, y) \
+ x, y, WASM_SIMD_OP(op), TO_BYTE(bytes)
#define WASM_SIMD_SELECT(format, x, y, z) \
x, y, z, WASM_SIMD_OP(kExprS##format##Select)
// Since boolean vectors can't be checked directly, materialize them into
@@ -399,6 +393,20 @@ T RecipSqrtRefine(T a, T b) {
#define WASM_SIMD_I8x16_REPLACE_LANE(lane, x, y) \
x, y, WASM_SIMD_OP(kExprI8x16ReplaceLane), TO_BYTE(lane)
+#define WASM_SIMD_S32x4_SHUFFLE_OP(opcode, m, x, y) \
+ x, y, WASM_SIMD_OP(opcode), TO_BYTE(m[0]), TO_BYTE(m[1]), TO_BYTE(m[2]), \
+ TO_BYTE(m[3])
+#define WASM_SIMD_S16x8_SHUFFLE_OP(opcode, m, x, y) \
+ x, y, WASM_SIMD_OP(opcode), TO_BYTE(m[0]), TO_BYTE(m[1]), TO_BYTE(m[2]), \
+ TO_BYTE(m[3]), TO_BYTE(m[4]), TO_BYTE(m[5]), TO_BYTE(m[6]), \
+ TO_BYTE(m[7])
+#define WASM_SIMD_S8x16_SHUFFLE_OP(opcode, m, x, y) \
+ x, y, WASM_SIMD_OP(opcode), TO_BYTE(m[0]), TO_BYTE(m[1]), TO_BYTE(m[2]), \
+ TO_BYTE(m[3]), TO_BYTE(m[4]), TO_BYTE(m[5]), TO_BYTE(m[6]), \
+ TO_BYTE(m[7]), TO_BYTE(m[8]), TO_BYTE(m[9]), TO_BYTE(m[10]), \
+ TO_BYTE(m[11]), TO_BYTE(m[12]), TO_BYTE(m[13]), TO_BYTE(m[14]), \
+ TO_BYTE(m[15])
+
// Skip FP tests involving extremely large or extremely small values, which
// may fail due to non-IEEE-754 SIMD arithmetic on some platforms.
bool SkipFPValue(float x) {
@@ -485,7 +493,8 @@ WASM_EXEC_COMPILED_TEST(F32x4ConvertI32x4) {
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
void RunF32x4UnOpTest(WasmOpcode simd_op, FloatUnOp expected_op,
float error = 0.0f) {
FLAG_wasm_simd_prototype = true;
@@ -510,13 +519,10 @@ void RunF32x4UnOpTest(WasmOpcode simd_op, FloatUnOp expected_op,
WASM_EXEC_COMPILED_TEST(F32x4Abs) { RunF32x4UnOpTest(kExprF32x4Abs, std::abs); }
WASM_EXEC_COMPILED_TEST(F32x4Neg) { RunF32x4UnOpTest(kExprF32x4Neg, Negate); }
-#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
-
-#if SIMD_LOWERING_TARGET
-WASM_EXEC_COMPILED_TEST(F32x4Sqrt) { RunF32x4UnOpTest(kExprF32x4Sqrt, Sqrt); }
-#endif // SIMD_LOWERING_TARGET
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
static const float kApproxError = 0.01f;
WASM_EXEC_COMPILED_TEST(F32x4RecipApprox) {
@@ -526,9 +532,10 @@ WASM_EXEC_COMPILED_TEST(F32x4RecipApprox) {
WASM_EXEC_COMPILED_TEST(F32x4RecipSqrtApprox) {
RunF32x4UnOpTest(kExprF32x4RecipSqrtApprox, RecipSqrt, kApproxError);
}
-#endif // V8_TARGET_ARCH_ARM
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
void RunF32x4BinOpTest(WasmOpcode simd_op, FloatBinOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, float, float, float> r(kExecuteCompiled);
@@ -563,23 +570,11 @@ WASM_EXEC_COMPILED_TEST(F32x4_Min) {
WASM_EXEC_COMPILED_TEST(F32x4_Max) {
RunF32x4BinOpTest(kExprF32x4Max, Maximum);
}
-#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
-
-#if SIMD_LOWERING_TARGET
-WASM_EXEC_COMPILED_TEST(F32x4Div) { RunF32x4BinOpTest(kExprF32x4Div, Div); }
-#endif // SIMD_LOWERING_TARGET
-
-#if V8_TARGET_ARCH_ARM
-WASM_EXEC_COMPILED_TEST(F32x4RecipRefine) {
- RunF32x4BinOpTest(kExprF32x4RecipRefine, RecipRefine);
-}
-
-WASM_EXEC_COMPILED_TEST(F32x4RecipSqrtRefine) {
- RunF32x4BinOpTest(kExprF32x4RecipSqrtRefine, RecipSqrtRefine);
-}
-#endif // V8_TARGET_ARCH_ARM
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
void RunF32x4CompareOpTest(WasmOpcode simd_op, FloatCompareOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, float, float, int32_t> r(kExecuteCompiled);
@@ -626,7 +621,8 @@ WASM_EXEC_COMPILED_TEST(F32x4Lt) { RunF32x4CompareOpTest(kExprF32x4Lt, Less); }
WASM_EXEC_COMPILED_TEST(F32x4Le) {
RunF32x4CompareOpTest(kExprF32x4Le, LessEqual);
}
-#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
WASM_EXEC_COMPILED_TEST(I32x4Splat) {
FLAG_wasm_simd_prototype = true;
@@ -678,8 +674,8 @@ WASM_EXEC_COMPILED_TEST(I32x4ReplaceLane) {
CHECK_EQ(1, r.Call(1, 2));
}
-#if V8_TARGET_ARCH_ARM
-
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET || \
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
WASM_EXEC_COMPILED_TEST(I16x8Splat) {
FLAG_wasm_simd_prototype = true;
@@ -742,7 +738,11 @@ WASM_EXEC_COMPILED_TEST(I16x8ReplaceLane) {
CHECK_EQ(1, r.Call(1, 2));
}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET ||
+ // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64 || SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(I8x16Splat) {
FLAG_wasm_simd_prototype = true;
@@ -860,9 +860,11 @@ WASM_EXEC_COMPILED_TEST(I8x16ReplaceLane) {
CHECK_EQ(1, r.Call(1, 2));
}
-#endif // V8_TARGET_ARCH_ARM
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64 || SIMD_LOWERING_TARGET
-#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
// Determines if conversion from float to int will be valid.
bool CanRoundToZeroAndConvert(double val, bool unsigned_integer) {
const double max_uint = static_cast<double>(0xffffffffu);
@@ -928,6 +930,8 @@ WASM_EXEC_COMPILED_TEST(I32x4ConvertF32x4) {
CHECK_EQ(1, r.Call(*i, signed_value, unsigned_value));
}
}
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM
// Tests both signed and unsigned conversion from I16x8 (unpacking).
@@ -956,6 +960,8 @@ WASM_EXEC_COMPILED_TEST(I32x4ConvertI16x8) {
}
#endif // V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
void RunI32x4UnOpTest(WasmOpcode simd_op, Int32UnOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
@@ -970,7 +976,10 @@ void RunI32x4UnOpTest(WasmOpcode simd_op, Int32UnOp expected_op) {
}
WASM_EXEC_COMPILED_TEST(I32x4Neg) { RunI32x4UnOpTest(kExprI32x4Neg, Negate); }
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(S128Not) { RunI32x4UnOpTest(kExprS128Not, Not); }
#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
@@ -1003,13 +1012,13 @@ WASM_EXEC_COMPILED_TEST(I32x4Mul) { RunI32x4BinOpTest(kExprI32x4Mul, Mul); }
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET ||
// V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_X64
WASM_EXEC_COMPILED_TEST(S128And) { RunI32x4BinOpTest(kExprS128And, And); }
WASM_EXEC_COMPILED_TEST(S128Or) { RunI32x4BinOpTest(kExprS128Or, Or); }
WASM_EXEC_COMPILED_TEST(S128Xor) { RunI32x4BinOpTest(kExprS128Xor, Xor); }
-#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_X64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
@@ -1058,7 +1067,8 @@ WASM_EXEC_COMPILED_TEST(I32x4Ne) {
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET ||
// V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
WASM_EXEC_COMPILED_TEST(I32x4LtS) {
RunI32x4CompareOpTest(kExprI32x4LtS, Less);
}
@@ -1090,7 +1100,8 @@ WASM_EXEC_COMPILED_TEST(I32x4GtU) {
WASM_EXEC_COMPILED_TEST(I32x4GeU) {
RunI32x4CompareOpTest(kExprI32x4GeU, UnsignedGreaterEqual);
}
-#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
@@ -1148,7 +1159,10 @@ WASM_EXEC_COMPILED_TEST(I16x8ConvertI8x16) {
CHECK_EQ(1, r.Call(*i, unpacked_signed, unpacked_unsigned));
}
}
+#endif // V8_TARGET_ARCH_ARM
+#if SIMD_LOWERING_TARGET || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
void RunI16x8UnOpTest(WasmOpcode simd_op, Int16UnOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
@@ -1163,7 +1177,10 @@ void RunI16x8UnOpTest(WasmOpcode simd_op, Int16UnOp expected_op) {
}
WASM_EXEC_COMPILED_TEST(I16x8Neg) { RunI16x8UnOpTest(kExprI16x8Neg, Negate); }
+#endif // SIMD_LOWERING_TARGET || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM
// Tests both signed and unsigned conversion from I32x4 (packing).
WASM_EXEC_COMPILED_TEST(I16x8ConvertI32x4) {
FLAG_wasm_simd_prototype = true;
@@ -1192,7 +1209,10 @@ WASM_EXEC_COMPILED_TEST(I16x8ConvertI32x4) {
CHECK_EQ(1, r.Call(*i, packed_signed, packed_unsigned));
}
}
+#endif // V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET || \
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
void RunI16x8BinOpTest(WasmOpcode simd_op, Int16BinOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
@@ -1223,7 +1243,11 @@ WASM_EXEC_COMPILED_TEST(I16x8Sub) { RunI16x8BinOpTest(kExprI16x8Sub, Sub); }
WASM_EXEC_COMPILED_TEST(I16x8SubSaturateS) {
RunI16x8BinOpTest(kExprI16x8SubSaturateS, SubSaturate);
}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET ||
+ // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET || \
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
WASM_EXEC_COMPILED_TEST(I16x8Mul) { RunI16x8BinOpTest(kExprI16x8Mul, Mul); }
WASM_EXEC_COMPILED_TEST(I16x8MinS) {
@@ -1276,7 +1300,11 @@ WASM_EXEC_COMPILED_TEST(I16x8Eq) { RunI16x8CompareOpTest(kExprI16x8Eq, Equal); }
WASM_EXEC_COMPILED_TEST(I16x8Ne) {
RunI16x8CompareOpTest(kExprI16x8Ne, NotEqual);
}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET ||
+ // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
WASM_EXEC_COMPILED_TEST(I16x8LtS) {
RunI16x8CompareOpTest(kExprI16x8LtS, Less);
}
@@ -1308,7 +1336,11 @@ WASM_EXEC_COMPILED_TEST(I16x8LtU) {
WASM_EXEC_COMPILED_TEST(I16x8LeU) {
RunI16x8CompareOpTest(kExprI16x8LeU, UnsignedLessEqual);
}
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET || \
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
void RunI16x8ShiftOpTest(WasmOpcode simd_op, Int16ShiftOp expected_op,
int shift) {
FLAG_wasm_simd_prototype = true;
@@ -1335,7 +1367,11 @@ WASM_EXEC_COMPILED_TEST(I16x8ShrS) {
WASM_EXEC_COMPILED_TEST(I16x8ShrU) {
RunI16x8ShiftOpTest(kExprI16x8ShrU, LogicalShiftRight, 1);
}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET ||
+ // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || \
+ SIMD_LOWERING_TARGET
void RunI8x16UnOpTest(WasmOpcode simd_op, Int8UnOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t> r(kExecuteCompiled);
@@ -1350,7 +1386,10 @@ void RunI8x16UnOpTest(WasmOpcode simd_op, Int8UnOp expected_op) {
}
WASM_EXEC_COMPILED_TEST(I8x16Neg) { RunI8x16UnOpTest(kExprI8x16Neg, Negate); }
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ||
+ // SIMD_LOWERING_TARGET
+#if V8_TARGET_ARCH_ARM
// Tests both signed and unsigned conversion from I16x8 (packing).
WASM_EXEC_COMPILED_TEST(I8x16ConvertI16x8) {
FLAG_wasm_simd_prototype = true;
@@ -1379,7 +1418,9 @@ WASM_EXEC_COMPILED_TEST(I8x16ConvertI16x8) {
CHECK_EQ(1, r.Call(*i, packed_signed, packed_unsigned));
}
}
+#endif // V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
void RunI8x16BinOpTest(WasmOpcode simd_op, Int8BinOp expected_op) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t, int32_t, int32_t> r(kExecuteCompiled);
@@ -1411,8 +1452,6 @@ WASM_EXEC_COMPILED_TEST(I8x16SubSaturateS) {
RunI8x16BinOpTest(kExprI8x16SubSaturateS, SubSaturate);
}
-WASM_EXEC_COMPILED_TEST(I8x16Mul) { RunI8x16BinOpTest(kExprI8x16Mul, Mul); }
-
WASM_EXEC_COMPILED_TEST(I8x16MinS) {
RunI8x16BinOpTest(kExprI8x16MinS, Minimum);
}
@@ -1463,6 +1502,10 @@ WASM_EXEC_COMPILED_TEST(I8x16Eq) { RunI8x16CompareOpTest(kExprI8x16Eq, Equal); }
WASM_EXEC_COMPILED_TEST(I8x16Ne) {
RunI8x16CompareOpTest(kExprI8x16Ne, NotEqual);
}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || SIMD_LOWERING_TARGET
+
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+WASM_EXEC_COMPILED_TEST(I8x16Mul) { RunI8x16BinOpTest(kExprI8x16Mul, Mul); }
WASM_EXEC_COMPILED_TEST(I8x16GtS) {
RunI8x16CompareOpTest(kExprI8x16GtS, Greater);
@@ -1495,6 +1538,7 @@ WASM_EXEC_COMPILED_TEST(I8x16LtU) {
WASM_EXEC_COMPILED_TEST(I8x16LeU) {
RunI8x16CompareOpTest(kExprI8x16LeU, UnsignedLessEqual);
}
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
void RunI8x16ShiftOpTest(WasmOpcode simd_op, Int8ShiftOp expected_op,
int shift) {
@@ -1511,6 +1555,8 @@ void RunI8x16ShiftOpTest(WasmOpcode simd_op, Int8ShiftOp expected_op,
FOR_INT8_INPUTS(i) { CHECK_EQ(1, r.Call(*i, expected_op(*i, shift))); }
}
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || \
+ SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(I8x16Shl) {
RunI8x16ShiftOpTest(kExprI8x16Shl, LogicalShiftLeft, 1);
}
@@ -1518,11 +1564,14 @@ WASM_EXEC_COMPILED_TEST(I8x16Shl) {
WASM_EXEC_COMPILED_TEST(I8x16ShrS) {
RunI8x16ShiftOpTest(kExprI8x16ShrS, ArithmeticShiftRight, 1);
}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ||
+ // SIMD_LOWERING_TARGET
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(I8x16ShrU) {
RunI8x16ShiftOpTest(kExprI8x16ShrU, LogicalShiftRight, 1);
}
-#endif // V8_TARGET_ARCH_ARM
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS || \
V8_TARGET_ARCH_MIPS64
@@ -1565,9 +1614,278 @@ WASM_SIMD_SELECT_TEST(32x4)
#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS ||
// V8_TARGET_ARCH_MIPS64
-#if V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_MIPS64
WASM_SIMD_SELECT_TEST(16x8)
+
WASM_SIMD_SELECT_TEST(8x16)
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_MIPS ||
+ // V8_TARGET_ARCH_MIPS64
+
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
+// Test binary ops with two test patterns, all lanes distinct.
+template <typename T>
+void RunBinaryLaneOpTest(
+ WasmOpcode simd_op,
+ const std::array<T, kSimd128Size / sizeof(T)>& expected) {
+ FLAG_wasm_simd_prototype = true;
+ WasmRunner<int32_t> r(kExecuteCompiled);
+ // Set up two test patterns as globals, e.g. [0, 1, 2, 3] and [4, 5, 6, 7].
+ T* src0 = r.module().AddGlobal<T>(kWasmS128);
+ T* src1 = r.module().AddGlobal<T>(kWasmS128);
+ static const int kElems = kSimd128Size / sizeof(T);
+ for (int i = 0; i < kElems; i++) {
+ src0[i] = i;
+ src1[i] = kElems + i;
+ }
+ switch (simd_op) {
+ case kExprS32x4Shuffle: {
+ BUILD(r,
+ WASM_SET_GLOBAL(0, WASM_SIMD_S32x4_SHUFFLE_OP(simd_op, expected,
+ WASM_GET_GLOBAL(0),
+ WASM_GET_GLOBAL(1))),
+ WASM_ONE);
+ break;
+ }
+ case kExprS16x8Shuffle: {
+ BUILD(r,
+ WASM_SET_GLOBAL(0, WASM_SIMD_S16x8_SHUFFLE_OP(simd_op, expected,
+ WASM_GET_GLOBAL(0),
+ WASM_GET_GLOBAL(1))),
+ WASM_ONE);
+ break;
+ }
+ case kExprS8x16Shuffle: {
+ BUILD(r,
+ WASM_SET_GLOBAL(0, WASM_SIMD_S8x16_SHUFFLE_OP(simd_op, expected,
+ WASM_GET_GLOBAL(0),
+ WASM_GET_GLOBAL(1))),
+ WASM_ONE);
+ break;
+ }
+ default: {
+ BUILD(r,
+ WASM_SET_GLOBAL(0, WASM_SIMD_BINOP(simd_op, WASM_GET_GLOBAL(0),
+ WASM_GET_GLOBAL(1))),
+ WASM_ONE);
+ break;
+ }
+ }
+
+ CHECK_EQ(1, r.Call());
+ for (size_t i = 0; i < expected.size(); i++) {
+ CHECK_EQ(src0[i], expected[i]);
+ }
+}
+
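Given the lane patterns set up above (src0 = [0, 1, ..., kElems-1], src1 = [kElems, ..., 2*kElems-1]), the horizontal-add expectations in the next two tests can be reproduced with a scalar reference. A self-contained sketch, assuming only <array>; AddHorizRef is a hypothetical helper, not part of the test harness:

  #include <array>

  // Pairwise (horizontal) add: the low half of the result takes adjacent
  // pairs from a, the high half adjacent pairs from b.
  template <typename T, size_t N>
  std::array<T, N> AddHorizRef(const std::array<T, N>& a,
                               const std::array<T, N>& b) {
    std::array<T, N> out;
    for (size_t i = 0; i < N / 2; i++) out[i] = a[2 * i] + a[2 * i + 1];
    for (size_t i = 0; i < N / 2; i++)
      out[N / 2 + i] = b[2 * i] + b[2 * i + 1];
    return out;
  }

  // AddHorizRef<int32_t, 4>({{0, 1, 2, 3}}, {{4, 5, 6, 7}}) yields
  // {{1, 5, 9, 13}}, matching the I32x4AddHoriz expectation below.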
+WASM_EXEC_COMPILED_TEST(I32x4AddHoriz) {
+ RunBinaryLaneOpTest<int32_t>(kExprI32x4AddHoriz, {{1, 5, 9, 13}});
+}
+
+WASM_EXEC_COMPILED_TEST(I16x8AddHoriz) {
+ RunBinaryLaneOpTest<int16_t>(kExprI16x8AddHoriz,
+ {{1, 5, 9, 13, 17, 21, 25, 29}});
+}
+#endif // V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X64
+
+#if V8_TARGET_ARCH_ARM
+WASM_EXEC_COMPILED_TEST(F32x4AddHoriz) {
+ RunBinaryLaneOpTest<float>(kExprF32x4AddHoriz, {{1.0f, 5.0f, 9.0f, 13.0f}});
+}
+
+// Test some regular shuffles that may have special handling on some targets.
+// Test a normal version and a unary version (where the second operand isn't used).
+WASM_EXEC_COMPILED_TEST(S32x4ZipLeft) {
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{0, 4, 1, 5}});
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{0, 0, 1, 1}});
+}
+
+WASM_EXEC_COMPILED_TEST(S32x4ZipRight) {
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{2, 6, 3, 7}});
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{2, 2, 3, 3}});
+}
+
+WASM_EXEC_COMPILED_TEST(S32x4UnzipLeft) {
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{0, 2, 4, 6}});
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{0, 2, 0, 2}});
+}
+
+WASM_EXEC_COMPILED_TEST(S32x4UnzipRight) {
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{1, 3, 5, 7}});
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{1, 3, 1, 3}});
+}
+
+WASM_EXEC_COMPILED_TEST(S32x4TransposeLeft) {
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{0, 4, 2, 6}});
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{0, 0, 2, 2}});
+}
+
+WASM_EXEC_COMPILED_TEST(S32x4TransposeRight) {
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{1, 5, 3, 7}});
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{1, 1, 3, 3}});
+}
+
+// Reverses are only unary.
+WASM_EXEC_COMPILED_TEST(S32x2Reverse) {
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{1, 0, 3, 2}});
+}
+
+// Test irregular shuffle.
+WASM_EXEC_COMPILED_TEST(S32x4Irregular) {
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{0, 4, 4, 5}});
+ RunBinaryLaneOpTest<int32_t>(kExprS32x4Shuffle, {{0, 0, 0, 1}});
+}
+
+WASM_EXEC_COMPILED_TEST(S16x8ZipLeft) {
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle, {{0, 8, 1, 9, 2, 10, 3, 11}});
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle, {{0, 0, 1, 1, 2, 2, 3, 3}});
+}
+
+WASM_EXEC_COMPILED_TEST(S16x8ZipRight) {
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle,
+ {{4, 12, 5, 13, 6, 14, 7, 15}});
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle, {{4, 4, 5, 5, 6, 6, 7, 7}});
+}
+
+WASM_EXEC_COMPILED_TEST(S16x8UnzipLeft) {
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle,
+ {{0, 2, 4, 6, 8, 10, 12, 14}});
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle, {{0, 2, 4, 6, 0, 2, 4, 6}});
+}
+
+WASM_EXEC_COMPILED_TEST(S16x8UnzipRight) {
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle,
+ {{1, 3, 5, 7, 9, 11, 13, 15}});
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle, {{1, 3, 5, 7, 1, 3, 5, 7}});
+}
+
+WASM_EXEC_COMPILED_TEST(S16x8TransposeLeft) {
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle,
+ {{0, 8, 2, 10, 4, 12, 6, 14}});
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle, {{0, 0, 2, 2, 4, 4, 6, 6}});
+}
+
+WASM_EXEC_COMPILED_TEST(S16x8TransposeRight) {
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle,
+ {{1, 9, 3, 11, 5, 13, 7, 15}});
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle, {{1, 1, 3, 3, 5, 5, 7, 7}});
+}
+
+WASM_EXEC_COMPILED_TEST(S16x4Reverse) {
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle, {{3, 2, 1, 0, 7, 6, 5, 4}});
+}
+
+WASM_EXEC_COMPILED_TEST(S16x2Reverse) {
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle, {{1, 0, 3, 2, 5, 4, 7, 6}});
+}
+
+WASM_EXEC_COMPILED_TEST(S16x8Irregular) {
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle, {{0, 8, 8, 0, 2, 10, 3, 11}});
+ RunBinaryLaneOpTest<int16_t>(kExprS16x8Shuffle, {{0, 0, 0, 0, 2, 2, 3, 3}});
+}
+
+WASM_EXEC_COMPILED_TEST(S8x16ZipLeft) {
+ RunBinaryLaneOpTest<int8_t>(
+ kExprS8x16Shuffle,
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}});
+ RunBinaryLaneOpTest<int8_t>(
+ kExprS8x16Shuffle, {{0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}});
+}
+
+WASM_EXEC_COMPILED_TEST(S8x16ZipRight) {
+ RunBinaryLaneOpTest<int8_t>(
+ kExprS8x16Shuffle,
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}});
+ RunBinaryLaneOpTest<int8_t>(
+ kExprS8x16Shuffle,
+ {{8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15}});
+}
+
+WASM_EXEC_COMPILED_TEST(S8x16UnzipLeft) {
+ RunBinaryLaneOpTest<int8_t>(
+ kExprS8x16Shuffle,
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}});
+ RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{0, 2, 4, 6, 8, 10, 12, 14, 0,
+ 2, 4, 6, 8, 10, 12, 14}});
+}
+
+WASM_EXEC_COMPILED_TEST(S8x16UnzipRight) {
+ RunBinaryLaneOpTest<int8_t>(
+ kExprS8x16Shuffle,
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}});
+ RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{1, 3, 5, 7, 9, 11, 13, 15, 1,
+ 3, 5, 7, 9, 11, 13, 15}});
+}
+
+WASM_EXEC_COMPILED_TEST(S8x16TransposeLeft) {
+ RunBinaryLaneOpTest<int8_t>(
+ kExprS8x16Shuffle,
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}});
+ RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{0, 0, 2, 2, 4, 4, 6, 6, 8, 8,
+ 10, 10, 12, 12, 14, 14}});
+}
+
+WASM_EXEC_COMPILED_TEST(S8x16TransposeRight) {
+ RunBinaryLaneOpTest<int8_t>(
+ kExprS8x16Shuffle,
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}});
+ RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{1, 1, 3, 3, 5, 5, 7, 7, 9, 9,
+ 11, 11, 13, 13, 15, 15}});
+}
+
+WASM_EXEC_COMPILED_TEST(S8x8Reverse) {
+ RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{7, 6, 5, 4, 3, 2, 1, 0, 15,
+ 14, 13, 12, 11, 10, 9, 8}});
+}
+
+WASM_EXEC_COMPILED_TEST(S8x4Reverse) {
+ RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{3, 2, 1, 0, 7, 6, 5, 4, 11,
+ 10, 9, 8, 15, 14, 13, 12}});
+}
+
+WASM_EXEC_COMPILED_TEST(S8x2Reverse) {
+ RunBinaryLaneOpTest<int8_t>(kExprS8x16Shuffle, {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8,
+ 11, 10, 13, 12, 15, 14}});
+}
+
+WASM_EXEC_COMPILED_TEST(S8x16Irregular) {
+ RunBinaryLaneOpTest<int8_t>(
+ kExprS8x16Shuffle,
+ {{0, 16, 0, 16, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}});
+ RunBinaryLaneOpTest<int8_t>(
+ kExprS8x16Shuffle, {{0, 0, 0, 0, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7}});
+}
+
+// Test shuffles that concatenate the two vectors.
+template <typename T>
+void RunConcatOpTest(WasmOpcode simd_op) {
+ static const int kLanes = kSimd128Size / sizeof(T);
+ std::array<T, kLanes> expected;
+ for (int bias = 1; bias < kLanes; bias++) {
+ int i = 0;
+    // Last kLanes - bias lanes of the first vector.
+ for (int j = bias; j < kLanes; j++) {
+ expected[i++] = j;
+ }
+    // First bias lanes of the second vector.
+ for (int j = 0; j < bias; j++) {
+ expected[i++] = j + kLanes;
+ }
+ RunBinaryLaneOpTest<T>(simd_op, expected);
+ }
+}
+
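For example, with int32_t lanes (kLanes = 4) and bias = 1, the loop above builds expected = {1, 2, 3, 4}: the last three lanes of the first vector (src0 = [0, 1, 2, 3]) followed by the first lane of the second (src1 = [4, 5, 6, 7]).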
+WASM_EXEC_COMPILED_TEST(S32x4Concat) {
+ RunConcatOpTest<int32_t>(kExprS32x4Shuffle);
+}
+
+WASM_EXEC_COMPILED_TEST(S16x8Concat) {
+ RunConcatOpTest<int16_t>(kExprS16x8Shuffle);
+}
+
+WASM_EXEC_COMPILED_TEST(S8x16Concat) {
+ RunConcatOpTest<int8_t>(kExprS8x16Shuffle);
+}
// Boolean unary operations are 'AllTrue' and 'AnyTrue', which return an integer
// result. Use relational ops on numeric vectors to create the boolean vector
@@ -1743,7 +2061,9 @@ WASM_EXEC_COMPILED_TEST(S1x16And) { RunS1x16BinOpTest(kExprS1x16And, And); }
WASM_EXEC_COMPILED_TEST(S1x16Or) { RunS1x16BinOpTest(kExprS1x16Or, Or); }
WASM_EXEC_COMPILED_TEST(S1x16Xor) { RunS1x16BinOpTest(kExprS1x16Xor, Xor); }
+#endif // !V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(SimdI32x4ExtractWithF32x4) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t> r(kExecuteCompiled);
@@ -1801,7 +2121,9 @@ WASM_EXEC_COMPILED_TEST(SimdI32x4AddWithF32x4) {
WASM_I32V(1), WASM_I32V(0)));
CHECK_EQ(1, r.Call());
}
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_X64
WASM_EXEC_COMPILED_TEST(SimdI32x4Local) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t> r(kExecuteCompiled);
@@ -1858,7 +2180,9 @@ WASM_EXEC_COMPILED_TEST(SimdI32x4For) {
WASM_GET_LOCAL(0));
CHECK_EQ(1, r.Call());
}
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(SimdF32x4For) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t> r(kExecuteCompiled);
@@ -1883,15 +2207,40 @@ WASM_EXEC_COMPILED_TEST(SimdF32x4For) {
WASM_GET_LOCAL(0));
CHECK_EQ(1, r.Call());
}
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_X64
+
+template <typename T, int numLanes = 4>
+void SetVectorByLanes(T* v, const std::array<T, numLanes>& arr) {
+ for (int lane = 0; lane < numLanes; lane++) {
+ const T& value = arr[lane];
+#if defined(V8_TARGET_BIG_ENDIAN)
+ v[numLanes - 1 - lane] = value;
+#else
+ v[lane] = value;
+#endif
+ }
+}
+
+template <typename T>
+const T& GetScalar(T* v, int lane) {
+ constexpr int kElems = kSimd128Size / sizeof(T);
+#if defined(V8_TARGET_BIG_ENDIAN)
+ const int index = kElems - 1 - lane;
+#else
+ const int index = lane;
+#endif
+ USE(kElems);
+ DCHECK(index >= 0 && index < kElems);
+ return v[index];
+}
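These helpers account for byte order: on big-endian targets a lane's storage position is mirrored, so for a four-lane i32 vector lane 0 lives at element index 3. SetVectorByLanes and GetScalar translate lane numbers to element indices so the tests below can read and write S128 globals lane-by-lane on either endianness.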
WASM_EXEC_COMPILED_TEST(SimdI32x4GetGlobal) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
int32_t* global = r.module().AddGlobal<int32_t>(kWasmS128);
- *(global) = 0;
- *(global + 1) = 1;
- *(global + 2) = 2;
- *(global + 3) = 3;
+ SetVectorByLanes(global, {{0, 1, 2, 3}});
r.AllocateLocal(kWasmI32);
BUILD(
r, WASM_SET_LOCAL(1, WASM_I32V(1)),
@@ -1924,20 +2273,19 @@ WASM_EXEC_COMPILED_TEST(SimdI32x4SetGlobal) {
WASM_I32V(56))),
WASM_I32V(1));
CHECK_EQ(1, r.Call(0));
- CHECK_EQ(*global, 23);
- CHECK_EQ(*(global + 1), 34);
- CHECK_EQ(*(global + 2), 45);
- CHECK_EQ(*(global + 3), 56);
+ CHECK_EQ(GetScalar(global, 0), 23);
+ CHECK_EQ(GetScalar(global, 1), 34);
+ CHECK_EQ(GetScalar(global, 2), 45);
+ CHECK_EQ(GetScalar(global, 3), 56);
}
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_X64
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
WASM_EXEC_COMPILED_TEST(SimdF32x4GetGlobal) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t, int32_t> r(kExecuteCompiled);
float* global = r.module().AddGlobal<float>(kWasmS128);
- *(global) = 0.0;
- *(global + 1) = 1.5;
- *(global + 2) = 2.25;
- *(global + 3) = 3.5;
+ SetVectorByLanes<float>(global, {{0.0, 1.5, 2.25, 3.5}});
r.AllocateLocal(kWasmI32);
BUILD(
r, WASM_SET_LOCAL(1, WASM_I32V(1)),
@@ -1970,12 +2318,14 @@ WASM_EXEC_COMPILED_TEST(SimdF32x4SetGlobal) {
WASM_F32(65.0))),
WASM_I32V(1));
CHECK_EQ(1, r.Call(0));
- CHECK_EQ(*global, 13.5);
- CHECK_EQ(*(global + 1), 45.5);
- CHECK_EQ(*(global + 2), 32.25);
- CHECK_EQ(*(global + 3), 65.0);
+ CHECK_EQ(GetScalar(global, 0), 13.5f);
+ CHECK_EQ(GetScalar(global, 1), 45.5f);
+ CHECK_EQ(GetScalar(global, 2), 32.25f);
+ CHECK_EQ(GetScalar(global, 3), 65.0f);
}
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#if V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_X64
WASM_EXEC_COMPILED_TEST(SimdLoadStoreLoad) {
FLAG_wasm_simd_prototype = true;
WasmRunner<int32_t> r(kExecuteCompiled);
@@ -1993,4 +2343,4 @@ WASM_EXEC_COMPILED_TEST(SimdLoadStoreLoad) {
CHECK_EQ(expected, r.Call());
}
}
-#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET
+#endif // V8_TARGET_ARCH_ARM || SIMD_LOWERING_TARGET || V8_TARGET_ARCH_X64
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index a1eb0511ba..7596033768 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -9,11 +9,11 @@
#include "src/assembler-inl.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/utils.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
using namespace v8::base;
using namespace v8::internal;
@@ -1082,11 +1082,9 @@ WASM_EXEC_TEST(LoadMaxUint32Offset) {
WasmRunner<int32_t> r(execution_mode);
r.module().AddMemoryElems<int32_t>(8);
- BUILD(r, kExprI32Const, 0, // index
- static_cast<byte>(v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(
- MachineType::Int32(), false)), // --
- 0, // alignment
- U32V_5(0xffffffff)); // offset
+ BUILD(r, WASM_LOAD_MEM_OFFSET(MachineType::Int32(), // type
+ U32V_5(0xffffffff), // offset
+ WASM_ZERO)); // index
CHECK_TRAP32(r.Call());
}
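This rewrite leans on a change to WASM_LOAD_MEM_OFFSET later in this patch: the macro no longer casts its offset argument to a single byte but splices it in raw, so a multi-byte immediate such as U32V_5(0xffffffff) can be passed directly instead of hand-assembling the opcode, alignment, and offset bytes.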
@@ -1593,10 +1591,10 @@ WASM_EXEC_TEST(LoadMemI32_offset) {
WASM_EXEC_TEST(LoadMemI32_const_oob_misaligned) {
// TODO(eholk): Fix this test for the trap handler.
if (trap_handler::UseTrapHandler()) return;
- const int kMemSize = 12;
+ constexpr byte kMemSize = 12;
// TODO(titzer): Fix misaligned accesses on MIPS and re-enable.
- for (int offset = 0; offset < kMemSize + 5; ++offset) {
- for (int index = 0; index < kMemSize + 5; ++index) {
+ for (byte offset = 0; offset < kMemSize + 5; ++offset) {
+ for (byte index = 0; index < kMemSize + 5; ++index) {
WasmRunner<int32_t> r(execution_mode);
r.module().AddMemoryElems<byte>(kMemSize);
r.module().RandomizeMemory();
@@ -1604,7 +1602,7 @@ WASM_EXEC_TEST(LoadMemI32_const_oob_misaligned) {
BUILD(r, WASM_LOAD_MEM_OFFSET(MachineType::Int32(), offset,
WASM_I32V_2(index)));
- if ((offset + index) <= static_cast<int>((kMemSize - sizeof(int32_t)))) {
+ if (offset + index <= (kMemSize - sizeof(int32_t))) {
CHECK_EQ(r.module().raw_val_at<int32_t>(offset + index), r.Call());
} else {
CHECK_TRAP(r.Call());
@@ -1616,9 +1614,9 @@ WASM_EXEC_TEST(LoadMemI32_const_oob_misaligned) {
WASM_EXEC_TEST(LoadMemI32_const_oob) {
// TODO(eholk): Fix this test for the trap handler.
if (trap_handler::UseTrapHandler()) return;
- const int kMemSize = 24;
- for (int offset = 0; offset < kMemSize + 5; offset += 4) {
- for (int index = 0; index < kMemSize + 5; index += 4) {
+ constexpr byte kMemSize = 24;
+ for (byte offset = 0; offset < kMemSize + 5; offset += 4) {
+ for (byte index = 0; index < kMemSize + 5; index += 4) {
WasmRunner<int32_t> r(execution_mode);
r.module().AddMemoryElems<byte>(kMemSize);
r.module().RandomizeMemory();
@@ -1626,7 +1624,7 @@ WASM_EXEC_TEST(LoadMemI32_const_oob) {
BUILD(r, WASM_LOAD_MEM_OFFSET(MachineType::Int32(), offset,
WASM_I32V_2(index)));
- if ((offset + index) <= static_cast<int>((kMemSize - sizeof(int32_t)))) {
+ if (offset + index <= (kMemSize - sizeof(int32_t))) {
CHECK_EQ(r.module().raw_val_at<int32_t>(offset + index), r.Call());
} else {
CHECK_TRAP(r.Call());
@@ -2339,9 +2337,6 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
// =========================================================================
std::vector<byte> code;
- // Load the offset for the store.
- ADD_CODE(code, WASM_ZERO);
-
// Load the arguments.
for (int i = 0; i < num_params; ++i) {
int offset = (i + 1) * kElemSize;
@@ -2351,10 +2346,13 @@ static void Run_WasmMixedCall_N(WasmExecutionMode execution_mode, int start) {
// Call the selector function.
ADD_CODE(code, WASM_CALL_FUNCTION0(t.function_index()));
+ // Store the result in a local.
+ byte local_index = r.AllocateLocal(WasmOpcodes::ValueTypeFor(result));
+ ADD_CODE(code, kExprSetLocal, local_index);
+
// Store the result in memory.
ADD_CODE(code,
- static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(result, true)),
- ZERO_ALIGNMENT, ZERO_OFFSET);
+ WASM_STORE_MEM(result, WASM_ZERO, WASM_GET_LOCAL(local_index)));
// Return the expected value.
ADD_CODE(code, WASM_I32V_2(kExpected));
@@ -2972,3 +2970,83 @@ WASM_EXEC_TEST(Int32RemS_dead) {
CHECK_TRAP(r.Call(-1001, 0));
CHECK_TRAP(r.Call(kMin, 0));
}
+
+WASM_EXEC_TEST(BrToLoopWithValue) {
+ WasmRunner<int32_t, int32_t, int32_t> r(execution_mode);
+ // Subtracts <1> times 3 from <0> and returns the result.
+ BUILD(r,
+ // loop i32
+ kExprLoop, kLocalI32,
+ // decrement <0> by 3.
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I32V_1(3))),
+ // decrement <1> by 1.
+ WASM_SET_LOCAL(1, WASM_I32_SUB(WASM_GET_LOCAL(1), WASM_ONE)),
+        // load return value <0>; br_if will drop it if the branch is taken.
+ WASM_GET_LOCAL(0),
+ // continue loop if <1> is != 0.
+ WASM_BR_IF(0, WASM_GET_LOCAL(1)),
+ // end of loop, value loaded above is the return value.
+ kExprEnd);
+ CHECK_EQ(12, r.Call(27, 5));
+}
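With r.Call(27, 5) the loop body runs five times, so the value left on the stack (and returned) is 27 - 5 * 3 = 12.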
+
+WASM_EXEC_TEST(BrToLoopWithoutValue) {
+ // This was broken in the interpreter, see http://crbug.com/715454
+ WasmRunner<int32_t, int32_t> r(execution_mode);
+ BUILD(
+ r, kExprLoop, kLocalI32, // loop i32
+ WASM_SET_LOCAL(0, WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_ONE)), // dec <0>
+ WASM_BR_IF(0, WASM_GET_LOCAL(0)), // br_if <0> != 0
+ kExprUnreachable, // unreachable
+ kExprEnd); // end
+ CHECK_TRAP32(r.Call(2));
+}
+
+WASM_EXEC_TEST(LoopsWithValues) {
+ WasmRunner<int32_t> r(execution_mode);
+ BUILD(r, WASM_LOOP_I(WASM_LOOP_I(WASM_ONE), WASM_ONE, kExprI32Add));
+ CHECK_EQ(2, r.Call());
+}
+
+WASM_EXEC_TEST(InvalidStackAfterUnreachable) {
+ WasmRunner<int32_t> r(execution_mode);
+ BUILD(r, kExprUnreachable, kExprI32Add);
+ CHECK_TRAP32(r.Call());
+}
+
+WASM_EXEC_TEST(InvalidStackAfterBr) {
+ WasmRunner<int32_t> r(execution_mode);
+ BUILD(r, WASM_BRV(0, WASM_I32V_1(27)), kExprI32Add);
+ CHECK_EQ(27, r.Call());
+}
+
+WASM_EXEC_TEST(InvalidStackAfterReturn) {
+ WasmRunner<int32_t> r(execution_mode);
+ BUILD(r, WASM_RETURN1(WASM_I32V_1(17)), kExprI32Add);
+ CHECK_EQ(17, r.Call());
+}
+
+WASM_EXEC_TEST(BranchOverUnreachableCode) {
+ WasmRunner<int32_t> r(execution_mode);
+ BUILD(r,
+ // Start a block which breaks in the middle (hence unreachable code
+ // afterwards) and continue execution after this block.
+ WASM_BLOCK_I(WASM_BRV(0, WASM_I32V_1(17)), kExprI32Add),
+ // Add one to the 17 returned from the block.
+ WASM_ONE, kExprI32Add);
+ CHECK_EQ(18, r.Call());
+}
+
+WASM_EXEC_TEST(BlockInsideUnreachable) {
+ WasmRunner<int32_t> r(execution_mode);
+ BUILD(r, WASM_RETURN1(WASM_I32V_1(17)), WASM_BLOCK(WASM_BR(0)));
+ CHECK_EQ(17, r.Call());
+}
+
+WASM_EXEC_TEST(IfInsideUnreachable) {
+ WasmRunner<int32_t> r(execution_mode);
+ BUILD(
+ r, WASM_RETURN1(WASM_I32V_1(17)),
+ WASM_IF_ELSE_I(WASM_ONE, WASM_BRV(0, WASM_ONE), WASM_RETURN1(WASM_ONE)));
+ CHECK_EQ(17, r.Call());
+}
diff --git a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
index 357f20f944..c9b9852e11 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-breakpoints.cc
@@ -7,13 +7,13 @@
#include "src/frames-inl.h"
#include "src/property-descriptor.h"
#include "src/utils.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-objects.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
using namespace v8::internal;
using namespace v8::internal::wasm;
@@ -158,6 +158,109 @@ void SetBreakpoint(WasmRunnerBase& runner, int function_index, int byte_offset,
WasmDebugInfo::SetBreakpoint(debug_info, function_index, set_byte_offset);
}
+// Wrapper with operator<<.
+struct WasmValWrapper {
+ WasmVal val;
+
+ bool operator==(const WasmValWrapper& other) const {
+ return val == other.val;
+ }
+};
+
+// Only needed in debug builds. Avoid unused warning otherwise.
+#ifdef DEBUG
+std::ostream& operator<<(std::ostream& out, const WasmValWrapper& wrapper) {
+ switch (wrapper.val.type) {
+ case kWasmI32:
+ out << "i32: " << wrapper.val.to<int32_t>();
+ break;
+ case kWasmI64:
+ out << "i64: " << wrapper.val.to<int64_t>();
+ break;
+ case kWasmF32:
+ out << "f32: " << wrapper.val.to<float>();
+ break;
+ case kWasmF64:
+ out << "f64: " << wrapper.val.to<double>();
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+ return out;
+}
+#endif
+
+class CollectValuesBreakHandler : public debug::DebugDelegate {
+ public:
+ struct BreakpointValues {
+ std::vector<WasmVal> locals;
+ std::vector<WasmVal> stack;
+ };
+
+ explicit CollectValuesBreakHandler(
+ Isolate* isolate, std::initializer_list<BreakpointValues> expected_values)
+ : isolate_(isolate), expected_values_(expected_values) {
+ v8::debug::SetDebugDelegate(reinterpret_cast<v8::Isolate*>(isolate_), this);
+ }
+ ~CollectValuesBreakHandler() {
+ v8::debug::SetDebugDelegate(reinterpret_cast<v8::Isolate*>(isolate_),
+ nullptr);
+ }
+
+ private:
+ Isolate* isolate_;
+ int count_ = 0;
+ std::vector<BreakpointValues> expected_values_;
+
+ void BreakProgramRequested(v8::Local<v8::Context> paused_context,
+ v8::Local<v8::Object> exec_state,
+ v8::Local<v8::Value> break_points_hit) override {
+ printf("Break #%d\n", count_);
+ CHECK_GT(expected_values_.size(), count_);
+ auto& expected = expected_values_[count_];
+ ++count_;
+
+ HandleScope handles(isolate_);
+
+ StackTraceFrameIterator frame_it(isolate_);
+ auto summ = FrameSummary::GetTop(frame_it.frame()).AsWasmInterpreted();
+ Handle<WasmInstanceObject> instance = summ.wasm_instance();
+
+ auto frame =
+ instance->debug_info()->GetInterpretedFrame(frame_it.frame()->fp(), 0);
+ CHECK_EQ(expected.locals.size(), frame->GetLocalCount());
+ for (int i = 0; i < frame->GetLocalCount(); ++i) {
+ CHECK_EQ(WasmValWrapper{expected.locals[i]},
+ WasmValWrapper{frame->GetLocalValue(i)});
+ }
+
+ CHECK_EQ(expected.stack.size(), frame->GetStackHeight());
+ for (int i = 0; i < frame->GetStackHeight(); ++i) {
+ CHECK_EQ(WasmValWrapper{expected.stack[i]},
+ WasmValWrapper{frame->GetStackValue(i)});
+ }
+
+ isolate_->debug()->PrepareStep(StepAction::StepIn);
+ }
+};
+
+// Special template to explicitly cast to WasmVal.
+template <typename Arg>
+WasmVal MakeWasmVal(Arg arg) {
+ return WasmVal(arg);
+}
+// Translate long to i64 (ambiguous otherwise).
+template <>
+WasmVal MakeWasmVal(long arg) { // NOLINT: allow long parameter
+ return WasmVal(static_cast<int64_t>(arg));
+}
+
+template <typename... Args>
+std::vector<WasmVal> wasmVec(Args... args) {
+ std::array<WasmVal, sizeof...(args)> arr{{MakeWasmVal(args)...}};
+ return std::vector<WasmVal>{arr.begin(), arr.end()};
+}
+
} // namespace
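wasmVec builds the expected-value vectors used below: each argument goes through MakeWasmVal, so the C++ literal's type selects the wasm type. For instance, wasmVec(7, 0L, 0.f, 0.) yields {i32: 7, i64: 0, f32: 0.0, f64: 0.0}; the long specialization resolves the otherwise-ambiguous i64 case.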
TEST(WasmCollectPossibleBreakpoints) {
@@ -272,3 +375,48 @@ TEST(WasmStepInAndOut) {
CHECK(!Execution::Call(isolate, main_fun_wrapper, global, 0, nullptr)
.is_null());
}
+
+TEST(WasmGetLocalsAndStack) {
+ WasmRunner<void, int> runner(kExecuteCompiled);
+ runner.AllocateLocal(ValueType::kWord64);
+ runner.AllocateLocal(ValueType::kFloat32);
+ runner.AllocateLocal(ValueType::kFloat64);
+
+ BUILD(runner,
+ // set [1] to 17
+ WASM_SET_LOCAL(1, WASM_I64V_1(17)),
+ // set [2] to <arg0> = 7
+ WASM_SET_LOCAL(2, WASM_F32_SCONVERT_I32(WASM_GET_LOCAL(0))),
+ // set [3] to <arg1>/2 = 8.5
+ WASM_SET_LOCAL(3, WASM_F64_DIV(WASM_F64_SCONVERT_I64(WASM_GET_LOCAL(1)),
+ WASM_F64(2))));
+
+ Isolate* isolate = runner.main_isolate();
+ Handle<JSFunction> main_fun_wrapper =
+ runner.module().WrapCode(runner.function_index());
+
+ // Set breakpoint at the first instruction (7 bytes for local decls: num
+ // entries + 3x<count, type>).
+ SetBreakpoint(runner, runner.function_index(), 7, 7);
+
+ CollectValuesBreakHandler break_handler(
+ isolate,
+ {
+ // params + locals stack
+ {wasmVec(7, 0L, 0.f, 0.), wasmVec()}, // 0: i64.const[17]
+ {wasmVec(7, 0L, 0.f, 0.), wasmVec(17L)}, // 1: set_local[1]
+ {wasmVec(7, 17L, 0.f, 0.), wasmVec()}, // 2: get_local[0]
+ {wasmVec(7, 17L, 0.f, 0.), wasmVec(7)}, // 3: f32.convert_s
+ {wasmVec(7, 17L, 0.f, 0.), wasmVec(7.f)}, // 4: set_local[2]
+ {wasmVec(7, 17L, 7.f, 0.), wasmVec()}, // 5: get_local[1]
+ {wasmVec(7, 17L, 7.f, 0.), wasmVec(17L)}, // 6: f64.convert_s
+ {wasmVec(7, 17L, 7.f, 0.), wasmVec(17.)}, // 7: f64.const[2]
+ {wasmVec(7, 17L, 7.f, 0.), wasmVec(17., 2.)}, // 8: f64.div
+ {wasmVec(7, 17L, 7.f, 0.), wasmVec(8.5)}, // 9: set_local[3]
+ {wasmVec(7, 17L, 7.f, 8.5), wasmVec()}, // 10: end
+ });
+
+ Handle<Object> global(isolate->context()->global_object(), isolate);
+ Handle<Object> args[]{handle(Smi::FromInt(7), isolate)};
+ CHECK(!Execution::Call(isolate, main_fun_wrapper, global, 1, args).is_null());
+}
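The breakpoint byte offset of 7 used above covers exactly the local declarations: one byte for the entry count plus three <count, type> pairs of two bytes each, so 1 + 3 * 2 = 7 lands on the first real instruction.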
diff --git a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
index 6ae806d831..d1024e62a2 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-interpreter-entry.cc
@@ -6,11 +6,11 @@
#include "src/assembler-inl.h"
#include "src/objects-inl.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-objects.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/wasm-macro-gen.h"
using namespace v8::internal;
using namespace v8::internal::wasm;
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 3dc7839667..4d64fbb67b 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -3,11 +3,11 @@
// found in the LICENSE file.
#include "src/assembler-inl.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
using namespace v8::base;
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
index 89872c5f17..e489594ef3 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -4,11 +4,11 @@
#include "src/assembler-inl.h"
#include "src/trap-handler/trap-handler.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
#include "test/cctest/wasm/wasm-run-utils.h"
#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
using namespace v8::base;
using namespace v8::internal;
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.h b/deps/v8/test/cctest/wasm/wasm-run-utils.h
index d530e484a1..a97d5701a0 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.h
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.h
@@ -13,8 +13,7 @@
#include <memory>
#include "src/base/utils/random-number-generator.h"
-#include "src/zone/accounting-allocator.h"
-
+#include "src/code-stubs.h"
#include "src/compiler/compiler-source-position-table.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/int64-lowering.h"
@@ -25,10 +24,10 @@
#include "src/compiler/zone-stats.h"
#include "src/trap-handler/trap-handler.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/local-decl-encoder.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-js.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/wasm-opcodes.h"
@@ -76,16 +75,10 @@ class TestingModule : public ModuleEnv {
public:
explicit TestingModule(Zone* zone, WasmExecutionMode mode = kExecuteCompiled)
: ModuleEnv(&module_, &instance_),
- execution_mode_(mode),
instance_(&module_),
isolate_(CcTest::InitIsolateOnce()),
global_offset(0),
- interpreter_(
- mode == kExecuteInterpreted
- ? new WasmInterpreter(
- isolate_, ModuleBytesEnv(&module_, &instance_,
- Vector<const byte>::empty()))
- : nullptr) {
+ interpreter_(nullptr) {
WasmJs::Install(isolate_);
instance->module = &module_;
instance->globals_start = global_data;
@@ -94,22 +87,10 @@ class TestingModule : public ModuleEnv {
instance->mem_size = 0;
memset(global_data, 0, sizeof(global_data));
instance_object_ = InitInstanceObject();
- }
-
- ~TestingModule() {
- if (instance->mem_start) {
- if (EnableGuardRegions() && module_.is_wasm()) {
- // See the corresponding code in AddMemory. We use a different
- // allocation path when guard regions are enabled, which means we have
- // to free it differently too.
- const size_t alloc_size =
- RoundUp(kWasmMaxHeapOffset, v8::base::OS::CommitPageSize());
- v8::base::OS::Free(instance->mem_start, alloc_size);
- } else {
- free(instance->mem_start);
- }
+ if (mode == kExecuteInterpreted) {
+ interpreter_ =
+ WasmDebugInfo::SetupForTesting(instance_object_, &instance_);
}
- if (interpreter_) delete interpreter_;
}
void ChangeOriginToAsmjs() { module_.set_origin(kAsmJsOrigin); }
@@ -118,22 +99,20 @@ class TestingModule : public ModuleEnv {
CHECK(!module_.has_memory);
CHECK_NULL(instance->mem_start);
CHECK_EQ(0, instance->mem_size);
+ DCHECK(!instance_object_->has_memory_buffer());
module_.has_memory = true;
- if (EnableGuardRegions() && module_.is_wasm()) {
- const size_t alloc_size =
- RoundUp(kWasmMaxHeapOffset, v8::base::OS::CommitPageSize());
- instance->mem_start = reinterpret_cast<byte*>(
- v8::base::OS::AllocateGuarded(alloc_size * 2));
- instance->mem_start += alloc_size;
- const size_t guard_size = RoundUp(size, v8::base::OS::CommitPageSize());
- v8::base::OS::Unprotect(instance->mem_start, guard_size);
- } else {
- instance->mem_start = reinterpret_cast<byte*>(malloc(size));
- }
+ bool enable_guard_regions = EnableGuardRegions() && module_.is_wasm();
+ uint32_t alloc_size =
+ enable_guard_regions ? RoundUp(size, OS::CommitPageSize()) : size;
+ Handle<JSArrayBuffer> new_buffer =
+ wasm::NewArrayBuffer(isolate_, alloc_size, enable_guard_regions);
+ CHECK(!new_buffer.is_null());
+ instance_object_->set_memory_buffer(*new_buffer);
+ instance->mem_start = reinterpret_cast<byte*>(new_buffer->backing_store());
CHECK(size == 0 || instance->mem_start);
memset(instance->mem_start, 0, size);
instance->mem_size = size;
- return raw_mem_start<byte>();
+ return instance->mem_start;
}
template <typename T>
@@ -289,7 +268,7 @@ class TestingModule : public ModuleEnv {
}
void PopulateIndirectFunctionTable() {
- if (execution_mode_ == kExecuteInterpreted) return;
+ if (interpret()) return;
// Initialize the fixed arrays in instance->function_tables.
for (uint32_t i = 0; i < instance->function_tables.size(); i++) {
WasmIndirectFunctionTable& table = module_.function_tables[i];
@@ -324,12 +303,11 @@ class TestingModule : public ModuleEnv {
WasmFunction* GetFunctionAt(int index) { return &module_.functions[index]; }
WasmInterpreter* interpreter() { return interpreter_; }
- WasmExecutionMode execution_mode() { return execution_mode_; }
+ bool interpret() { return interpreter_ != nullptr; }
Isolate* isolate() { return isolate_; }
Handle<WasmInstanceObject> instance_object() { return instance_object_; }
private:
- WasmExecutionMode execution_mode_;
WasmModule module_;
WasmInstance instance_;
Isolate* isolate_;
@@ -378,8 +356,9 @@ inline void TestBuildingGraph(Zone* zone, JSGraph* jsgraph, ModuleEnv* module,
FunctionSig* sig,
SourcePositionTable* source_position_table,
const byte* start, const byte* end) {
- compiler::WasmGraphBuilder builder(module, zone, jsgraph, sig,
- source_position_table);
+ compiler::WasmGraphBuilder builder(
+ module, zone, jsgraph, CEntryStub(jsgraph->isolate(), 1).GetCode(), sig,
+ source_position_table);
DecodeResult result =
BuildTFGraph(zone->allocator(), &builder, sig, start, end);
if (result.failed()) {
@@ -389,10 +368,10 @@ inline void TestBuildingGraph(Zone* zone, JSGraph* jsgraph, ModuleEnv* module,
result = BuildTFGraph(zone->allocator(), &builder, sig, start, end);
}
- uint32_t pc = result.error_offset;
+ uint32_t pc = result.error_offset();
std::ostringstream str;
str << "Verification failed; pc = +" << pc
- << ", msg = " << result.error_msg.c_str();
+ << ", msg = " << result.error_msg().c_str();
FATAL(str.str().c_str());
}
builder.Int64LoweringForTesting();
@@ -566,7 +545,6 @@ class WasmFunctionCompiler : private GraphAndBuilders {
if (interpreter_) {
// Add the code to the interpreter.
interpreter_->SetFunctionCodeForTesting(function_, start, end);
- return;
}
// Build the TurboFan graph.
@@ -714,7 +692,10 @@ class WasmRunnerBase : public HandleAndZoneScope {
uint32_t function_index() { return functions_[0]->function_index(); }
WasmFunction* function() { return functions_[0]->function_; }
- WasmInterpreter* interpreter() { return functions_[0]->interpreter_; }
+ WasmInterpreter* interpreter() {
+ DCHECK(interpret());
+ return functions_[0]->interpreter_;
+ }
bool possible_nondeterminism() { return possible_nondeterminism_; }
TestingModule& module() { return module_; }
Zone* zone() { return &zone_; }
@@ -729,6 +710,8 @@ class WasmRunnerBase : public HandleAndZoneScope {
module_.instance->context = main_isolate()->native_context();
}
+ bool interpret() { return module_.interpret(); }
+
private:
FunctionSig* CreateSig(MachineType return_type,
Vector<MachineType> param_types) {
@@ -768,8 +751,6 @@ class WasmRunnerBase : public HandleAndZoneScope {
bool compiled_ = false;
bool possible_nondeterminism_ = false;
- bool interpret() { return module_.execution_mode() == kExecuteInterpreted; }
-
public:
// This field has to be static. Otherwise, gcc complains about the use in
// the lambda context below.
diff --git a/deps/v8/src/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index a0b083bbdc..81dc87eb66 100644
--- a/deps/v8/src/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -144,10 +144,10 @@
#define WASM_ZERO kExprI32Const, 0
#define WASM_ONE kExprI32Const, 1
-#define I32V_MIN(length) -(1 << (6 + (7 * ((length) - 1))))
-#define I32V_MAX(length) ((1 << (6 + (7 * ((length) - 1)))) - 1)
-#define I64V_MIN(length) -(1LL << (6 + (7 * ((length) - 1))))
-#define I64V_MAX(length) ((1LL << (6 + 7 * ((length) - 1))) - 1)
+#define I32V_MIN(length) -(1 << (6 + (7 * ((length)-1))))
+#define I32V_MAX(length) ((1 << (6 + (7 * ((length)-1)))) - 1)
+#define I64V_MIN(length) -(1LL << (6 + (7 * ((length)-1))))
+#define I64V_MAX(length) ((1LL << (6 + 7 * ((length)-1))) - 1)
#define I32V_IN_RANGE(value, length) \
((value) >= I32V_MIN(length) && (value) <= I32V_MAX(length))
@@ -170,90 +170,30 @@ inline void CheckI64v(int64_t value, int length) {
DCHECK(length == 10 || I64V_IN_RANGE(value, length));
}
-// A helper for encoding local declarations prepended to the body of a
-// function.
-// TODO(titzer): move this to an appropriate header.
-class LocalDeclEncoder {
- public:
- explicit LocalDeclEncoder(Zone* zone, FunctionSig* s = nullptr)
- : sig(s), local_decls(zone), total(0) {}
-
- // Prepend local declarations by creating a new buffer and copying data
- // over. The new buffer must be delete[]'d by the caller.
- void Prepend(Zone* zone, const byte** start, const byte** end) const {
- size_t size = (*end - *start);
- byte* buffer = reinterpret_cast<byte*>(zone->New(Size() + size));
- size_t pos = Emit(buffer);
- memcpy(buffer + pos, *start, size);
- pos += size;
- *start = buffer;
- *end = buffer + pos;
- }
-
- size_t Emit(byte* buffer) const {
- size_t pos = 0;
- pos = WriteUint32v(buffer, pos, static_cast<uint32_t>(local_decls.size()));
- for (size_t i = 0; i < local_decls.size(); ++i) {
- pos = WriteUint32v(buffer, pos, local_decls[i].first);
- buffer[pos++] = WasmOpcodes::ValueTypeCodeFor(local_decls[i].second);
- }
- DCHECK_EQ(Size(), pos);
- return pos;
- }
-
- // Add locals declarations to this helper. Return the index of the newly added
- // local(s), with an optional adjustment for the parameters.
- uint32_t AddLocals(uint32_t count, ValueType type) {
- uint32_t result =
- static_cast<uint32_t>(total + (sig ? sig->parameter_count() : 0));
- total += count;
- if (local_decls.size() > 0 && local_decls.back().second == type) {
- count += local_decls.back().first;
- local_decls.pop_back();
- }
- local_decls.push_back(std::pair<uint32_t, ValueType>(count, type));
- return result;
- }
-
- size_t Size() const {
- size_t size = SizeofUint32v(static_cast<uint32_t>(local_decls.size()));
- for (auto p : local_decls) size += 1 + SizeofUint32v(p.first);
- return size;
- }
-
- bool has_sig() const { return sig != nullptr; }
- FunctionSig* get_sig() const { return sig; }
- void set_sig(FunctionSig* s) { sig = s; }
-
- private:
- FunctionSig* sig;
- ZoneVector<std::pair<uint32_t, ValueType>> local_decls;
- size_t total;
-
- size_t SizeofUint32v(uint32_t val) const {
- size_t size = 1;
- while (true) {
- byte b = val & MASK_7;
- if (b == val) return size;
- size++;
- val = val >> 7;
- }
+inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
+ switch (type.representation()) {
+ case MachineRepresentation::kWord8:
+ return store ? kExprI32StoreMem8
+ : type.IsSigned() ? kExprI32LoadMem8S : kExprI32LoadMem8U;
+ case MachineRepresentation::kWord16:
+ return store ? kExprI32StoreMem16
+ : type.IsSigned() ? kExprI32LoadMem16S : kExprI32LoadMem16U;
+ case MachineRepresentation::kWord32:
+ return store ? kExprI32StoreMem : kExprI32LoadMem;
+ case MachineRepresentation::kWord64:
+ return store ? kExprI64StoreMem : kExprI64LoadMem;
+ case MachineRepresentation::kFloat32:
+ return store ? kExprF32StoreMem : kExprF32LoadMem;
+ case MachineRepresentation::kFloat64:
+ return store ? kExprF64StoreMem : kExprF64LoadMem;
+ case MachineRepresentation::kSimd128:
+ return store ? kExprS128StoreMem : kExprS128LoadMem;
+ default:
+ UNREACHABLE();
+ return kExprNop;
}
+}
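A quick spot-check of the mapping, as a sketch (these assertions assume the free function is, as the macro rewrites below suggest, a drop-in replacement for the removed WasmOpcodes::LoadStoreOpcodeOf):

  // Signed sub-word loads pick the sign-extending opcode; stores ignore
  // signedness.
  DCHECK_EQ(kExprI32LoadMem8S, LoadStoreOpcodeOf(MachineType::Int8(), false));
  DCHECK_EQ(kExprI32StoreMem8, LoadStoreOpcodeOf(MachineType::Int8(), true));
  DCHECK_EQ(kExprF64StoreMem, LoadStoreOpcodeOf(MachineType::Float64(), true));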
- // TODO(titzer): lift encoding of u32v to a common place.
- size_t WriteUint32v(byte* buffer, size_t pos, uint32_t val) const {
- while (true) {
- byte b = val & MASK_7;
- if (b == val) {
- buffer[pos++] = b;
- break;
- }
- buffer[pos++] = 0x80 | b;
- val = val >> 7;
- }
- return pos;
- }
-};
} // namespace wasm
} // namespace internal
} // namespace v8
@@ -389,32 +329,29 @@ class LocalDeclEncoder {
#define WASM_GET_GLOBAL(index) kExprGetGlobal, static_cast<byte>(index)
#define WASM_SET_GLOBAL(index, val) \
val, kExprSetGlobal, static_cast<byte>(index)
-#define WASM_LOAD_MEM(type, index) \
- index, static_cast<byte>( \
- v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+#define WASM_LOAD_MEM(type, index) \
+ index, \
+ static_cast<byte>(v8::internal::wasm::LoadStoreOpcodeOf(type, false)), \
ZERO_ALIGNMENT, ZERO_OFFSET
-#define WASM_STORE_MEM(type, index, val) \
- index, val, \
- static_cast<byte>( \
- v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+#define WASM_STORE_MEM(type, index, val) \
+ index, val, \
+ static_cast<byte>(v8::internal::wasm::LoadStoreOpcodeOf(type, true)), \
ZERO_ALIGNMENT, ZERO_OFFSET
-#define WASM_LOAD_MEM_OFFSET(type, offset, index) \
- index, static_cast<byte>( \
- v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
- ZERO_ALIGNMENT, static_cast<byte>(offset)
-#define WASM_STORE_MEM_OFFSET(type, offset, index, val) \
- index, val, \
- static_cast<byte>( \
- v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
- ZERO_ALIGNMENT, static_cast<byte>(offset)
-#define WASM_LOAD_MEM_ALIGNMENT(type, index, alignment) \
- index, static_cast<byte>( \
- v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, false)), \
+#define WASM_LOAD_MEM_OFFSET(type, offset, index) \
+ index, \
+ static_cast<byte>(v8::internal::wasm::LoadStoreOpcodeOf(type, false)), \
+ ZERO_ALIGNMENT, offset
+#define WASM_STORE_MEM_OFFSET(type, offset, index, val) \
+ index, val, \
+ static_cast<byte>(v8::internal::wasm::LoadStoreOpcodeOf(type, true)), \
+ ZERO_ALIGNMENT, offset
+#define WASM_LOAD_MEM_ALIGNMENT(type, index, alignment) \
+ index, \
+ static_cast<byte>(v8::internal::wasm::LoadStoreOpcodeOf(type, false)), \
alignment, ZERO_OFFSET
-#define WASM_STORE_MEM_ALIGNMENT(type, index, alignment, val) \
- index, val, \
- static_cast<byte>( \
- v8::internal::wasm::WasmOpcodes::LoadStoreOpcodeOf(type, true)), \
+#define WASM_STORE_MEM_ALIGNMENT(type, index, alignment, val) \
+ index, val, \
+ static_cast<byte>(v8::internal::wasm::LoadStoreOpcodeOf(type, true)), \
alignment, ZERO_OFFSET
#define WASM_CALL_FUNCTION0(index) kExprCallFunction, static_cast<byte>(index)
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.cc b/deps/v8/test/common/wasm/wasm-module-runner.cc
index 6b53cdbf0c..fb2066a4bf 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.cc
+++ b/deps/v8/test/common/wasm/wasm-module-runner.cc
@@ -25,7 +25,7 @@ uint32_t GetMinModuleMemSize(const WasmModule* module) {
return WasmModule::kPageSize * module->min_mem_pages;
}
-const WasmModule* DecodeWasmModuleForTesting(
+std::unique_ptr<WasmModule> DecodeWasmModuleForTesting(
Isolate* isolate, ErrorThrower* thrower, const byte* module_start,
const byte* module_end, ModuleOrigin origin, bool verify_functions) {
// Decode the module, but don't verify function bodies, since we'll
@@ -36,14 +36,10 @@ const WasmModule* DecodeWasmModuleForTesting(
if (decoding_result.failed()) {
// Module verification failed. throw.
thrower->CompileError("WASM.compileRun() failed: %s",
- decoding_result.error_msg.c_str());
+ decoding_result.error_msg().c_str());
}
- if (thrower->error()) {
- if (decoding_result.val) delete decoding_result.val;
- return nullptr;
- }
- return decoding_result.val;
+ return std::move(decoding_result.val);
}
const Handle<WasmInstanceObject> InstantiateModuleForTesting(
@@ -78,8 +74,8 @@ const Handle<WasmInstanceObject> InstantiateModuleForTesting(
const Handle<WasmInstanceObject> CompileInstantiateWasmModuleForTesting(
Isolate* isolate, ErrorThrower* thrower, const byte* module_start,
const byte* module_end, ModuleOrigin origin) {
- std::unique_ptr<const WasmModule> module(DecodeWasmModuleForTesting(
- isolate, thrower, module_start, module_end, origin));
+ std::unique_ptr<WasmModule> module = DecodeWasmModuleForTesting(
+ isolate, thrower, module_start, module_end, origin);
if (module == nullptr) {
thrower->CompileError("Wasm module decoding failed");
diff --git a/deps/v8/test/common/wasm/wasm-module-runner.h b/deps/v8/test/common/wasm/wasm-module-runner.h
index 4d58513c14..b3d9b8f908 100644
--- a/deps/v8/test/common/wasm/wasm-module-runner.h
+++ b/deps/v8/test/common/wasm/wasm-module-runner.h
@@ -23,7 +23,7 @@ namespace wasm {
namespace testing {
// Decodes the given encoded module.
-const WasmModule* DecodeWasmModuleForTesting(
+std::unique_ptr<WasmModule> DecodeWasmModuleForTesting(
Isolate* isolate, ErrorThrower* thrower, const byte* module_start,
const byte* module_end, ModuleOrigin origin, bool verify_functions = false);
diff --git a/deps/v8/test/debugger/debug/debug-compile-optimized.js b/deps/v8/test/debugger/debug/debug-compile-optimized.js
index a9d5d6add4..c25bdfd4c0 100644
--- a/deps/v8/test/debugger/debug/debug-compile-optimized.js
+++ b/deps/v8/test/debugger/debug/debug-compile-optimized.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --crankshaft
+// Flags: --opt
Debug = debug.Debug;
diff --git a/deps/v8/test/debugger/debug/debug-evaluate-nested-let.js b/deps/v8/test/debugger/debug/debug-evaluate-nested-let.js
index 691dd2c3f2..726c28f5e5 100644
--- a/deps/v8/test/debugger/debug/debug-evaluate-nested-let.js
+++ b/deps/v8/test/debugger/debug/debug-evaluate-nested-let.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --crankshaft
+// Flags: --opt
Debug = debug.Debug
diff --git a/deps/v8/test/debugger/debug/debug-optimize.js b/deps/v8/test/debugger/debug/debug-optimize.js
index 5501b3fe93..f296816aa2 100644
--- a/deps/v8/test/debugger/debug/debug-optimize.js
+++ b/deps/v8/test/debugger/debug/debug-optimize.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --crankshaft --no-always-opt
+// Flags: --opt --no-always-opt
var Debug = debug.Debug;
diff --git a/deps/v8/test/debugger/debug/debug-scopes.js b/deps/v8/test/debugger/debug/debug-scopes.js
index b2f9e33825..7ee7a8d6e6 100644
--- a/deps/v8/test/debugger/debug/debug-scopes.js
+++ b/deps/v8/test/debugger/debug/debug-scopes.js
@@ -1144,7 +1144,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeChainPositions(
- [{start: 52, end: 111}, {start: 22, end: 145}, {}, {}], exec_state);
+ [{start: 42, end: 111}, {start: 22, end: 145}, {}, {}], exec_state);
}
eval(code3);
EndTest();
@@ -1165,7 +1165,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeChainPositions([{start: 66, end: 147},
- {start: 52, end: 147},
+ {start: 42, end: 147},
{start: 22, end: 181},
{}, {}], exec_state);
}
diff --git a/deps/v8/test/debugger/debug/debug-stepin-accessor.js b/deps/v8/test/debugger/debug/debug-stepin-accessor.js
index 14da5584f4..d0b49dd847 100644
--- a/deps/v8/test/debugger/debug/debug-stepin-accessor.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-accessor.js
@@ -233,7 +233,8 @@ function testProtoSetter1_2() {
}
for (var n in this) {
- if (n.substr(0, 4) != 'test') {
+ if (n.substr(0, 4) != 'test' ||
+ n == 'testRunner') {
continue;
}
state = 1;
diff --git a/deps/v8/test/debugger/debug/debug-stepin-property-function-call.js b/deps/v8/test/debugger/debug/debug-stepin-property-function-call.js
index 0fdb94ed2b..8dd8334c9e 100644
--- a/deps/v8/test/debugger/debug/debug-stepin-property-function-call.js
+++ b/deps/v8/test/debugger/debug/debug-stepin-property-function-call.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --nocrankshaft
+// Flags: --noopt
Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/debugger/debug/es6/debug-blockscopes.js b/deps/v8/test/debugger/debug/es6/debug-blockscopes.js
index bc00df7129..1cb279bacc 100644
--- a/deps/v8/test/debugger/debug/es6/debug-blockscopes.js
+++ b/deps/v8/test/debugger/debug/es6/debug-blockscopes.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --noanalyze-environment-liveness --crankshaft
+// Flags: --noanalyze-environment-liveness --opt
// The functions used for testing backtraces. They are at the top to make the
// testing of source line/column easier.
diff --git a/deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js b/deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js
index e67c42de61..cbecca9d6e 100644
--- a/deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js
+++ b/deps/v8/test/debugger/debug/harmony/modules-debug-scopes1.js
@@ -751,7 +751,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Module,
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
- CheckScopeChainPositions([{start: 52, end: 111}, {start: 22, end: 145}],
+ CheckScopeChainPositions([{start: 42, end: 111}, {start: 22, end: 145}],
exec_state);
}
eval(code3);
@@ -774,7 +774,7 @@ listener_delegate = function(exec_state) {
debug.ScopeType.Script,
debug.ScopeType.Global], exec_state);
CheckScopeChainPositions([{start: 66, end: 147},
- {start: 52, end: 147},
+ {start: 42, end: 147},
{start: 22, end: 181}], exec_state);
}
eval(code4);
diff --git a/deps/v8/test/debugger/debug/regress/regress-crbug-465298.js b/deps/v8/test/debugger/debug/regress/regress-crbug-465298.js
index 7ccdcd882f..512a9e053f 100644
--- a/deps/v8/test/debugger/debug/regress/regress-crbug-465298.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-465298.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --noturbo-osr --noturbo-inlining
+// Flags: --noturbo-inlining
var stdlib = this;
var buffer = new ArrayBuffer(64 * 1024);
diff --git a/deps/v8/test/debugger/debug/regress/regress-crbug-517592.js b/deps/v8/test/debugger/debug/regress/regress-crbug-517592.js
index c552cfa4f0..e4a905d7c5 100644
--- a/deps/v8/test/debugger/debug/regress/regress-crbug-517592.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-517592.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --min-preparse-length=10
-
var source =
"var foo = function foo() {\n" +
" return 1;\n" +
diff --git a/deps/v8/test/debugger/debug/regress/regress-crbug-633999.js b/deps/v8/test/debugger/debug/regress/regress-crbug-633999.js
index e5f56166d5..ebaabd7104 100644
--- a/deps/v8/test/debugger/debug/regress/regress-crbug-633999.js
+++ b/deps/v8/test/debugger/debug/regress/regress-crbug-633999.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --crankshaft --no-turbo
+// Flags: --opt --no-turbo
var Debug = debug.Debug
var exception = null;
diff --git a/deps/v8/test/debugger/debugger.status b/deps/v8/test/debugger/debugger.status
index ccb1d51f78..d76bd5017d 100644
--- a/deps/v8/test/debugger/debugger.status
+++ b/deps/v8/test/debugger/debugger.status
@@ -73,6 +73,19 @@
'*': [SKIP],
}], # variant == wasm_traps
+##############################################################################
+['arch == arm and not simulator_run', {
+ # Too slow on chromebooks.
+ 'debug/ignition/debug-step-prefix-bytecodes': [SKIP],
+}], # 'arch == arm and not simulator_run'
+
+##############################################################################
+['variant in [noturbofan, noturbofan_stress] and system == macos and asan', {
+ # Too slow for old pipeline and mac asan.
+ 'debug/*': [SKIP],
+}], # variant in [noturbofan, noturbofan_stress] and system == macos and asan
+
+##############################################################################
['arch == s390 or arch == s390x', {
# Stack manipulations in LiveEdit is not implemented for this arch.
diff --git a/deps/v8/test/default.gyp b/deps/v8/test/default.gyp
index fe4e47f29d..2c6429bada 100644
--- a/deps/v8/test/default.gyp
+++ b/deps/v8/test/default.gyp
@@ -18,6 +18,7 @@
'mjsunit/mjsunit.gyp:mjsunit_run',
'preparser/preparser.gyp:preparser_run',
'unittests/unittests.gyp:unittests_run',
+ 'wasm-spec-tests/wasm-spec-tests.gyp:wasm_spec_tests_run',
],
'includes': [
'../gypfiles/features.gypi',
diff --git a/deps/v8/test/default.isolate b/deps/v8/test/default.isolate
index 8ef69c3b15..e9104631d6 100644
--- a/deps/v8/test/default.isolate
+++ b/deps/v8/test/default.isolate
@@ -15,7 +15,9 @@
'intl/intl.isolate',
'message/message.isolate',
'mjsunit/mjsunit.isolate',
+ 'mkgrokdump/mkgrokdump.isolate',
'preparser/preparser.isolate',
'unittests/unittests.isolate',
+ 'wasm-spec-tests/wasm-spec-tests.isolate',
],
}
diff --git a/deps/v8/test/fuzzer/fuzzer.gyp b/deps/v8/test/fuzzer/fuzzer.gyp
index 3b93808533..302cea377e 100644
--- a/deps/v8/test/fuzzer/fuzzer.gyp
+++ b/deps/v8/test/fuzzer/fuzzer.gyp
@@ -169,6 +169,8 @@
'../common/wasm/test-signatures.h',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
+ 'wasm-fuzzer-common.cc',
+ 'wasm-fuzzer-common.h',
],
},
{
@@ -198,6 +200,8 @@
'../common/wasm/test-signatures.h',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
+ 'wasm-fuzzer-common.cc',
+ 'wasm-fuzzer-common.h',
],
},
{
@@ -227,6 +231,8 @@
'../common/wasm/test-signatures.h',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
+ 'wasm-fuzzer-common.cc',
+ 'wasm-fuzzer-common.h',
],
},
{
@@ -255,8 +261,8 @@
'wasm-data-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
- 'wasm-section-fuzzers.cc',
- 'wasm-section-fuzzers.h',
+ 'wasm-fuzzer-common.cc',
+ 'wasm-fuzzer-common.h',
],
},
{
@@ -285,8 +291,8 @@
'wasm-function-sigs-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
- 'wasm-section-fuzzers.cc',
- 'wasm-section-fuzzers.h',
+ 'wasm-fuzzer-common.cc',
+ 'wasm-fuzzer-common.h',
],
},
{
@@ -315,8 +321,8 @@
'wasm-globals-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
- 'wasm-section-fuzzers.cc',
- 'wasm-section-fuzzers.h',
+ 'wasm-fuzzer-common.cc',
+ 'wasm-fuzzer-common.h',
],
},
{
@@ -345,8 +351,8 @@
'wasm-imports-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
- 'wasm-section-fuzzers.cc',
- 'wasm-section-fuzzers.h',
+ 'wasm-fuzzer-common.cc',
+ 'wasm-fuzzer-common.h',
],
},
{
@@ -375,8 +381,8 @@
'wasm-memory-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
- 'wasm-section-fuzzers.cc',
- 'wasm-section-fuzzers.h',
+ 'wasm-fuzzer-common.cc',
+ 'wasm-fuzzer-common.h',
],
},
{
@@ -405,8 +411,8 @@
'wasm-names-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
- 'wasm-section-fuzzers.cc',
- 'wasm-section-fuzzers.h',
+ 'wasm-fuzzer-common.cc',
+ 'wasm-fuzzer-common.h',
],
},
{
@@ -435,8 +441,8 @@
'wasm-types-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
- 'wasm-section-fuzzers.cc',
- 'wasm-section-fuzzers.h',
+ 'wasm-fuzzer-common.cc',
+ 'wasm-fuzzer-common.h',
],
},
{
diff --git a/deps/v8/test/fuzzer/parser.cc b/deps/v8/test/fuzzer/parser.cc
index e364d83149..76666e85be 100644
--- a/deps/v8/test/fuzzer/parser.cc
+++ b/deps/v8/test/fuzzer/parser.cc
@@ -14,7 +14,51 @@
#include "src/parsing/preparser.h"
#include "test/fuzzer/fuzzer-support.h"
+#include <cctype>
+#include <list>
+
+bool IsValidInput(const uint8_t* data, size_t size) {
+ std::list<char> parentheses;
+ const char* ptr = reinterpret_cast<const char*>(data);
+
+ for (size_t i = 0; i != size; ++i) {
+ // Check that all characters in the data are valid.
+ if (!(std::isspace(ptr[i]) || std::isprint(ptr[i]))) {
+ return false;
+ }
+
+ // Check balance of parentheses in the data.
+ switch (ptr[i]) {
+ case '(':
+ case '[':
+ case '{':
+ parentheses.push_back(ptr[i]);
+ break;
+ case ')':
+ if (parentheses.back() != '(') return false;
+ parentheses.pop_back();
+ break;
+ case ']':
+ if (parentheses.back() != '[') return false;
+ parentheses.pop_back();
+ break;
+ case '}':
+ if (parentheses.back() != '{') return false;
+ parentheses.pop_back();
+ break;
+ default:
+ break;
+ }
+ }
+
+ return parentheses.empty();
+}
+
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ if (!IsValidInput(data, size)) {
+ return 0;
+ }
+
v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
v8::Isolate* isolate = support->GetIsolate();
@@ -36,7 +80,9 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
v8::internal::Handle<v8::internal::Script> script =
factory->NewScript(source.ToHandleChecked());
v8::internal::ParseInfo info(script);
- v8::internal::parsing::ParseProgram(&info, i_isolate);
+ if (!v8::internal::parsing::ParseProgram(&info, i_isolate)) {
+ i_isolate->OptionalRescheduleException(true);
+ }
isolate->RequestGarbageCollectionForTesting(
v8::Isolate::kFullGarbageCollection);
return 0;
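`IsValidInput` above prunes the fuzz corpus before parsing: inputs containing non-printable characters or unbalanced brackets are rejected up front so the parser fuzzer spends its time on plausibly parseable text. Two hardening caveats, reflected in the sketch below (an independent reimplementation, not the in-tree code): the hunk reads `parentheses.back()` without an emptiness check, so an input whose first bracket is a closer touches an empty list, and `std::isspace`/`std::isprint` take an `int` that must not be a negative `char`.

```cpp
// Hardened sketch of the bracket-balance check, assuming nothing about
// the input (empty-stack and negative-char cases are guarded).
#include <cctype>
#include <cstddef>
#include <cstdint>
#include <vector>

bool IsBalanced(const uint8_t* data, size_t size) {
  std::vector<char> stack;  // a vector is the idiomatic stack here
  const char* p = reinterpret_cast<const char*>(data);
  for (size_t i = 0; i != size; ++i) {
    unsigned char c = static_cast<unsigned char>(p[i]);
    if (!std::isspace(c) && !std::isprint(c)) return false;
    switch (c) {
      case '(': case '[': case '{':
        stack.push_back(static_cast<char>(c));
        break;
      case ')':
        if (stack.empty() || stack.back() != '(') return false;
        stack.pop_back();
        break;
      case ']':
        if (stack.empty() || stack.back() != '[') return false;
        stack.pop_back();
        break;
      case '}':
        if (stack.empty() || stack.back() != '{') return false;
        stack.pop_back();
        break;
    }
  }
  return stack.empty();
}
```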
diff --git a/deps/v8/test/fuzzer/testcfg.py b/deps/v8/test/fuzzer/testcfg.py
index 0e07fb574e..0732ddebe0 100644
--- a/deps/v8/test/fuzzer/testcfg.py
+++ b/deps/v8/test/fuzzer/testcfg.py
@@ -32,6 +32,9 @@ class FuzzerTestSuite(testsuite.TestSuite):
for subtest in FuzzerTestSuite.SUB_TESTS:
shell = 'v8_simple_%s_fuzzer' % subtest
for fname in os.listdir(os.path.join(self.root, subtest)):
+ if subtest in ["wasm", "wasm_asmjs"] and fname.endswith(".wasm"):
+ os.remove(os.path.join(self.root, subtest, fname))
+ continue
if not os.path.isfile(os.path.join(self.root, subtest, fname)):
continue
test = testcase.TestCase(self, '%s/%s' % (subtest, fname),
diff --git a/deps/v8/test/fuzzer/wasm-call.cc b/deps/v8/test/fuzzer/wasm-call.cc
index 3291d9eab1..42a7635d7a 100644
--- a/deps/v8/test/fuzzer/wasm-call.cc
+++ b/deps/v8/test/fuzzer/wasm-call.cc
@@ -16,176 +16,113 @@
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-module-runner.h"
#include "test/fuzzer/fuzzer-support.h"
+#include "test/fuzzer/wasm-fuzzer-common.h"
-#define WASM_CODE_FUZZER_HASH_SEED 83
#define MAX_NUM_FUNCTIONS 3
#define MAX_NUM_PARAMS 3
+using namespace v8::internal;
using namespace v8::internal::wasm;
-
-template <typename V>
-static inline V read_value(const uint8_t** data, size_t* size, bool* ok) {
- // The status flag {ok} checks that the decoding up until now was okay, and
- // that a value of type V can be read without problems.
- *ok &= (*size > sizeof(V));
- if (!(*ok)) return 0;
- V result = v8::internal::ReadLittleEndianValue<V>(*data);
- *data += sizeof(V);
- *size -= sizeof(V);
- return result;
-}
-
-static void add_argument(
- v8::internal::Isolate* isolate, ValueType type, WasmVal* interpreter_args,
- v8::internal::Handle<v8::internal::Object>* compiled_args, int* argc,
- const uint8_t** data, size_t* size, bool* ok) {
- if (!(*ok)) return;
- switch (type) {
- case kWasmF32: {
- float value = read_value<float>(data, size, ok);
- interpreter_args[*argc] = WasmVal(value);
- compiled_args[*argc] =
- isolate->factory()->NewNumber(static_cast<double>(value));
- break;
- }
- case kWasmF64: {
- double value = read_value<double>(data, size, ok);
- interpreter_args[*argc] = WasmVal(value);
- compiled_args[*argc] = isolate->factory()->NewNumber(value);
- break;
- }
- case kWasmI32: {
- int32_t value = read_value<int32_t>(data, size, ok);
- interpreter_args[*argc] = WasmVal(value);
- compiled_args[*argc] =
- isolate->factory()->NewNumber(static_cast<double>(value));
- break;
- }
- default:
- UNREACHABLE();
+using namespace v8::internal::wasm::fuzzer;
+
+class WasmCallFuzzer : public WasmExecutionFuzzer {
+ template <typename V>
+ static inline V read_value(const uint8_t** data, size_t* size, bool* ok) {
+ // The status flag {ok} checks that the decoding up until now was okay, and
+ // that a value of type V can be read without problems.
+ *ok &= (*size > sizeof(V));
+ if (!(*ok)) return 0;
+ V result = v8::internal::ReadLittleEndianValue<V>(*data);
+ *data += sizeof(V);
+ *size -= sizeof(V);
+ return result;
}
- (*argc)++;
-}
-
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
- v8::Isolate* isolate = support->GetIsolate();
- v8::internal::Isolate* i_isolate =
- reinterpret_cast<v8::internal::Isolate*>(isolate);
- // Clear any pending exceptions from a prior run.
- if (i_isolate->has_pending_exception()) {
- i_isolate->clear_pending_exception();
+ static void add_argument(
+ v8::internal::Isolate* isolate, ValueType type, WasmVal* interpreter_args,
+ v8::internal::Handle<v8::internal::Object>* compiler_args, int* argc,
+ const uint8_t** data, size_t* size, bool* ok) {
+ if (!(*ok)) return;
+ switch (type) {
+ case kWasmF32: {
+ float value = read_value<float>(data, size, ok);
+ interpreter_args[*argc] = WasmVal(value);
+ compiler_args[*argc] =
+ isolate->factory()->NewNumber(static_cast<double>(value));
+ break;
+ }
+ case kWasmF64: {
+ double value = read_value<double>(data, size, ok);
+ interpreter_args[*argc] = WasmVal(value);
+ compiler_args[*argc] = isolate->factory()->NewNumber(value);
+ break;
+ }
+ case kWasmI32: {
+ int32_t value = read_value<int32_t>(data, size, ok);
+ interpreter_args[*argc] = WasmVal(value);
+ compiler_args[*argc] =
+ isolate->factory()->NewNumber(static_cast<double>(value));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ (*argc)++;
}
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- v8::Context::Scope context_scope(support->GetContext());
- v8::TryCatch try_catch(isolate);
-
- v8::internal::AccountingAllocator allocator;
- v8::internal::Zone zone(&allocator, ZONE_NAME);
-
- bool ok = true;
- uint8_t num_functions =
- (read_value<uint8_t>(&data, &size, &ok) % MAX_NUM_FUNCTIONS) + 1;
-
- ValueType types[] = {kWasmF32, kWasmF64, kWasmI32, kWasmI64};
- WasmVal interpreter_args[3];
- v8::internal::Handle<v8::internal::Object> compiled_args[3];
- int argc = 0;
-
- WasmModuleBuilder builder(&zone);
- for (int fun = 0; fun < num_functions; fun++) {
- size_t num_params = static_cast<size_t>(
- (read_value<uint8_t>(&data, &size, &ok) % MAX_NUM_PARAMS) + 1);
- FunctionSig::Builder sig_builder(&zone, 1, num_params);
- sig_builder.AddReturn(kWasmI32);
- for (size_t param = 0; param < num_params; param++) {
- // The main function cannot handle int64 parameters.
- ValueType param_type = types[(read_value<uint8_t>(&data, &size, &ok) %
- (arraysize(types) - (fun == 0 ? 1 : 0)))];
- sig_builder.AddParam(param_type);
+ virtual bool GenerateModule(
+ Isolate* isolate, Zone* zone, const uint8_t* data, size_t size,
+ ZoneBuffer& buffer, int32_t& num_args,
+ std::unique_ptr<WasmVal[]>& interpreter_args,
+ std::unique_ptr<Handle<Object>[]>& compiler_args) override {
+ bool ok = true;
+ uint8_t num_functions =
+ (read_value<uint8_t>(&data, &size, &ok) % MAX_NUM_FUNCTIONS) + 1;
+
+ ValueType types[] = {kWasmF32, kWasmF64, kWasmI32, kWasmI64};
+
+ interpreter_args.reset(new WasmVal[3]);
+ compiler_args.reset(new Handle<Object>[3]);
+
+ WasmModuleBuilder builder(zone);
+ for (int fun = 0; fun < num_functions; fun++) {
+ size_t num_params = static_cast<size_t>(
+ (read_value<uint8_t>(&data, &size, &ok) % MAX_NUM_PARAMS) + 1);
+ FunctionSig::Builder sig_builder(zone, 1, num_params);
+ sig_builder.AddReturn(kWasmI32);
+ for (size_t param = 0; param < num_params; param++) {
+ // The main function cannot handle int64 parameters.
+ ValueType param_type = types[(read_value<uint8_t>(&data, &size, &ok) %
+ (arraysize(types) - (fun == 0 ? 1 : 0)))];
+ sig_builder.AddParam(param_type);
+ if (fun == 0) {
+ add_argument(isolate, param_type, interpreter_args.get(),
+ compiler_args.get(), &num_args, &data, &size, &ok);
+ }
+ }
+ v8::internal::wasm::WasmFunctionBuilder* f =
+ builder.AddFunction(sig_builder.Build());
+ uint32_t code_size = static_cast<uint32_t>(size / num_functions);
+ f->EmitCode(data, code_size);
+ uint8_t end_opcode = kExprEnd;
+ f->EmitCode(&end_opcode, 1);
+ data += code_size;
+ size -= code_size;
if (fun == 0) {
- add_argument(i_isolate, param_type, interpreter_args, compiled_args,
- &argc, &data, &size, &ok);
+ builder.AddExport(v8::internal::CStrVector("main"), f);
}
}
- v8::internal::wasm::WasmFunctionBuilder* f =
- builder.AddFunction(sig_builder.Build());
- uint32_t code_size = static_cast<uint32_t>(size / num_functions);
- f->EmitCode(data, code_size);
- uint8_t end_opcode = kExprEnd;
- f->EmitCode(&end_opcode, 1);
- data += code_size;
- size -= code_size;
- if (fun == 0) {
- f->ExportAs(v8::internal::CStrVector("main"));
- }
- }
-
- ZoneBuffer buffer(&zone);
- builder.WriteTo(buffer);
-
- if (!ok) {
- // The input data was too short.
- return 0;
- }
- v8::internal::wasm::testing::SetupIsolateForWasmModule(i_isolate);
+ builder.WriteTo(buffer);
- v8::internal::HandleScope scope(i_isolate);
-
- ErrorThrower interpreter_thrower(i_isolate, "Interpreter");
- std::unique_ptr<const WasmModule> module(testing::DecodeWasmModuleForTesting(
- i_isolate, &interpreter_thrower, buffer.begin(), buffer.end(),
- v8::internal::wasm::ModuleOrigin::kWasmOrigin, true));
-
- if (module == nullptr) {
- return 0;
- }
- ModuleWireBytes wire_bytes(buffer.begin(), buffer.end());
- int32_t result_interpreted;
- bool possible_nondeterminism = false;
- {
- result_interpreted = testing::InterpretWasmModule(
- i_isolate, &interpreter_thrower, module.get(), wire_bytes, 0,
- interpreter_args, &possible_nondeterminism);
- }
-
- ErrorThrower compiler_thrower(i_isolate, "Compiler");
- v8::internal::Handle<v8::internal::JSObject> instance =
- testing::InstantiateModuleForTesting(i_isolate, &compiler_thrower,
- module.get(), wire_bytes);
-
- if (!interpreter_thrower.error()) {
- CHECK(!instance.is_null());
- } else {
- return 0;
- }
- int32_t result_compiled;
- {
- result_compiled = testing::CallWasmFunctionForTesting(
- i_isolate, instance, &compiler_thrower, "main", argc, compiled_args,
- v8::internal::wasm::ModuleOrigin::kWasmOrigin);
- }
-
- // The WebAssembly spec allows the sign bit of NaN to be non-deterministic.
- // This sign bit may cause result_interpreted to be different than
- // result_compiled. Therefore we do not check the equality of the results
- // if the execution may have produced a NaN at some point.
- if (possible_nondeterminism) return 0;
-
- if (result_interpreted == bit_cast<int32_t>(0xdeadbeef)) {
- CHECK(i_isolate->has_pending_exception());
- i_isolate->clear_pending_exception();
- } else {
- CHECK(!i_isolate->has_pending_exception());
- if (result_interpreted != result_compiled) {
- V8_Fatal(__FILE__, __LINE__, "WasmCodeFuzzerHash=%x",
- v8::internal::StringHasher::HashSequentialString(
- data, static_cast<int>(size), WASM_CODE_FUZZER_HASH_SEED));
+ if (!ok) {
+ // The input data was too short.
+ return 0;
}
+ return true;
}
- return 0;
+};
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ return WasmCallFuzzer().FuzzWasmModule(data, size);
}
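The rewrite above is the template method pattern: the shared harness (build a module from fuzz bytes, run it through both the interpreter and the compiler, compare results) moves into `WasmExecutionFuzzer::FuzzWasmModule`, and each fuzzer now supplies only a `GenerateModule` override. A compilable sketch of that shape with simplified types, where `RunHarness` stands in for the real comparison logic:

```cpp
// Sketch of the template-method split the refactor introduces.
#include <cstddef>
#include <cstdint>
#include <vector>

class ExecutionFuzzerBase {
 public:
  virtual ~ExecutionFuzzerBase() = default;
  // Shared harness: derive a module from the fuzz input, then execute it.
  int Fuzz(const uint8_t* data, size_t size) {
    std::vector<uint8_t> module_bytes;
    if (!GenerateModule(data, size, &module_bytes)) return 0;
    return RunHarness(module_bytes);
  }

 protected:
  // Each fuzzer only decides how raw bytes become a module.
  virtual bool GenerateModule(const uint8_t* data, size_t size,
                              std::vector<uint8_t>* out) = 0;

 private:
  int RunHarness(const std::vector<uint8_t>& bytes) {
    // Stand-in for decode + interpret + compile + compare.
    return bytes.empty() ? 0 : 0;
  }
};

class RawBodyFuzzer : public ExecutionFuzzerBase {
  bool GenerateModule(const uint8_t* data, size_t size,
                      std::vector<uint8_t>* out) override {
    out->assign(data, data + size);  // wasm-code.cc: bytes become a body
    return true;
  }
};

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
  return RawBodyFuzzer().Fuzz(data, size);
}
```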
diff --git a/deps/v8/test/fuzzer/wasm-code.cc b/deps/v8/test/fuzzer/wasm-code.cc
index a80cfcfaca..ec6db6a7c6 100644
--- a/deps/v8/test/fuzzer/wasm-code.cc
+++ b/deps/v8/test/fuzzer/wasm-code.cc
@@ -5,159 +5,43 @@
#include <stddef.h>
#include <stdint.h>
-#include "include/v8.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
-#include "src/ostreams.h"
#include "src/wasm/wasm-interpreter.h"
#include "src/wasm/wasm-module-builder.h"
-#include "src/wasm/wasm-module.h"
#include "test/common/wasm/test-signatures.h"
-#include "test/common/wasm/wasm-module-runner.h"
-#include "test/fuzzer/fuzzer-support.h"
-
-#define WASM_CODE_FUZZER_HASH_SEED 83
+#include "test/fuzzer/wasm-fuzzer-common.h"
+using namespace v8::internal;
using namespace v8::internal::wasm;
+using namespace v8::internal::wasm::fuzzer;
+
+class WasmCodeFuzzer : public WasmExecutionFuzzer {
+ virtual bool GenerateModule(
+ Isolate* isolate, Zone* zone, const uint8_t* data, size_t size,
+ ZoneBuffer& buffer, int32_t& num_args,
+ std::unique_ptr<WasmVal[]>& interpreter_args,
+ std::unique_ptr<Handle<Object>[]>& compiler_args) override {
+ TestSignatures sigs;
+ WasmModuleBuilder builder(zone);
+ WasmFunctionBuilder* f = builder.AddFunction(sigs.i_iii());
+ f->EmitCode(data, static_cast<uint32_t>(size));
+ uint8_t end_opcode = kExprEnd;
+ f->EmitCode(&end_opcode, 1);
+ builder.AddExport(CStrVector("main"), f);
+
+ builder.WriteTo(buffer);
+ num_args = 3;
+ interpreter_args.reset(new WasmVal[3]{WasmVal(1), WasmVal(2), WasmVal(3)});
+
+ compiler_args.reset(new Handle<Object>[3]{
+ handle(Smi::FromInt(1), isolate), handle(Smi::FromInt(1), isolate),
+ handle(Smi::FromInt(1), isolate)});
+ return true;
+ }
+};
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- // Save the flag so that we can change it and restore it later.
- bool generate_test = v8::internal::FLAG_wasm_code_fuzzer_gen_test;
- if (generate_test) {
- v8::internal::OFStream os(stdout);
-
- os << "// Copyright 2017 the V8 project authors. All rights reserved."
- << std::endl;
- os << "// Use of this source code is governed by a BSD-style license that "
- "can be"
- << std::endl;
- os << "// found in the LICENSE file." << std::endl;
- os << std::endl;
- os << "load(\"test/mjsunit/wasm/wasm-constants.js\");" << std::endl;
- os << "load(\"test/mjsunit/wasm/wasm-module-builder.js\");" << std::endl;
- os << std::endl;
- os << "(function() {" << std::endl;
- os << " var builder = new WasmModuleBuilder();" << std::endl;
- os << " builder.addMemory(16, 32, false);" << std::endl;
- os << " builder.addFunction(\"test\", kSig_i_iii)" << std::endl;
- os << " .addBodyWithEnd([" << std::endl;
- }
- v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
- v8::Isolate* isolate = support->GetIsolate();
- v8::internal::Isolate* i_isolate =
- reinterpret_cast<v8::internal::Isolate*>(isolate);
-
- // Clear any pending exceptions from a prior run.
- if (i_isolate->has_pending_exception()) {
- i_isolate->clear_pending_exception();
- }
-
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- v8::Context::Scope context_scope(support->GetContext());
- v8::TryCatch try_catch(isolate);
-
- v8::internal::AccountingAllocator allocator;
- v8::internal::Zone zone(&allocator, ZONE_NAME);
-
- TestSignatures sigs;
-
- WasmModuleBuilder builder(&zone);
-
- v8::internal::wasm::WasmFunctionBuilder* f =
- builder.AddFunction(sigs.i_iii());
- f->EmitCode(data, static_cast<uint32_t>(size));
- uint8_t end_opcode = kExprEnd;
- f->EmitCode(&end_opcode, 1);
- f->ExportAs(v8::internal::CStrVector("main"));
-
- ZoneBuffer buffer(&zone);
- builder.WriteTo(buffer);
-
- v8::internal::wasm::testing::SetupIsolateForWasmModule(i_isolate);
-
- v8::internal::HandleScope scope(i_isolate);
-
- ErrorThrower interpreter_thrower(i_isolate, "Interpreter");
- std::unique_ptr<const WasmModule> module(testing::DecodeWasmModuleForTesting(
- i_isolate, &interpreter_thrower, buffer.begin(), buffer.end(),
- v8::internal::wasm::ModuleOrigin::kWasmOrigin, true));
-
- // Clear the flag so that the WebAssembly code is not printed twice.
- v8::internal::FLAG_wasm_code_fuzzer_gen_test = false;
- if (module == nullptr) {
- if (generate_test) {
- v8::internal::OFStream os(stdout);
- os << " ])" << std::endl;
- os << " .exportFunc();" << std::endl;
- os << " assertThrows(function() { builder.instantiate(); });"
- << std::endl;
- os << "})();" << std::endl;
- }
- return 0;
- }
- if (generate_test) {
- v8::internal::OFStream os(stdout);
- os << " ])" << std::endl;
- os << " .exportFunc();" << std::endl;
- os << " var module = builder.instantiate();" << std::endl;
- os << " module.exports.test(1, 2, 3);" << std::endl;
- os << "})();" << std::endl;
- }
-
- ModuleWireBytes wire_bytes(buffer.begin(), buffer.end());
- int32_t result_interpreted;
- bool possible_nondeterminism = false;
- {
- WasmVal args[] = {WasmVal(1), WasmVal(2), WasmVal(3)};
- result_interpreted = testing::InterpretWasmModule(
- i_isolate, &interpreter_thrower, module.get(), wire_bytes, 0, args,
- &possible_nondeterminism);
- }
-
- ErrorThrower compiler_thrower(i_isolate, "Compiler");
- v8::internal::Handle<v8::internal::JSObject> instance =
- testing::InstantiateModuleForTesting(i_isolate, &compiler_thrower,
- module.get(), wire_bytes);
- // Restore the flag.
- v8::internal::FLAG_wasm_code_fuzzer_gen_test = generate_test;
- if (!interpreter_thrower.error()) {
- CHECK(!instance.is_null());
- } else {
- return 0;
- }
- int32_t result_compiled;
- {
- v8::internal::Handle<v8::internal::Object> arguments[] = {
- v8::internal::handle(v8::internal::Smi::FromInt(1), i_isolate),
- v8::internal::handle(v8::internal::Smi::FromInt(2), i_isolate),
- v8::internal::handle(v8::internal::Smi::FromInt(3), i_isolate)};
- result_compiled = testing::CallWasmFunctionForTesting(
- i_isolate, instance, &compiler_thrower, "main", arraysize(arguments),
- arguments, v8::internal::wasm::ModuleOrigin::kWasmOrigin);
- }
-
- // The WebAssembly spec allows the sign bit of NaN to be non-deterministic.
- // This sign bit may cause result_interpreted to be different than
- // result_compiled. Therefore we do not check the equality of the results
- // if the execution may have produced a NaN at some point.
- if (possible_nondeterminism) return 0;
-
- if (result_interpreted == bit_cast<int32_t>(0xdeadbeef)) {
- CHECK(i_isolate->has_pending_exception());
- i_isolate->clear_pending_exception();
- } else {
- CHECK(!i_isolate->has_pending_exception());
- // The WebAssembly spec allows the sign bit of NaN to be non-deterministic.
- // This sign bit may cause result_interpreted to be different than
- // result_compiled. Therefore we do not check the equality of the results
- // if the execution may have produced a NaN at some point.
- if (result_interpreted != result_compiled) {
- V8_Fatal(__FILE__, __LINE__, "WasmCodeFuzzerHash=%x",
- v8::internal::StringHasher::HashSequentialString(
- data, static_cast<int>(size), WASM_CODE_FUZZER_HASH_SEED));
- }
- }
- return 0;
+ return WasmCodeFuzzer().FuzzWasmModule(data, size);
}
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index 0b01ce2357..2a99d1546c 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -19,12 +19,13 @@
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-module-runner.h"
#include "test/fuzzer/fuzzer-support.h"
-
-#define WASM_CODE_FUZZER_HASH_SEED 83
+#include "test/fuzzer/wasm-fuzzer-common.h"
typedef uint8_t byte;
+using namespace v8::internal;
using namespace v8::internal::wasm;
+using namespace v8::internal::wasm::fuzzer;
namespace {
@@ -104,7 +105,7 @@ class WasmGenerator {
const ValueType break_type = blocks_[target_block];
Generate(break_type, data);
- builder_->EmitWithVarInt(kExprBr, target_block);
+ builder_->EmitWithI32V(kExprBr, target_block);
builder_->Emit(kExprEnd);
blocks_.pop_back();
};
@@ -205,8 +206,7 @@ void WasmGenerator::Generate<kWasmI32>(DataRange data) {
template <>
void WasmGenerator::Generate<kWasmI64>(DataRange data) {
if (data.size() <= sizeof(uint64_t)) {
- const uint8_t bytes[] = {WASM_I64V(data.get<uint64_t>())};
- builder_->EmitCode(bytes, arraysize(bytes));
+ builder_->EmitI64Const(data.get<int64_t>());
} else {
const std::function<void(DataRange)> alternates[] = {
op<kExprI64Add, kWasmI64, kWasmI64>(),
@@ -244,10 +244,8 @@ void WasmGenerator::Generate<kWasmI64>(DataRange data) {
template <>
void WasmGenerator::Generate<kWasmF32>(DataRange data) {
- if (data.size() <= sizeof(uint32_t)) {
- const uint32_t i = data.get<uint32_t>();
- builder_->Emit(kExprF32Const);
- builder_->EmitCode(reinterpret_cast<const uint8_t*>(&i), sizeof(i));
+ if (data.size() <= sizeof(float)) {
+ builder_->EmitF32Const(data.get<float>());
} else {
const std::function<void(DataRange)> alternates[] = {
op<kExprF32Add, kWasmF32, kWasmF32>(),
@@ -266,15 +264,8 @@ void WasmGenerator::Generate<kWasmF32>(DataRange data) {
template <>
void WasmGenerator::Generate<kWasmF64>(DataRange data) {
- if (data.size() <= sizeof(uint64_t)) {
- // TODO (eholk): generate full 64-bit constants
- uint64_t i = 0;
- while (data.size() > 0) {
- i <<= 8;
- i |= data.get<uint8_t>();
- }
- builder_->Emit(kExprF64Const);
- builder_->EmitCode(reinterpret_cast<uint8_t*>(&i), sizeof(i));
+ if (data.size() <= sizeof(double)) {
+ builder_->EmitF64Const(data.get<double>());
} else {
const std::function<void(DataRange)> alternates[] = {
op<kExprF64Add, kWasmF64, kWasmF64>(),
@@ -307,141 +298,38 @@ void WasmGenerator::Generate(ValueType type, DataRange data) {
}
}
-extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- // Save the flag so that we can change it and restore it later.
- bool generate_test = v8::internal::FLAG_wasm_code_fuzzer_gen_test;
- if (generate_test) {
- v8::internal::OFStream os(stdout);
-
- os << "// Copyright 2017 the V8 project authors. All rights reserved."
- << std::endl;
- os << "// Use of this source code is governed by a BSD-style license that "
- "can be"
- << std::endl;
- os << "// found in the LICENSE file." << std::endl;
- os << std::endl;
- os << "load(\"test/mjsunit/wasm/wasm-constants.js\");" << std::endl;
- os << "load(\"test/mjsunit/wasm/wasm-module-builder.js\");" << std::endl;
- os << std::endl;
- os << "(function() {" << std::endl;
- os << " var builder = new WasmModuleBuilder();" << std::endl;
- os << " builder.addMemory(16, 32, false);" << std::endl;
- os << " builder.addFunction(\"test\", kSig_i_iii)" << std::endl;
- os << " .addBodyWithEnd([" << std::endl;
- }
- v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
- v8::Isolate* isolate = support->GetIsolate();
- v8::internal::Isolate* i_isolate =
- reinterpret_cast<v8::internal::Isolate*>(isolate);
-
- // Clear any pending exceptions from a prior run.
- if (i_isolate->has_pending_exception()) {
- i_isolate->clear_pending_exception();
- }
-
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- v8::Context::Scope context_scope(support->GetContext());
- v8::TryCatch try_catch(isolate);
-
- v8::internal::AccountingAllocator allocator;
- v8::internal::Zone zone(&allocator, ZONE_NAME);
-
- TestSignatures sigs;
-
- WasmModuleBuilder builder(&zone);
-
- v8::internal::wasm::WasmFunctionBuilder* f =
- builder.AddFunction(sigs.i_iii());
+class WasmCompileFuzzer : public WasmExecutionFuzzer {
+ virtual bool GenerateModule(
+ Isolate* isolate, Zone* zone, const uint8_t* data, size_t size,
+ ZoneBuffer& buffer, int32_t& num_args,
+ std::unique_ptr<WasmVal[]>& interpreter_args,
+ std::unique_ptr<Handle<Object>[]>& compiler_args) override {
+ TestSignatures sigs;
- WasmGenerator gen(f);
- gen.Generate<kWasmI32>(DataRange(data, static_cast<uint32_t>(size)));
+ WasmModuleBuilder builder(zone);
- uint8_t end_opcode = kExprEnd;
- f->EmitCode(&end_opcode, 1);
- f->ExportAs(v8::internal::CStrVector("main"));
+ v8::internal::wasm::WasmFunctionBuilder* f =
+ builder.AddFunction(sigs.i_iii());
- ZoneBuffer buffer(&zone);
- builder.WriteTo(buffer);
+ WasmGenerator gen(f);
+ gen.Generate<kWasmI32>(DataRange(data, static_cast<uint32_t>(size)));
- v8::internal::wasm::testing::SetupIsolateForWasmModule(i_isolate);
+ uint8_t end_opcode = kExprEnd;
+ f->EmitCode(&end_opcode, 1);
+ builder.AddExport(v8::internal::CStrVector("main"), f);
- v8::internal::HandleScope scope(i_isolate);
+ builder.WriteTo(buffer);
- ErrorThrower interpreter_thrower(i_isolate, "Interpreter");
- std::unique_ptr<const WasmModule> module(testing::DecodeWasmModuleForTesting(
- i_isolate, &interpreter_thrower, buffer.begin(), buffer.end(),
- v8::internal::wasm::ModuleOrigin::kWasmOrigin, true));
+ num_args = 3;
+ interpreter_args.reset(new WasmVal[3]{WasmVal(1), WasmVal(2), WasmVal(3)});
- // Clear the flag so that the WebAssembly code is not printed twice.
- v8::internal::FLAG_wasm_code_fuzzer_gen_test = false;
- if (module == nullptr) {
- if (generate_test) {
- v8::internal::OFStream os(stdout);
- os << " ])" << std::endl;
- os << " .exportFunc();" << std::endl;
- os << " assertThrows(function() { builder.instantiate(); });"
- << std::endl;
- os << "})();" << std::endl;
- }
- return 0;
- }
- if (generate_test) {
- v8::internal::OFStream os(stdout);
- os << " ])" << std::endl;
- os << " .exportFunc();" << std::endl;
- os << " var module = builder.instantiate();" << std::endl;
- os << " module.exports.test(1, 2, 3);" << std::endl;
- os << "})();" << std::endl;
- }
-
- ModuleWireBytes wire_bytes(buffer.begin(), buffer.end());
- int32_t result_interpreted;
- bool possible_nondeterminism = false;
- {
- WasmVal args[] = {WasmVal(1), WasmVal(2), WasmVal(3)};
- result_interpreted = testing::InterpretWasmModule(
- i_isolate, &interpreter_thrower, module.get(), wire_bytes, 0, args,
- &possible_nondeterminism);
+ compiler_args.reset(new Handle<Object>[3]{
+ handle(Smi::FromInt(1), isolate), handle(Smi::FromInt(1), isolate),
+ handle(Smi::FromInt(1), isolate)});
+ return true;
}
+};
- ErrorThrower compiler_thrower(i_isolate, "Compiler");
- v8::internal::Handle<v8::internal::JSObject> instance =
- testing::InstantiateModuleForTesting(i_isolate, &compiler_thrower,
- module.get(), wire_bytes);
- // Restore the flag.
- v8::internal::FLAG_wasm_code_fuzzer_gen_test = generate_test;
- if (!interpreter_thrower.error()) {
- CHECK(!instance.is_null());
- } else {
- return 0;
- }
- int32_t result_compiled;
- {
- v8::internal::Handle<v8::internal::Object> arguments[] = {
- v8::internal::handle(v8::internal::Smi::FromInt(1), i_isolate),
- v8::internal::handle(v8::internal::Smi::FromInt(2), i_isolate),
- v8::internal::handle(v8::internal::Smi::FromInt(3), i_isolate)};
- result_compiled = testing::CallWasmFunctionForTesting(
- i_isolate, instance, &compiler_thrower, "main", arraysize(arguments),
- arguments, v8::internal::wasm::ModuleOrigin::kWasmOrigin);
- }
- if (result_interpreted == bit_cast<int32_t>(0xdeadbeef) &&
- !possible_nondeterminism) {
- CHECK(i_isolate->has_pending_exception());
- i_isolate->clear_pending_exception();
- } else {
- // The WebAssembly spec allows the sign bit of NaN to be non-deterministic.
- // This sign bit may cause result_interpreted to be different than
- // result_compiled. Therefore we do not check the equality of the results
- // if the execution may have produced a NaN at some point.
- if (!possible_nondeterminism && (result_interpreted != result_compiled)) {
- printf("\nInterpreter returned 0x%x but compiled code returned 0x%x\n",
- result_interpreted, result_compiled);
- V8_Fatal(__FILE__, __LINE__, "WasmCodeFuzzerHash=%x",
- v8::internal::StringHasher::HashSequentialString(
- data, static_cast<int>(size), WASM_CODE_FUZZER_HASH_SEED));
- }
- }
- return 0;
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ return WasmCompileFuzzer().FuzzWasmModule(data, size);
}
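The generator changes above replace hand-assembled constant encodings with typed builder calls (`EmitI64Const`, `EmitF32Const`, `EmitF64Const`) fed by typed reads from the fuzz input. A sketch of the typed-read half, assuming a `DataRange`-like wrapper; the real class may differ in details such as bounds handling:

```cpp
// Sketch of a typed read from fuzz input: memcpy tolerates any alignment
// and any bit pattern, so arbitrary bytes can become a float or double.
#include <cstddef>
#include <cstdint>
#include <cstring>

struct DataRangeSketch {
  const uint8_t* data;
  size_t size;

  template <typename T>
  T get() {
    T result{};  // zero-filled so short reads are well defined
    size_t n = size < sizeof(T) ? size : sizeof(T);
    std::memcpy(&result, data, n);
    data += n;
    size -= n;
    return result;
  }
};

int main() {
  const uint8_t input[] = {0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0};
  DataRangeSketch range{input, sizeof(input)};
  double d = range.get<double>();  // feeds e.g. EmitF64Const in the fuzzer
  return d == d ? 0 : 1;           // NaN-valued inputs are fine too
}
```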
diff --git a/deps/v8/test/fuzzer/wasm-data-section.cc b/deps/v8/test/fuzzer/wasm-data-section.cc
index 30b702fe8d..91c3fb586e 100644
--- a/deps/v8/test/fuzzer/wasm-data-section.cc
+++ b/deps/v8/test/fuzzer/wasm-data-section.cc
@@ -3,8 +3,10 @@
// found in the LICENSE file.
#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-section-fuzzers.h"
+#include "test/fuzzer/wasm-fuzzer-common.h"
+
+using namespace v8::internal::wasm::fuzzer;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return fuzz_wasm_section(v8::internal::wasm::kDataSectionCode, data, size);
+ return FuzzWasmSection(v8::internal::wasm::kDataSectionCode, data, size);
}
diff --git a/deps/v8/test/fuzzer/wasm-function-sigs-section.cc b/deps/v8/test/fuzzer/wasm-function-sigs-section.cc
index e621aa820e..fc1fe2b987 100644
--- a/deps/v8/test/fuzzer/wasm-function-sigs-section.cc
+++ b/deps/v8/test/fuzzer/wasm-function-sigs-section.cc
@@ -3,9 +3,10 @@
// found in the LICENSE file.
#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-section-fuzzers.h"
+#include "test/fuzzer/wasm-fuzzer-common.h"
+
+using namespace v8::internal::wasm::fuzzer;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return fuzz_wasm_section(v8::internal::wasm::kFunctionSectionCode, data,
- size);
+ return FuzzWasmSection(v8::internal::wasm::kFunctionSectionCode, data, size);
}
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
new file mode 100644
index 0000000000..136e3f25a7
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -0,0 +1,194 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/fuzzer/wasm-fuzzer-common.h"
+
+#include "include/v8.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+#include "src/wasm/wasm-module-builder.h"
+#include "src/wasm/wasm-module.h"
+#include "src/zone/accounting-allocator.h"
+#include "src/zone/zone.h"
+#include "test/common/wasm/wasm-module-runner.h"
+#include "test/fuzzer/fuzzer-support.h"
+
+#define WASM_CODE_FUZZER_HASH_SEED 83
+
+using namespace v8::internal;
+using namespace v8::internal::wasm;
+using namespace v8::internal::wasm::fuzzer;
+
+static const char* kNameString = "name";
+static const size_t kNameStringLength = 4;
+
+int v8::internal::wasm::fuzzer::FuzzWasmSection(SectionCode section,
+ const uint8_t* data,
+ size_t size) {
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
+ v8::internal::Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ // Clear any pending exceptions from a prior run.
+ if (i_isolate->has_pending_exception()) {
+ i_isolate->clear_pending_exception();
+ }
+
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(support->GetContext());
+ v8::TryCatch try_catch(isolate);
+
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneBuffer buffer(&zone);
+ buffer.write_u32(kWasmMagic);
+ buffer.write_u32(kWasmVersion);
+ if (section == kNameSectionCode) {
+ buffer.write_u8(kUnknownSectionCode);
+ buffer.write_size(size + kNameStringLength + 1);
+ buffer.write_u8(kNameStringLength);
+ buffer.write(reinterpret_cast<const uint8_t*>(kNameString),
+ kNameStringLength);
+ buffer.write(data, size);
+ } else {
+ buffer.write_u8(section);
+ buffer.write_size(size);
+ buffer.write(data, size);
+ }
+
+ ErrorThrower thrower(i_isolate, "decoder");
+
+ std::unique_ptr<const WasmModule> module(testing::DecodeWasmModuleForTesting(
+ i_isolate, &thrower, buffer.begin(), buffer.end(), kWasmOrigin));
+
+ return 0;
+}
+
+int WasmExecutionFuzzer::FuzzWasmModule(const uint8_t* data, size_t size) {
+ // Save the flag so that we can change it and restore it later.
+ bool generate_test = FLAG_wasm_code_fuzzer_gen_test;
+ if (generate_test) {
+ OFStream os(stdout);
+
+ os << "// Copyright 2017 the V8 project authors. All rights reserved."
+ << std::endl;
+ os << "// Use of this source code is governed by a BSD-style license that "
+ "can be"
+ << std::endl;
+ os << "// found in the LICENSE file." << std::endl;
+ os << std::endl;
+ os << "load(\"test/mjsunit/wasm/wasm-constants.js\");" << std::endl;
+ os << "load(\"test/mjsunit/wasm/wasm-module-builder.js\");" << std::endl;
+ os << std::endl;
+ os << "(function() {" << std::endl;
+ os << " var builder = new WasmModuleBuilder();" << std::endl;
+ os << " builder.addMemory(16, 32, false);" << std::endl;
+ os << " builder.addFunction(\"test\", kSig_i_iii)" << std::endl;
+ os << " .addBodyWithEnd([" << std::endl;
+ }
+ v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
+ v8::Isolate* isolate = support->GetIsolate();
+ Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+ // Clear any pending exceptions from a prior run.
+ if (i_isolate->has_pending_exception()) {
+ i_isolate->clear_pending_exception();
+ }
+
+ v8::Isolate::Scope isolate_scope(isolate);
+ v8::HandleScope handle_scope(isolate);
+ v8::Context::Scope context_scope(support->GetContext());
+ v8::TryCatch try_catch(isolate);
+ HandleScope scope(i_isolate);
+
+ AccountingAllocator allocator;
+ Zone zone(&allocator, ZONE_NAME);
+
+ ZoneBuffer buffer(&zone);
+ int32_t num_args = 0;
+ std::unique_ptr<WasmVal[]> interpreter_args;
+ std::unique_ptr<Handle<Object>[]> compiler_args;
+ if (!GenerateModule(i_isolate, &zone, data, size, buffer, num_args,
+ interpreter_args, compiler_args)) {
+ return 0;
+ }
+
+ v8::internal::wasm::testing::SetupIsolateForWasmModule(i_isolate);
+
+ ErrorThrower interpreter_thrower(i_isolate, "Interpreter");
+ std::unique_ptr<const WasmModule> module(testing::DecodeWasmModuleForTesting(
+ i_isolate, &interpreter_thrower, buffer.begin(), buffer.end(),
+ ModuleOrigin::kWasmOrigin, true));
+
+ // Clear the flag so that the WebAssembly code is not printed twice.
+ FLAG_wasm_code_fuzzer_gen_test = false;
+ if (module == nullptr) {
+ if (generate_test) {
+ OFStream os(stdout);
+ os << " ])" << std::endl;
+ os << " .exportFunc();" << std::endl;
+ os << " assertThrows(function() { builder.instantiate(); });"
+ << std::endl;
+ os << "})();" << std::endl;
+ }
+ return 0;
+ }
+ if (generate_test) {
+ OFStream os(stdout);
+ os << " ])" << std::endl;
+ os << " .exportFunc();" << std::endl;
+ os << " var module = builder.instantiate();" << std::endl;
+ os << " module.exports.test(1, 2, 3);" << std::endl;
+ os << "})();" << std::endl;
+ }
+
+ ModuleWireBytes wire_bytes(buffer.begin(), buffer.end());
+ int32_t result_interpreted;
+ bool possible_nondeterminism = false;
+ {
+ result_interpreted = testing::InterpretWasmModule(
+ i_isolate, &interpreter_thrower, module.get(), wire_bytes, 0,
+ interpreter_args.get(), &possible_nondeterminism);
+ }
+
+ ErrorThrower compiler_thrower(i_isolate, "Compiler");
+ Handle<JSObject> instance = testing::InstantiateModuleForTesting(
+ i_isolate, &compiler_thrower, module.get(), wire_bytes);
+ // Restore the flag.
+ FLAG_wasm_code_fuzzer_gen_test = generate_test;
+ if (!interpreter_thrower.error()) {
+ CHECK(!instance.is_null());
+ } else {
+ return 0;
+ }
+ int32_t result_compiled;
+ {
+ result_compiled = testing::CallWasmFunctionForTesting(
+ i_isolate, instance, &compiler_thrower, "main", num_args,
+ compiler_args.get(), ModuleOrigin::kWasmOrigin);
+ }
+
+ // The WebAssembly spec allows the sign bit of NaN to be non-deterministic.
+ // This sign bit may cause result_interpreted to be different than
+ // result_compiled. Therefore we do not check the equality of the results
+ // if the execution may have produced a NaN at some point.
+ if (possible_nondeterminism) return 0;
+
+ if (result_interpreted == bit_cast<int32_t>(0xdeadbeef)) {
+ CHECK(i_isolate->has_pending_exception());
+ i_isolate->clear_pending_exception();
+ } else {
+ CHECK(!i_isolate->has_pending_exception());
+ if (result_interpreted != result_compiled) {
+ V8_Fatal(__FILE__, __LINE__, "WasmCodeFuzzerHash=%x",
+ StringHasher::HashSequentialString(data, static_cast<int>(size),
+ WASM_CODE_FUZZER_HASH_SEED));
+ }
+ }
+ return 0;
+}
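One detail of the harness above worth spelling out: when the interpreter reports `possible_nondeterminism`, the result comparison is skipped entirely, because the wasm spec leaves the sign bit of a generated NaN unspecified, so interpreter and compiled code can legitimately produce different bit patterns for the "same" NaN. A small illustration:

```cpp
// Two NaNs differing only in the sign bit: never equal as floats, and
// different under a bitwise comparison such as bit_cast<int32_t>.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

static uint32_t Bits(float f) {
  uint32_t u;
  std::memcpy(&u, &f, sizeof(u));
  return u;
}

int main() {
  float nan_pos = std::numeric_limits<float>::quiet_NaN();
  float nan_neg = -nan_pos;  // negation flips only the sign bit
  // Typically prints 7fc00000 vs ffc00000.
  std::printf("%08x vs %08x\n",
              static_cast<unsigned>(Bits(nan_pos)),
              static_cast<unsigned>(Bits(nan_neg)));
  return 0;
}
```

The `0xdeadbeef` sentinel plays a related role: it marks an interpreter run that trapped, in which case the harness checks for a pending exception instead of comparing return values.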
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.h b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
new file mode 100644
index 0000000000..c75ae74962
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.h
@@ -0,0 +1,40 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef WASM_SECTION_FUZZERS_H_
+#define WASM_SECTION_FUZZERS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-module-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace fuzzer {
+
+int FuzzWasmSection(v8::internal::wasm::SectionCode section,
+ const uint8_t* data, size_t size);
+
+class WasmExecutionFuzzer {
+ public:
+ virtual ~WasmExecutionFuzzer() {}
+ int FuzzWasmModule(const uint8_t* data, size_t size);
+
+ protected:
+ virtual bool GenerateModule(
+ Isolate* isolate, Zone* zone, const uint8_t* data, size_t size,
+ ZoneBuffer& buffer, int32_t& num_args,
+ std::unique_ptr<WasmVal[]>& interpreter_args,
+ std::unique_ptr<Handle<Object>[]>& compiler_args) = 0;
+};
+
+} // namespace fuzzer
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+#endif // WASM_SECTION_FUZZERS_H_
diff --git a/deps/v8/test/fuzzer/wasm-globals-section.cc b/deps/v8/test/fuzzer/wasm-globals-section.cc
index dccdc1079c..3aab373a5a 100644
--- a/deps/v8/test/fuzzer/wasm-globals-section.cc
+++ b/deps/v8/test/fuzzer/wasm-globals-section.cc
@@ -3,8 +3,10 @@
// found in the LICENSE file.
#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-section-fuzzers.h"
+#include "test/fuzzer/wasm-fuzzer-common.h"
+
+using namespace v8::internal::wasm::fuzzer;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return fuzz_wasm_section(v8::internal::wasm::kGlobalSectionCode, data, size);
+ return FuzzWasmSection(v8::internal::wasm::kGlobalSectionCode, data, size);
}
diff --git a/deps/v8/test/fuzzer/wasm-imports-section.cc b/deps/v8/test/fuzzer/wasm-imports-section.cc
index 4690597bba..587d091417 100644
--- a/deps/v8/test/fuzzer/wasm-imports-section.cc
+++ b/deps/v8/test/fuzzer/wasm-imports-section.cc
@@ -3,8 +3,10 @@
// found in the LICENSE file.
#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-section-fuzzers.h"
+#include "test/fuzzer/wasm-fuzzer-common.h"
+
+using namespace v8::internal::wasm::fuzzer;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return fuzz_wasm_section(v8::internal::wasm::kImportSectionCode, data, size);
+ return FuzzWasmSection(v8::internal::wasm::kImportSectionCode, data, size);
}
diff --git a/deps/v8/test/fuzzer/wasm-memory-section.cc b/deps/v8/test/fuzzer/wasm-memory-section.cc
index 4736d6e68d..261fd75b78 100644
--- a/deps/v8/test/fuzzer/wasm-memory-section.cc
+++ b/deps/v8/test/fuzzer/wasm-memory-section.cc
@@ -3,8 +3,10 @@
// found in the LICENSE file.
#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-section-fuzzers.h"
+#include "test/fuzzer/wasm-fuzzer-common.h"
+
+using namespace v8::internal::wasm::fuzzer;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return fuzz_wasm_section(v8::internal::wasm::kMemorySectionCode, data, size);
+ return FuzzWasmSection(v8::internal::wasm::kMemorySectionCode, data, size);
}
diff --git a/deps/v8/test/fuzzer/wasm-names-section.cc b/deps/v8/test/fuzzer/wasm-names-section.cc
index 9a3797cf80..8cfbd6c903 100644
--- a/deps/v8/test/fuzzer/wasm-names-section.cc
+++ b/deps/v8/test/fuzzer/wasm-names-section.cc
@@ -3,9 +3,11 @@
// found in the LICENSE file.
#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-section-fuzzers.h"
+#include "test/fuzzer/wasm-fuzzer-common.h"
+
+using namespace v8::internal::wasm::fuzzer;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
// TODO(titzer): Names section requires a preceding function section.
- return fuzz_wasm_section(v8::internal::wasm::kNameSectionCode, data, size);
+ return FuzzWasmSection(v8::internal::wasm::kNameSectionCode, data, size);
}
diff --git a/deps/v8/test/fuzzer/wasm-section-fuzzers.cc b/deps/v8/test/fuzzer/wasm-section-fuzzers.cc
deleted file mode 100644
index d883c26ec9..0000000000
--- a/deps/v8/test/fuzzer/wasm-section-fuzzers.cc
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "test/fuzzer/wasm-section-fuzzers.h"
-
-#include "include/v8.h"
-#include "src/isolate.h"
-#include "src/objects-inl.h"
-#include "src/wasm/wasm-module-builder.h"
-#include "src/wasm/wasm-module.h"
-#include "src/zone/accounting-allocator.h"
-#include "src/zone/zone.h"
-#include "test/common/wasm/wasm-module-runner.h"
-#include "test/fuzzer/fuzzer-support.h"
-
-using namespace v8::internal::wasm;
-
-static const char* kNameString = "name";
-static const size_t kNameStringLength = 4;
-
-int fuzz_wasm_section(SectionCode section, const uint8_t* data, size_t size) {
- v8_fuzzer::FuzzerSupport* support = v8_fuzzer::FuzzerSupport::Get();
- v8::Isolate* isolate = support->GetIsolate();
- v8::internal::Isolate* i_isolate =
- reinterpret_cast<v8::internal::Isolate*>(isolate);
-
- // Clear any pending exceptions from a prior run.
- if (i_isolate->has_pending_exception()) {
- i_isolate->clear_pending_exception();
- }
-
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- v8::Context::Scope context_scope(support->GetContext());
- v8::TryCatch try_catch(isolate);
-
- v8::internal::AccountingAllocator allocator;
- v8::internal::Zone zone(&allocator, ZONE_NAME);
-
- ZoneBuffer buffer(&zone);
- buffer.write_u32(kWasmMagic);
- buffer.write_u32(kWasmVersion);
- if (section == kNameSectionCode) {
- buffer.write_u8(kUnknownSectionCode);
- buffer.write_size(size + kNameStringLength + 1);
- buffer.write_u8(kNameStringLength);
- buffer.write(reinterpret_cast<const uint8_t*>(kNameString),
- kNameStringLength);
- buffer.write(data, size);
- } else {
- buffer.write_u8(section);
- buffer.write_size(size);
- buffer.write(data, size);
- }
-
- ErrorThrower thrower(i_isolate, "decoder");
-
- std::unique_ptr<const WasmModule> module(testing::DecodeWasmModuleForTesting(
- i_isolate, &thrower, buffer.begin(), buffer.end(), kWasmOrigin));
-
- return 0;
-}
diff --git a/deps/v8/test/fuzzer/wasm-section-fuzzers.h b/deps/v8/test/fuzzer/wasm-section-fuzzers.h
deleted file mode 100644
index 5d38981cf4..0000000000
--- a/deps/v8/test/fuzzer/wasm-section-fuzzers.h
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef WASM_SECTION_FUZZERS_H_
-#define WASM_SECTION_FUZZERS_H_
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "src/wasm/module-decoder.h"
-
-int fuzz_wasm_section(v8::internal::wasm::SectionCode section,
- const uint8_t* data, size_t size);
-
-#endif // WASM_SECTION_FUZZERS_H_
diff --git a/deps/v8/test/fuzzer/wasm-types-section.cc b/deps/v8/test/fuzzer/wasm-types-section.cc
index 2d7e91e32a..cdd99067e7 100644
--- a/deps/v8/test/fuzzer/wasm-types-section.cc
+++ b/deps/v8/test/fuzzer/wasm-types-section.cc
@@ -3,8 +3,10 @@
// found in the LICENSE file.
#include "src/objects-inl.h"
-#include "test/fuzzer/wasm-section-fuzzers.h"
+#include "test/fuzzer/wasm-fuzzer-common.h"
+
+using namespace v8::internal::wasm::fuzzer;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
- return fuzz_wasm_section(v8::internal::wasm::kTypeSectionCode, data, size);
+ return FuzzWasmSection(v8::internal::wasm::kTypeSectionCode, data, size);
}
diff --git a/deps/v8/test/fuzzer/wasm.tar.gz.sha1 b/deps/v8/test/fuzzer/wasm.tar.gz.sha1
deleted file mode 100644
index 9fc4cf50d2..0000000000
--- a/deps/v8/test/fuzzer/wasm.tar.gz.sha1
+++ /dev/null
@@ -1 +0,0 @@
-43dbe4810e9b08a5add1dd4076e26410e18c828c \ No newline at end of file
diff --git a/deps/v8/test/fuzzer/wasm/foo b/deps/v8/test/fuzzer/wasm/foo
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm/foo
diff --git a/deps/v8/test/fuzzer/wasm_asmjs.tar.gz.sha1 b/deps/v8/test/fuzzer/wasm_asmjs.tar.gz.sha1
deleted file mode 100644
index b8cf779dee..0000000000
--- a/deps/v8/test/fuzzer/wasm_asmjs.tar.gz.sha1
+++ /dev/null
@@ -1 +0,0 @@
-3a2c9658f3f644c7b8c309201b964fedc2766f9c \ No newline at end of file
diff --git a/deps/v8/test/fuzzer/wasm_asmjs/foo b/deps/v8/test/fuzzer/wasm_asmjs/foo
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_asmjs/foo
diff --git a/deps/v8/test/fuzzer/wasm_asmjs_corpus.tar.gz.sha1 b/deps/v8/test/fuzzer/wasm_asmjs_corpus.tar.gz.sha1
new file mode 100644
index 0000000000..865ca915f2
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_asmjs_corpus.tar.gz.sha1
@@ -0,0 +1 @@
+cf1777646f8d4557504442e9bd59e908519ffec8 \ No newline at end of file
diff --git a/deps/v8/test/fuzzer/wasm_corpus.tar.gz.sha1 b/deps/v8/test/fuzzer/wasm_corpus.tar.gz.sha1
new file mode 100644
index 0000000000..32bfeceb0a
--- /dev/null
+++ b/deps/v8/test/fuzzer/wasm_corpus.tar.gz.sha1
@@ -0,0 +1 @@
+f6b95b7dd8300efa84b6382f16cfcae4ec9fa108 \ No newline at end of file
diff --git a/deps/v8/test/inspector/BUILD.gn b/deps/v8/test/inspector/BUILD.gn
index 5298c09a89..cffcd294bd 100644
--- a/deps/v8/test/inspector/BUILD.gn
+++ b/deps/v8/test/inspector/BUILD.gn
@@ -11,6 +11,8 @@ v8_executable("inspector-test") {
"inspector-impl.cc",
"inspector-impl.h",
"inspector-test.cc",
+ "isolate-data.cc",
+ "isolate-data.h",
"task-runner.cc",
"task-runner.h",
]
@@ -24,7 +26,7 @@ v8_executable("inspector-test") {
"../..:v8",
"../..:v8_libbase",
"../..:v8_libplatform",
- "//build/config/sanitizers:deps",
+ "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
diff --git a/deps/v8/test/inspector/console/destroy-context-during-log-expected.txt b/deps/v8/test/inspector/console/destroy-context-during-log-expected.txt
index c8e9293ff2..d345b1204c 100644
--- a/deps/v8/test/inspector/console/destroy-context-during-log-expected.txt
+++ b/deps/v8/test/inspector/console/destroy-context-during-log-expected.txt
@@ -1,3 +1,4 @@
+Tests that destroying context from inside of console.log does not crash
{
type : string
value : First inspector activity after attaching inspector
diff --git a/deps/v8/test/inspector/console/destroy-context-during-log.js b/deps/v8/test/inspector/console/destroy-context-during-log.js
index 9c5753ffc0..0fa7c16d7c 100644
--- a/deps/v8/test/inspector/console/destroy-context-during-log.js
+++ b/deps/v8/test/inspector/console/destroy-context-during-log.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that destroying context from inside of console.log does not crash');
+
const expression = `
Object.defineProperty(Object.prototype, 'RemoteObject', {
configurable: true,
@@ -10,11 +12,11 @@ const expression = `
delete Object.prototype.RemoteObject;
this.RemoteObject = v;
- detachInspector();
+ inspector.fireContextDestroyed();
setTimeout(function() {
// Attach the inspector again for the sake of establishing a
// communication channel with the frontend test runner.
- attachInspector();
+ inspector.fireContextCreated();
console.log("End of test");
}, 0);
},
@@ -23,8 +25,8 @@ const expression = `
// Before the whole script runs, the inspector is already attached.
// Re-attach the inspector and trigger the console API to make sure that the
// injected inspector script runs again (and triggers the above setter).
- detachInspector();
- attachInspector();
+ inspector.fireContextDestroyed();
+ inspector.fireContextCreated();
console.log("First inspector activity after attaching inspector");
console.log("End of test");
`;
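
The hunks above replace the old global helpers detachInspector()/attachInspector() with methods on the injected inspector object, and route the test description through InspectorTest.start(). A minimal sketch of the new idiom, assuming only the harness names visible in this diff (the evaluated body is a hypothetical example, not part of the commit):

  let {session, contextGroup, Protocol} = InspectorTest.start(
      'Example: destroy and recreate the context from evaluated code');
  Protocol.Runtime.evaluate({expression: `
    inspector.fireContextDestroyed();  // was: detachInspector()
    inspector.fireContextCreated();    // was: attachInspector()
    console.log('context recreated');
  `});
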
diff --git a/deps/v8/test/inspector/console/let-const-with-api-expected.txt b/deps/v8/test/inspector/console/let-const-with-api-expected.txt
index a5b889632d..0ea6476c4a 100644
--- a/deps/v8/test/inspector/console/let-const-with-api-expected.txt
+++ b/deps/v8/test/inspector/console/let-const-with-api-expected.txt
@@ -1,3 +1,4 @@
+Tests how let and const interact with command line api
first "let a = 1;" result: wasThrown = false
second "let a = 1;" result: wasThrown = true
exception message: Uncaught SyntaxError: Identifier 'a' has already been declared
@@ -16,4 +17,4 @@ function debug(function) { [Command Line API] }
function undebug(function) { [Command Line API] }
function monitor(function) { [Command Line API] }
function unmonitor(function) { [Command Line API] }
-function table(data, [columns]) { [Command Line API] }
\ No newline at end of file
+function table(data, [columns]) { [Command Line API] }
diff --git a/deps/v8/test/inspector/console/let-const-with-api.js b/deps/v8/test/inspector/console/let-const-with-api.js
index 0280fe1174..b51572d08e 100644
--- a/deps/v8/test/inspector/console/let-const-with-api.js
+++ b/deps/v8/test/inspector/console/let-const-with-api.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests how let and const interact with command line api');
+
Protocol.Runtime.evaluate({ expression: "let a = 42;" }).then(step2);
function step2(response)
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash-expected.txt b/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash-expected.txt
index a28765a100..a2988ad4c3 100644
--- a/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash-expected.txt
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash-expected.txt
@@ -1,3 +1,3 @@
Tests that "console.profileEnd()" does not cause crash. (webkit:105759)
SUCCESS: found 2 profile headers
-SUCCESS: titled profile found
\ No newline at end of file
+SUCCESS: titled profile found
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash.js b/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash.js
index 415b703c36..d0d995e37e 100644
--- a/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash.js
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-end-parameterless-crash.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Tests that \"console.profileEnd()\" does not cause crash. (webkit:105759)");
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that \"console.profileEnd()\" does not cause crash. (webkit:105759)");
-InspectorTest.addScript(`
+contextGroup.addScript(`
function collectProfiles()
{
console.profile();
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile-expected.txt b/deps/v8/test/inspector/cpu-profiler/console-profile-expected.txt
index b3da7ba0c1..7bb8bc940d 100644
--- a/deps/v8/test/inspector/cpu-profiler/console-profile-expected.txt
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile-expected.txt
@@ -1,3 +1,3 @@
Tests that console.profile/profileEnd will record CPU profile when inspector front-end is connected.
SUCCESS: retrieved '42' profile
-SUCCESS: found 'collectProfiles' function in the profile
\ No newline at end of file
+SUCCESS: found 'collectProfiles' function in the profile
diff --git a/deps/v8/test/inspector/cpu-profiler/console-profile.js b/deps/v8/test/inspector/cpu-profiler/console-profile.js
index b8e36b6861..9aa7542cea 100644
--- a/deps/v8/test/inspector/cpu-profiler/console-profile.js
+++ b/deps/v8/test/inspector/cpu-profiler/console-profile.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Tests that console.profile/profileEnd will record CPU profile when inspector front-end is connected.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that console.profile/profileEnd will record CPU profile when inspector front-end is connected.");
-InspectorTest.addScript(`
+contextGroup.addScript(`
function collectProfiles()
{
console.profile("outer");
diff --git a/deps/v8/test/inspector/cpu-profiler/coverage.js b/deps/v8/test/inspector/cpu-profiler/coverage.js
index a059467b2e..ce4995fd1c 100644
--- a/deps/v8/test/inspector/cpu-profiler/coverage.js
+++ b/deps/v8/test/inspector/cpu-profiler/coverage.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --no-always-opt --crankshaft
+// Flags: --allow-natives-syntax --no-always-opt --opt
var source =
`
@@ -50,7 +50,7 @@ var f = (function outer() {
f()()();
`;
-InspectorTest.log("Test collecting code coverage data with Profiler.collectCoverage.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Test collecting code coverage data with Profiler.collectCoverage.");
function ClearAndGC() {
return Protocol.Runtime.evaluate({ expression: "fib = g = f = h = is_optimized = null;" })
diff --git a/deps/v8/test/inspector/cpu-profiler/enable-disable-expected.txt b/deps/v8/test/inspector/cpu-profiler/enable-disable-expected.txt
index 05d3fd3d5f..aa3507c934 100644
--- a/deps/v8/test/inspector/cpu-profiler/enable-disable-expected.txt
+++ b/deps/v8/test/inspector/cpu-profiler/enable-disable-expected.txt
@@ -5,4 +5,4 @@ PASS: console initiated profile started
PASS: didStartConsoleProfile
PASS: didDisableProfiler
PASS: no front-end initiated profiles found
-PASS: didStopConsoleProfile
\ No newline at end of file
+PASS: didStopConsoleProfile
diff --git a/deps/v8/test/inspector/cpu-profiler/enable-disable.js b/deps/v8/test/inspector/cpu-profiler/enable-disable.js
index b342a162c4..0e9b94dc5c 100644
--- a/deps/v8/test/inspector/cpu-profiler/enable-disable.js
+++ b/deps/v8/test/inspector/cpu-profiler/enable-disable.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Test that profiling can only be started when Profiler was enabled and that Profiler.disable command will stop recording all profiles.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Test that profiling can only be started when Profiler was enabled and that Profiler.disable command will stop recording all profiles.");
Protocol.Profiler.start().then(didFailToStartWhenDisabled);
disallowConsoleProfiles();
@@ -31,7 +31,7 @@ function allowConsoleProfiles()
}
function didFailToStartWhenDisabled(messageObject)
{
- if (!InspectorTest.expectedError("didFailToStartWhenDisabled", messageObject))
+ if (!expectedError("didFailToStartWhenDisabled", messageObject))
return;
allowConsoleProfiles();
Protocol.Profiler.enable();
@@ -39,21 +39,21 @@ function didFailToStartWhenDisabled(messageObject)
}
function didStartFrontendProfile(messageObject)
{
- if (!InspectorTest.expectedSuccess("didStartFrontendProfile", messageObject))
+ if (!expectedSuccess("didStartFrontendProfile", messageObject))
return;
Protocol.Runtime.evaluate({expression: "console.profile('p1');"}).then(didStartConsoleProfile);
}
function didStartConsoleProfile(messageObject)
{
- if (!InspectorTest.expectedSuccess("didStartConsoleProfile", messageObject))
+ if (!expectedSuccess("didStartConsoleProfile", messageObject))
return;
Protocol.Profiler.disable().then(didDisableProfiler);
}
function didDisableProfiler(messageObject)
{
- if (!InspectorTest.expectedSuccess("didDisableProfiler", messageObject))
+ if (!expectedSuccess("didDisableProfiler", messageObject))
return;
Protocol.Profiler.enable();
Protocol.Profiler.stop().then(didStopFrontendProfile);
@@ -61,7 +61,7 @@ function didDisableProfiler(messageObject)
function didStopFrontendProfile(messageObject)
{
- if (!InspectorTest.expectedError("no front-end initiated profiles found", messageObject))
+ if (!expectedError("no front-end initiated profiles found", messageObject))
return;
disallowConsoleProfiles();
Protocol.Runtime.evaluate({expression: "console.profileEnd();"}).then(didStopConsoleProfile);
@@ -69,7 +69,21 @@ function didStopFrontendProfile(messageObject)
function didStopConsoleProfile(messageObject)
{
- if (!InspectorTest.expectedSuccess("didStopConsoleProfile", messageObject))
+ if (!expectedSuccess("didStopConsoleProfile", messageObject))
return;
InspectorTest.completeTest();
}
+
+function checkExpectation(fail, name, messageObject)
+{
+ if (fail === !!messageObject.error) {
+ InspectorTest.log("PASS: " + name);
+ return true;
+ }
+
+ InspectorTest.log("FAIL: " + name + ": " + JSON.stringify(messageObject));
+ InspectorTest.completeTest();
+ return false;
+}
+var expectedSuccess = checkExpectation.bind(null, false);
+var expectedError = checkExpectation.bind(null, true);
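
The expectedSuccess/expectedError helpers used to live on InspectorTest; this commit inlines a local checkExpectation into each test and binds it twice. A hedged usage sketch built only from names in the hunk above (the step names are illustrative):

  Protocol.Profiler.enable();
  Protocol.Profiler.start().then(function(messageObject) {
    // expectedSuccess returns false (and completes the test) on error.
    if (!expectedSuccess('didStartProfile', messageObject))
      return;
    Protocol.Profiler.stop().then(function(stopMessage) {
      expectedSuccess('didStopProfile', stopMessage);
      InspectorTest.completeTest();
    });
  });
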
diff --git a/deps/v8/test/inspector/cpu-profiler/record-cpu-profile-expected.txt b/deps/v8/test/inspector/cpu-profiler/record-cpu-profile-expected.txt
index d810093968..4ff20a253d 100644
--- a/deps/v8/test/inspector/cpu-profiler/record-cpu-profile-expected.txt
+++ b/deps/v8/test/inspector/cpu-profiler/record-cpu-profile-expected.txt
@@ -4,4 +4,4 @@ PASS: startConsoleProfile
PASS: stopConsoleProfile
PASS: stoppedFrontendProfile
PASS: startFrontendProfileSecondTime
-PASS: stopFrontendProfileSecondTime
\ No newline at end of file
+PASS: stopFrontendProfileSecondTime
diff --git a/deps/v8/test/inspector/cpu-profiler/record-cpu-profile.js b/deps/v8/test/inspector/cpu-profiler/record-cpu-profile.js
index c87d600124..3799cf74f5 100644
--- a/deps/v8/test/inspector/cpu-profiler/record-cpu-profile.js
+++ b/deps/v8/test/inspector/cpu-profiler/record-cpu-profile.js
@@ -2,47 +2,61 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Test that profiler is able to record a profile. Also it tests that profiler returns an error when it unable to find the profile.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Test that profiler is able to record a profile. Also it tests that profiler returns an error when it unable to find the profile.");
Protocol.Profiler.enable();
Protocol.Profiler.start().then(didStartFrontendProfile);
function didStartFrontendProfile(messageObject)
{
- if (!InspectorTest.expectedSuccess("startFrontendProfile", messageObject))
+ if (!expectedSuccess("startFrontendProfile", messageObject))
return;
Protocol.Runtime.evaluate({expression: "console.profile('Profile 1');"}).then(didStartConsoleProfile);
}
function didStartConsoleProfile(messageObject)
{
- if (!InspectorTest.expectedSuccess("startConsoleProfile", messageObject))
+ if (!expectedSuccess("startConsoleProfile", messageObject))
return;
Protocol.Runtime.evaluate({expression: "console.profileEnd('Profile 1');"}).then(didStopConsoleProfile);
}
function didStopConsoleProfile(messageObject)
{
- if (!InspectorTest.expectedSuccess("stopConsoleProfile", messageObject))
+ if (!expectedSuccess("stopConsoleProfile", messageObject))
return;
Protocol.Profiler.stop().then(didStopFrontendProfile);
}
function didStopFrontendProfile(messageObject)
{
- if (!InspectorTest.expectedSuccess("stoppedFrontendProfile", messageObject))
+ if (!expectedSuccess("stoppedFrontendProfile", messageObject))
return;
Protocol.Profiler.start().then(didStartFrontendProfile2);
}
function didStartFrontendProfile2(messageObject)
{
- if (!InspectorTest.expectedSuccess("startFrontendProfileSecondTime", messageObject))
+ if (!expectedSuccess("startFrontendProfileSecondTime", messageObject))
return;
Protocol.Profiler.stop().then(didStopFrontendProfile2);
}
function didStopFrontendProfile2(messageObject)
{
- InspectorTest.expectedSuccess("stopFrontendProfileSecondTime", messageObject)
+ expectedSuccess("stopFrontendProfileSecondTime", messageObject)
InspectorTest.completeTest();
}
+
+function checkExpectation(fail, name, messageObject)
+{
+ if (fail === !!messageObject.error) {
+ InspectorTest.log("PASS: " + name);
+ return true;
+ }
+
+ InspectorTest.log("FAIL: " + name + ": " + JSON.stringify(messageObject));
+ InspectorTest.completeTest();
+ return false;
+}
+var expectedSuccess = checkExpectation.bind(null, false);
+var expectedError = checkExpectation.bind(null, true);
diff --git a/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start-expected.txt b/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start-expected.txt
index 91b5c9e6e2..2c6dd1e38f 100644
--- a/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start-expected.txt
+++ b/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start-expected.txt
@@ -1,2 +1,2 @@
Test that profiler doesn't crash when we call stop without preceeding start.
-PASS: ProfileAgent.stop
\ No newline at end of file
+PASS: ProfileAgent.stop
diff --git a/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start.js b/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start.js
index 5fdf098fe1..5f21e653f1 100644
--- a/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start.js
+++ b/deps/v8/test/inspector/cpu-profiler/stop-without-preceeding-start.js
@@ -2,11 +2,25 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Test that profiler doesn't crash when we call stop without preceeding start.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Test that profiler doesn't crash when we call stop without preceeding start.");
Protocol.Profiler.stop().then(didStopProfile);
function didStopProfile(messageObject)
{
- InspectorTest.expectedError("ProfileAgent.stop", messageObject);
+ expectedError("ProfileAgent.stop", messageObject);
InspectorTest.completeTest();
}
+
+function checkExpectation(fail, name, messageObject)
+{
+ if (fail === !!messageObject.error) {
+ InspectorTest.log("PASS: " + name);
+ return true;
+ }
+
+ InspectorTest.log("FAIL: " + name + ": " + JSON.stringify(messageObject));
+ InspectorTest.completeTest();
+ return false;
+}
+var expectedSuccess = checkExpectation.bind(null, false);
+var expectedError = checkExpectation.bind(null, true);
diff --git a/deps/v8/test/inspector/debugger/access-obsolete-frame-expected.txt b/deps/v8/test/inspector/debugger/access-obsolete-frame-expected.txt
index 643d382f24..7093d76c2e 100644
--- a/deps/v8/test/inspector/debugger/access-obsolete-frame-expected.txt
+++ b/deps/v8/test/inspector/debugger/access-obsolete-frame-expected.txt
@@ -1,3 +1,4 @@
+Tests that accessing no longer valid call frames returns an error
Paused on 'debugger;'
resume
restartFrame
@@ -5,4 +6,4 @@ PASS, error message as expected
evaluateOnFrame
PASS, error message as expected
setVariableValue
-PASS, error message as expected
\ No newline at end of file
+PASS, error message as expected
diff --git a/deps/v8/test/inspector/debugger/access-obsolete-frame.js b/deps/v8/test/inspector/debugger/access-obsolete-frame.js
index b5a96e1c3c..9d498e041d 100644
--- a/deps/v8/test/inspector/debugger/access-obsolete-frame.js
+++ b/deps/v8/test/inspector/debugger/access-obsolete-frame.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.addScript(`
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that accessing no longer valid call frames returns an error');
+
+contextGroup.addScript(`
function testFunction()
{
debugger;
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js
index 78a7b8e57e..ec760ecf08 100644
--- a/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-before-exec.js
@@ -4,7 +4,7 @@
// Flags: --validate-asm --allow-natives-syntax
-InspectorTest.log(
+let {session, contextGroup, Protocol} = InspectorTest.start(
'This test runs asm.js which calls back to JS. Before executing (after ' +
'the script is parsed) we set breakpoints in the asm.js code.');
@@ -50,7 +50,7 @@ InspectorTest.runTestSuite([
function addScript(next) {
afterScriptParsedCallback = next;
- InspectorTest.addScript(testFunction.toString());
+ contextGroup.addScript(testFunction.toString());
},
function runTestFunction(next) {
diff --git a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js
index 2a29dc1ea4..af3ac518b3 100644
--- a/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js
+++ b/deps/v8/test/inspector/debugger/asm-js-breakpoint-during-exec.js
@@ -4,7 +4,7 @@
// Flags: --validate-asm --allow-natives-syntax
-InspectorTest.log(
+let {session, contextGroup, Protocol} = InspectorTest.start(
'This test runs asm.js which calls back to JS. JS triggers a break, on ' +
'pause we set breakpoints in the asm.js code.');
@@ -53,7 +53,7 @@ InspectorTest.runTestSuite([
function addScript(next) {
afterScriptParsedCallback = next;
- InspectorTest.addScript(testFunction.toString());
+ contextGroup.addScript(testFunction.toString());
},
function runTestFunction(next) {
diff --git a/deps/v8/test/inspector/debugger/asm-js-stack-expected.txt b/deps/v8/test/inspector/debugger/asm-js-stack-expected.txt
index f3bfd8de6a..e028f2a595 100644
--- a/deps/v8/test/inspector/debugger/asm-js-stack-expected.txt
+++ b/deps/v8/test/inspector/debugger/asm-js-stack-expected.txt
@@ -1,3 +1,4 @@
+Tests that asm-js scripts produce correct stack
Paused on 'debugger;'
Number of frames: 5
- [0] {"functionName":"call_debugger","function_lineNumber":13,"function_columnNumber":24,"lineNumber":14,"columnNumber":4}
diff --git a/deps/v8/test/inspector/debugger/asm-js-stack.js b/deps/v8/test/inspector/debugger/asm-js-stack.js
index 37db088ba1..916ac5f22a 100644
--- a/deps/v8/test/inspector/debugger/asm-js-stack.js
+++ b/deps/v8/test/inspector/debugger/asm-js-stack.js
@@ -4,6 +4,8 @@
// Flags: --validate-asm
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that asm-js scripts produce correct stack');
+
function testFunction() {
function generateAsmJs(stdlib, foreign, heap) {
'use asm';
@@ -25,7 +27,7 @@ function testFunction() {
fun();
}
-InspectorTest.addScript(testFunction.toString());
+contextGroup.addScript(testFunction.toString());
Protocol.Debugger.enable();
Protocol.Debugger.oncePaused().then(handleDebuggerPaused);
diff --git a/deps/v8/test/inspector/debugger/async-console-count-doesnt-crash.js b/deps/v8/test/inspector/debugger/async-console-count-doesnt-crash.js
index 57c308878d..14f09b31a7 100644
--- a/deps/v8/test/inspector/debugger/async-console-count-doesnt-crash.js
+++ b/deps/v8/test/inspector/debugger/async-console-count-doesnt-crash.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("setTimeout(console.count, 0) doesn't crash with enabled async stacks.")
+let {session, contextGroup, Protocol} = InspectorTest.start("setTimeout(console.count, 0) doesn't crash with enabled async stacks.")
Protocol.Debugger.enable();
Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 1 });
Protocol.Runtime.evaluate({ expression: "setTimeout(console.count, 0)" });
-InspectorTest.completeTestAfterPendingTimeouts();
+InspectorTest.waitForPendingTasks().then(InspectorTest.completeTest);
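
Here InspectorTest.completeTestAfterPendingTimeouts() is replaced by the promise-based InspectorTest.waitForPendingTasks(). A minimal sketch of the new completion idiom (the evaluated expression is a hypothetical placeholder):

  Protocol.Runtime.evaluate(
      {expression: 'setTimeout(() => console.log("done"), 0)'});
  InspectorTest.waitForPendingTasks().then(InspectorTest.completeTest);
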
diff --git a/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt b/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt
index fb1403917a..0cfa3c4349 100644
--- a/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack-expected.txt
@@ -1,57 +1,58 @@
Checks that async chains for for-await-of are correct.
Running test: testBasic
-Debugger (test.js:10:2)
-Basic (test.js:48:4)
--- async function (test.js:46:20)--
-Basic (test.js:46:20)
+Debugger (test.js:12:2)
+Basic (test.js:50:4)
+-- async function --
+Basic (test.js:48:20)
(anonymous) (testBasic.js:0:0)
Running test: testUncaughtReject
-Debugger (test.js:10:2)
--- async function (test.js:52:29)--
-UncaughtReject (test.js:52:29)
+Debugger (test.js:12:2)
+-- async function --
+UncaughtReject (test.js:54:29)
(anonymous) (testUncaughtReject.js:0:0)
Running test: testUncaughtThrow
-Debugger (test.js:10:2)
--- async function (test.js:61:28)--
-UncaughtThrow (test.js:61:28)
+Debugger (test.js:12:2)
+-- async function --
+UncaughtThrow (test.js:63:28)
(anonymous) (testUncaughtThrow.js:0:0)
Running test: testCaughtReject
-Debugger (test.js:10:2)
-CaughtReject (test.js:76:4)
--- async function (test.js:70:27)--
-CaughtReject (test.js:70:27)
+Debugger (test.js:12:2)
+CaughtReject (test.js:78:4)
+-- async function --
+CaughtReject (test.js:72:27)
(anonymous) (testCaughtReject.js:0:0)
Running test: testCaughtThrow
-Debugger (test.js:10:2)
-CaughtThrow (test.js:86:4)
--- async function (test.js:80:26)--
-CaughtThrow (test.js:80:26)
+Debugger (test.js:12:2)
+CaughtThrow (test.js:88:4)
+-- async function --
+CaughtThrow (test.js:82:26)
(anonymous) (testCaughtThrow.js:0:0)
Running test: testUncaughtRejectOnBreak
Running test: testUncaughtThrowOnBreak
-Debugger (test.js:10:2)
--- async function (test.js:99:35)--
-UncaughtThrowOnBreak (test.js:99:35)
+Debugger (test.js:12:2)
+-- async function --
+UncaughtThrowOnBreak (test.js:101:35)
(anonymous) (testUncaughtThrowOnBreak.js:0:0)
Running test: testCaughtRejectOnBreak
Running test: testCaughtThrowOnBreak
-Debugger (test.js:10:2)
-CaughtThrowOnBreak (test.js:124:4)
--- async function (test.js:118:33)--
-CaughtThrowOnBreak (test.js:118:33)
-(anonymous) (testCaughtThrowOnBreak.js:0:0)
\ No newline at end of file
+Debugger (test.js:12:2)
+CaughtThrowOnBreak (test.js:126:4)
+-- async function --
+CaughtThrowOnBreak (test.js:120:33)
+(anonymous) (testCaughtThrowOnBreak.js:0:0)
+
diff --git a/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack.js b/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack.js
index 4e6c0bf15e..6a2f4ce972 100644
--- a/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack.js
+++ b/deps/v8/test/inspector/debugger/async-for-await-of-promise-stack.js
@@ -4,9 +4,9 @@
// Flags: --harmony-async-iteration
-InspectorTest.log('Checks that async chains for for-await-of are correct.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that async chains for for-await-of are correct.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function Debugger(value) {
debugger;
@@ -50,7 +50,7 @@ async function Basic() {
Debugger();
}
}
-
+// TODO(kozyatinskiy): this stack trace is suspicious.
async function UncaughtReject() {
async function loop() {
for await (let x of [Reject(new Error("boop"))]) {
@@ -59,7 +59,7 @@ async function UncaughtReject() {
}
return loop().catch(Debugger);
}
-
+// TODO(kozyatinskiy): this stack trace is suspicious.
async function UncaughtThrow() {
async function loop() {
for await (let x of [Throw(new Error("boop"))]) {
@@ -88,7 +88,7 @@ async function CaughtThrow() {
Debugger(e);
}
}
-
+// TODO(kozyatinskiy): this stack trace is suspicious.
async function UncaughtRejectOnBreak() {
async function loop() {
for await (let x of RejectOnReturn(["0", "1"])) {
@@ -97,7 +97,7 @@ async function UncaughtRejectOnBreak() {
}
return loop().catch(Debugger);
}
-
+// TODO(kozyatinskiy): this stack trace is suspicious.
async function UncaughtThrowOnBreak() {
async function loop() {
for await (let x of ThrowOnReturn(["0", "1"])) {
@@ -106,7 +106,7 @@ async function UncaughtThrowOnBreak() {
}
return loop().catch(Debugger);
}
-
+// TODO(kozyatinskiy): this stack trace is suspicious.
async function CaughtRejectOnBreak() {
try {
for await (let x of RejectOnReturn(["0", "1"])) {
@@ -126,12 +126,12 @@ async function CaughtThrowOnBreak() {
Debugger(e);
}
}
-//# sourceURL=test.js`, 7, 129);
+//# sourceURL=test.js`, 9, 26);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
- InspectorTest.logCallFrames(message.params.callFrames);
- InspectorTest.logAsyncStackTrace(message.params.asyncStackTrace);
+ session.logCallFrames(message.params.callFrames);
+ session.logAsyncStackTrace(message.params.asyncStackTrace);
InspectorTest.log('');
Protocol.Debugger.resume();
});
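
Two related changes repeat through the async-stack tests: the script-map and stack-logging helpers move from InspectorTest to the per-connection session object, and the trailing (line, column) arguments to addScript (here 9, 26 instead of 7, 129) record the script's offset in its source so reported locations line up with the expected output. The migrated pause handler, consolidated from the hunk above:

  session.setupScriptMap();
  Protocol.Debugger.onPaused(message => {
    session.logCallFrames(message.params.callFrames);           // sync frames
    session.logAsyncStackTrace(message.params.asyncStackTrace); // async chain
    InspectorTest.log('');
    Protocol.Debugger.resume();
  });
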
diff --git a/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt b/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt
index 2a538879ae..3b9f58aefc 100644
--- a/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-instrumentation-expected.txt
@@ -9,12 +9,12 @@ test (test.js:21:2)
foo (test.js:10:2)
-- Promise.resolve --
-test (test.js:20:2)
+test (test.js:19:14)
(anonymous) (expr1.js:0:0)
foo (test.js:12:2)
-- Promise.resolve --
-test (test.js:20:2)
+test (test.js:19:14)
(anonymous) (expr1.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-instrumentation.js b/deps/v8/test/inspector/debugger/async-instrumentation.js
index 6997c17ffe..6de2ce7d2f 100644
--- a/deps/v8/test/inspector/debugger/async-instrumentation.js
+++ b/deps/v8/test/inspector/debugger/async-instrumentation.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks async instrumentation enabled in the middle.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks async instrumentation enabled in the middle.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function foo() {
// asyncTaskStarted
debugger;
@@ -24,15 +24,15 @@ function test() {
//# sourceURL=test.js`, 7, 26);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
if (enableOnPause-- === 0)
Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 128 });
- InspectorTest.logCallFrames(message.params.callFrames);
+ session.logCallFrames(message.params.callFrames);
var asyncStackTrace = message.params.asyncStackTrace;
while (asyncStackTrace) {
InspectorTest.log(`-- ${asyncStackTrace.description} --`);
- InspectorTest.logCallFrames(asyncStackTrace.callFrames);
+ session.logCallFrames(asyncStackTrace.callFrames);
asyncStackTrace = asyncStackTrace.parent;
}
InspectorTest.log('');
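
This test keeps a hand-rolled walk instead of session.logAsyncStackTrace: the asyncStackTrace field of Debugger.paused is a singly linked list whose nodes carry a description, callFrames, and an optional parent. A hypothetical standalone consumer of that chain (call-frame field names per the event used above):

  function dumpAsyncChain(asyncStackTrace) {
    for (let node = asyncStackTrace; node; node = node.parent) {
      console.log('-- ' + node.description + ' --');
      for (const frame of node.callFrames) {
        console.log(frame.functionName + ' (' + frame.url + ':' +
                    frame.lineNumber + ':' + frame.columnNumber + ')');
      }
    }
  }
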
diff --git a/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt b/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt
index ee91377178..dfdf81fe8c 100644
--- a/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-promise-late-then-expected.txt
@@ -1,16 +1,16 @@
Checks async stack for late .then handlers with gc
foo1 (test.js:11:2)
-- Promise.resolve --
-test (test.js:20:2)
+test (test.js:18:14)
(anonymous) (expr.js:0:0)
foo1 (test.js:11:2)
-- Promise.resolve --
-test (test.js:20:2)
+test (test.js:22:14)
(anonymous) (expr.js:0:0)
foo1 (test.js:11:2)
-- Promise.resolve --
-test (test.js:20:2)
+test (test.js:24:14)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-promise-late-then.js b/deps/v8/test/inspector/debugger/async-promise-late-then.js
index 83c610b921..cad3c7ed86 100644
--- a/deps/v8/test/inspector/debugger/async-promise-late-then.js
+++ b/deps/v8/test/inspector/debugger/async-promise-late-then.js
@@ -3,9 +3,9 @@
// found in the LICENSE file.
// Flags: --expose-gc
-InspectorTest.log('Checks async stack for late .then handlers with gc');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks async stack for late .then handlers with gc');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function foo1() {
gc();
debugger;
@@ -27,13 +27,13 @@ function test() {
}
//# sourceURL=test.js`, 8, 26);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
- InspectorTest.logCallFrames(message.params.callFrames);
+ session.logCallFrames(message.params.callFrames);
var asyncStackTrace = message.params.asyncStackTrace;
while (asyncStackTrace) {
InspectorTest.log(`-- ${asyncStackTrace.description} --`);
- InspectorTest.logCallFrames(asyncStackTrace.callFrames);
+ session.logCallFrames(asyncStackTrace.callFrames);
asyncStackTrace = asyncStackTrace.parent;
}
InspectorTest.log('');
diff --git a/deps/v8/test/inspector/debugger/async-set-timeout.js b/deps/v8/test/inspector/debugger/async-set-timeout.js
index 31712329d3..30096b637f 100644
--- a/deps/v8/test/inspector/debugger/async-set-timeout.js
+++ b/deps/v8/test/inspector/debugger/async-set-timeout.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks that async stack contains setTimeout');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that async stack contains setTimeout');
-InspectorTest.addScript(`
+contextGroup.addScript(`
var resolveCallback;
function foo1() {
function inner1() {
@@ -29,13 +29,13 @@ function foo3() {
}
//# sourceURL=test.js`, 7, 26);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
- InspectorTest.logCallFrames(message.params.callFrames);
+ session.logCallFrames(message.params.callFrames);
var asyncStackTrace = message.params.asyncStackTrace;
while (asyncStackTrace) {
InspectorTest.log(`-- ${asyncStackTrace.description} --`);
- InspectorTest.logCallFrames(asyncStackTrace.callFrames);
+ session.logCallFrames(asyncStackTrace.callFrames);
asyncStackTrace = asyncStackTrace.parent;
}
InspectorTest.log('');
diff --git a/deps/v8/test/inspector/debugger/async-stack-await-expected.txt b/deps/v8/test/inspector/debugger/async-stack-await-expected.txt
index 506b9a268a..4ebcefadad 100644
--- a/deps/v8/test/inspector/debugger/async-stack-await-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-await-expected.txt
@@ -1,34 +1,33 @@
Checks that async stacks works for async/await
foo2 (test.js:15:2)
--- async function (test.js:13:19)--
+-- async function --
foo2 (test.js:13:19)
test (test.js:24:8)
(anonymous) (expr.js:0:0)
foo2 (test.js:17:2)
--- async function (test.js:13:19)--
+-- async function --
foo2 (test.js:13:19)
test (test.js:24:8)
(anonymous) (expr.js:0:0)
foo1 (test.js:9:2)
foo2 (test.js:18:8)
--- async function (test.js:13:19)--
+-- async function --
foo2 (test.js:13:19)
test (test.js:24:8)
(anonymous) (expr.js:0:0)
foo1 (test.js:9:2)
--- Promise.resolve (test.js:19:43)--
--- Promise.resolve (test.js:19:16)--
-foo2 (test.js:19:30)
--- async function (test.js:13:19)--
+-- Promise.resolve --
+foo2 (test.js:19:43)
+-- async function --
foo2 (test.js:13:19)
test (test.js:24:8)
(anonymous) (expr.js:0:0)
foo2 (test.js:20:2)
--- async function (test.js:13:19)--
+-- async function --
foo2 (test.js:13:19)
test (test.js:24:8)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-await.js b/deps/v8/test/inspector/debugger/async-stack-await.js
index 155ff4a978..8f4b162807 100644
--- a/deps/v8/test/inspector/debugger/async-stack-await.js
+++ b/deps/v8/test/inspector/debugger/async-stack-await.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks that async stacks works for async/await');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that async stacks works for async/await');
-InspectorTest.addScript(`
+contextGroup.addScript(`
async function foo1() {
debugger;
return Promise.resolve();
@@ -25,10 +25,10 @@ async function test() {
}
//# sourceURL=test.js`, 7, 26);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
- InspectorTest.logCallFrames(message.params.callFrames);
- InspectorTest.logAsyncStackTrace(message.params.asyncStackTrace);
+ session.logCallFrames(message.params.callFrames);
+ session.logAsyncStackTrace(message.params.asyncStackTrace);
InspectorTest.log('');
Protocol.Debugger.resume();
});
diff --git a/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt b/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
index 6e61d1aca4..f5197a1669 100644
--- a/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-created-frame-expected.txt
@@ -2,88 +2,79 @@ Checks created frame for async call chain
Running test: testPromise
foo1 (test.js:10:2)
--- Promise.resolve (test.js:20:14)--
-promise (test.js:21:2)
+-- Promise.resolve --
+promise (test.js:20:14)
(anonymous) (expr.js:0:0)
Running test: testPromiseThen
foo1 (test.js:10:2)
--- Promise.resolve (test.js:28:14)--
-promiseThen (test.js:30:2)
+-- Promise.resolve --
+promiseThen (test.js:28:14)
(anonymous) (expr.js:0:0)
foo2 (test.js:14:2)
--- Promise.resolve (test.js:29:14)--
--- Promise.resolve (test.js:28:14)--
-promiseThen (test.js:30:2)
+-- Promise.resolve --
+promiseThen (test.js:29:14)
(anonymous) (expr.js:0:0)
Running test: testPromiseThenThen
foo1 (test.js:10:2)
--- Promise.resolve (test.js:37:14)--
-promiseThenThen (test.js:39:2)
+-- Promise.resolve --
+promiseThenThen (test.js:37:14)
(anonymous) (expr.js:0:0)
foo1 (test.js:10:2)
--- Promise.resolve (test.js:38:14)--
-promiseThenThen (test.js:39:2)
+-- Promise.resolve --
+promiseThenThen (test.js:38:14)
(anonymous) (expr.js:0:0)
foo2 (test.js:14:2)
--- Promise.resolve (test.js:37:25)--
--- Promise.resolve (test.js:37:14)--
-promiseThenThen (test.js:39:2)
+-- Promise.resolve --
+promiseThenThen (test.js:37:25)
(anonymous) (expr.js:0:0)
Running test: testPromiseResolve
foo1 (test.js:10:2)
--- Promise.resolve (test.js:44:27)--
-promiseResolve (test.js:44:17)
+-- Promise.resolve --
+promiseResolve (test.js:44:27)
(anonymous) (expr.js:0:0)
Running test: testPromiseReject
foo1 (test.js:10:2)
--- Promise.reject (test.js:48:31)--
-promiseReject (test.js:48:17)
+-- Promise.reject --
+promiseReject (test.js:48:31)
(anonymous) (expr.js:0:0)
Running test: testPromiseAll
foo1 (test.js:10:2)
--- Promise.resolve (test.js:52:44)--
--- Promise.resolve (test.js:52:17)--
-promiseAll (test.js:52:31)
+-- Promise.resolve --
+promiseAll (test.js:52:44)
(anonymous) (expr.js:0:0)
Running test: testPromiseRace
foo1 (test.js:10:2)
--- Promise.resolve (test.js:56:45)--
--- Promise.resolve (test.js:56:17)--
-promiseRace (test.js:56:32)
+-- Promise.resolve --
+promiseRace (test.js:56:45)
(anonymous) (expr.js:0:0)
Running test: testThenableJob1
foo1 (test.js:10:2)
--- Promise.resolve (test.js:60:72)--
--- Promise.resolve (test.js:60:56)--
-Promise.resolve.then (test.js:60:46)
--- Promise.resolve (test.js:60:27)--
-thenableJob1 (test.js:60:17)
+-- Promise.resolve --
+thenableJob1 (test.js:60:72)
(anonymous) (expr.js:0:0)
Running test: testThenableJob2
foo1 (test.js:10:2)
--- Promise.resolve (test.js:64:57)--
-Promise.resolve.then (test.js:64:46)
--- Promise.resolve (test.js:64:27)--
-thenableJob2 (test.js:64:17)
+-- Promise.resolve --
+thenableJob2 (test.js:64:57)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-created-frame.js b/deps/v8/test/inspector/debugger/async-stack-created-frame.js
index f73591dc29..0f2c7a1e78 100644
--- a/deps/v8/test/inspector/debugger/async-stack-created-frame.js
+++ b/deps/v8/test/inspector/debugger/async-stack-created-frame.js
@@ -2,9 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks created frame for async call chain');
+// TODO(kozyatinskiy): fix this test.
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks created frame for async call chain');
-InspectorTest.addScript(
+contextGroup.addScript(
`
function foo1() {
debugger;
@@ -76,10 +77,10 @@ function setTimeouts() {
//# sourceURL=test.js`,
8, 4);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
- InspectorTest.logCallFrames(message.params.callFrames);
- InspectorTest.logAsyncStackTrace(message.params.asyncStackTrace);
+ session.logCallFrames(message.params.callFrames);
+ session.logAsyncStackTrace(message.params.asyncStackTrace);
InspectorTest.log('');
Protocol.Debugger.resume();
});
diff --git a/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt b/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
index a948803f28..81640849ec 100644
--- a/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
+++ b/deps/v8/test/inspector/debugger/async-stack-for-promise-expected.txt
@@ -2,173 +2,118 @@ Checks that async chains for promises are correct.
Running test: testPromise
foo1 (test.js:9:2)
--- Promise.resolve (test.js:19:14)--
-promise (test.js:20:2)
+-- Promise.resolve --
+promise (test.js:19:14)
(anonymous) (testPromise.js:0:0)
Running test: testPromiseResolvedBySetTimeout
foo1 (test.js:9:2)
--- Promise.resolve (test.js:27:14)--
--- setTimeout --
-promiseResolvedBySetTimeout (test.js:28:2)
+-- Promise.resolve --
+promiseResolvedBySetTimeout (test.js:27:14)
(anonymous) (testPromiseResolvedBySetTimeout.js:0:0)
Running test: testPromiseAll
foo1 (test.js:9:2)
--- Promise.resolve (test.js:37:35)--
--- Promise.resolve (test.js:37:19)--
-promiseAll (test.js:39:2)
+-- Promise.resolve --
+promiseAll (test.js:37:35)
(anonymous) (testPromiseAll.js:0:0)
Running test: testPromiseAllReverseOrder
foo1 (test.js:9:2)
--- Promise.resolve (test.js:48:35)--
--- Promise.resolve (test.js:48:19)--
-promiseAllReverseOrder (test.js:50:2)
+-- Promise.resolve --
+promiseAllReverseOrder (test.js:48:35)
(anonymous) (testPromiseAllReverseOrder.js:0:0)
Running test: testPromiseRace
foo1 (test.js:9:2)
--- Promise.resolve (test.js:59:36)--
--- Promise.resolve (test.js:59:19)--
-promiseRace (test.js:60:2)
+-- Promise.resolve --
+promiseRace (test.js:59:36)
(anonymous) (testPromiseRace.js:0:0)
Running test: testTwoChainedCallbacks
foo1 (test.js:9:2)
--- Promise.resolve (test.js:68:14)--
-twoChainedCallbacks (test.js:69:2)
+-- Promise.resolve --
+twoChainedCallbacks (test.js:68:14)
(anonymous) (testTwoChainedCallbacks.js:0:0)
foo2 (test.js:13:2)
--- Promise.resolve (test.js:68:25)--
--- Promise.resolve (test.js:68:14)--
-twoChainedCallbacks (test.js:69:2)
+-- Promise.resolve --
+twoChainedCallbacks (test.js:68:25)
(anonymous) (testTwoChainedCallbacks.js:0:0)
Running test: testPromiseResolve
foo1 (test.js:9:2)
--- Promise.resolve (test.js:74:27)--
-promiseResolve (test.js:74:17)
+-- Promise.resolve --
+promiseResolve (test.js:74:27)
(anonymous) (testPromiseResolve.js:0:0)
foo2 (test.js:13:2)
--- Promise.resolve (test.js:74:38)--
--- Promise.resolve (test.js:74:27)--
-promiseResolve (test.js:74:17)
+-- Promise.resolve --
+promiseResolve (test.js:74:38)
(anonymous) (testPromiseResolve.js:0:0)
Running test: testThenableJobResolvedInSetTimeout
foo1 (test.js:9:2)
--- Promise.resolve (test.js:86:40)--
--- setTimeout --
-thenableJob (test.js:81:4)
-p1.then (test.js:86:25)
--- Promise.resolve (test.js:86:14)--
-thenableJobResolvedInSetTimeout (test.js:87:2)
+-- Promise.resolve --
+thenableJobResolvedInSetTimeout (test.js:86:40)
(anonymous) (testThenableJobResolvedInSetTimeout.js:0:0)
Running test: testThenableJobResolvedInSetTimeoutWithStack
foo1 (test.js:9:2)
--- Promise.resolve (test.js:104:40)--
-inner (test.js:94:6)
--- setTimeout --
-thenableJob (test.js:99:4)
-p1.then (test.js:104:25)
--- Promise.resolve (test.js:104:14)--
-thenableJobResolvedInSetTimeoutWithStack (test.js:105:2)
+-- Promise.resolve --
+thenableJobResolvedInSetTimeoutWithStack (test.js:104:40)
(anonymous) (testThenableJobResolvedInSetTimeoutWithStack.js:0:0)
Running test: testThenableJobResolvedByPromise
foo1 (test.js:9:2)
--- Promise.resolve (test.js:118:40)--
--- Promise.resolve (test.js:113:22)--
-thenableJob (test.js:113:12)
-p1.then (test.js:118:25)
--- Promise.resolve (test.js:118:14)--
-thenableJobResolvedByPromise (test.js:119:2)
+-- Promise.resolve --
+thenableJobResolvedByPromise (test.js:118:40)
(anonymous) (testThenableJobResolvedByPromise.js:0:0)
Running test: testThenableJobResolvedByPromiseWithStack
foo1 (test.js:9:2)
--- Promise.resolve (test.js:136:40)--
-inner (test.js:126:6)
--- Promise.resolve (test.js:131:22)--
-thenableJob (test.js:131:12)
-p1.then (test.js:136:25)
--- Promise.resolve (test.js:136:14)--
-thenableJobResolvedByPromiseWithStack (test.js:137:2)
+-- Promise.resolve --
+thenableJobResolvedByPromiseWithStack (test.js:136:40)
(anonymous) (testThenableJobResolvedByPromiseWithStack.js:0:0)
Running test: testLateThenCallback
foo1 (test.js:9:2)
--- Promise.resolve (test.js:145:12)--
-lateThenCallback (test.js:144:2)
+-- Promise.resolve --
+lateThenCallback (test.js:145:12)
(anonymous) (testLateThenCallback.js:0:0)
Running test: testComplex
inner1 (test.js:154:6)
foo1 (test.js:156:4)
--- Promise.resolve (test.js:202:5)--
-inner2 (test.js:162:6)
--- Promise.resolve (test.js:165:22)--
-foo2 (test.js:165:12)
--- Promise.resolve (test.js:201:5)--
-inner3 (test.js:172:6)
--- setTimeout --
-foo3 (test.js:175:4)
--- Promise.resolve (test.js:200:5)--
--- Promise.resolve (test.js:199:5)--
--- Promise.resolve (test.js:188:7)--
--- Promise.resolve (test.js:187:19)--
-foo5 (test.js:187:52)
--- Promise.resolve (test.js:198:5)--
--- Promise.resolve (test.js:193:7)--
--- Promise.resolve (test.js:192:19)--
-foo6 (test.js:192:34)
--- Promise.resolve (test.js:197:5)--
-complex (test.js:196:18)
+-- Promise.resolve --
+complex (test.js:202:5)
(anonymous) (testComplex.js:0:0)
p.then (test.js:207:8)
--- Promise.resolve (test.js:206:8)--
--- Promise.resolve (test.js:202:5)--
-inner2 (test.js:162:6)
--- Promise.resolve (test.js:165:22)--
-foo2 (test.js:165:12)
--- Promise.resolve (test.js:201:5)--
-inner3 (test.js:172:6)
+-- Promise.resolve --
+p.then (test.js:206:8)
+-- Promise.resolve --
+setTimeout (test.js:205:6)
-- setTimeout --
-foo3 (test.js:175:4)
--- Promise.resolve (test.js:200:5)--
--- Promise.resolve (test.js:199:5)--
--- Promise.resolve (test.js:188:7)--
--- Promise.resolve (test.js:187:19)--
-foo5 (test.js:187:52)
--- Promise.resolve (test.js:198:5)--
--- Promise.resolve (test.js:193:7)--
--- Promise.resolve (test.js:192:19)--
-foo6 (test.js:192:34)
--- Promise.resolve (test.js:197:5)--
-complex (test.js:196:18)
+complex (test.js:204:2)
(anonymous) (testComplex.js:0:0)
Running test: testReject
foo1 (test.js:9:2)
--- Promise.reject (test.js:217:31)--
-reject (test.js:217:17)
+-- Promise.reject --
+reject (test.js:217:31)
(anonymous) (testReject.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/async-stack-for-promise.js b/deps/v8/test/inspector/debugger/async-stack-for-promise.js
index 9865d7a767..198c971e12 100644
--- a/deps/v8/test/inspector/debugger/async-stack-for-promise.js
+++ b/deps/v8/test/inspector/debugger/async-stack-for-promise.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks that async chains for promises are correct.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that async chains for promises are correct.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function foo1() {
debugger;
}
@@ -219,10 +219,10 @@ function reject() {
//# sourceURL=test.js`, 7, 26);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
- InspectorTest.logCallFrames(message.params.callFrames);
- InspectorTest.logAsyncStackTrace(message.params.asyncStackTrace);
+ session.logCallFrames(message.params.callFrames);
+ session.logAsyncStackTrace(message.params.asyncStackTrace);
InspectorTest.log('');
Protocol.Debugger.resume();
});
diff --git a/deps/v8/test/inspector/debugger/async-stacks-limit-expected.txt b/deps/v8/test/inspector/debugger/async-stacks-limit-expected.txt
deleted file mode 100644
index 8dd1456990..0000000000
--- a/deps/v8/test/inspector/debugger/async-stacks-limit-expected.txt
+++ /dev/null
@@ -1,140 +0,0 @@
-Checks that async stacks works good with different limits
-
-Running test: testZeroLimit
-foo1 (test.js:11:2)
-
-
-Running test: testTwoLimit
-foo1 (test.js:11:2)
--- Promise.resolve --
-promise (test.js:23:2)
-(anonymous) (expr.js:0:0)
-
-
-Running test: testOneLimitTwoPromises
-foo1 (test.js:11:2)
-
-foo2 (test.js:15:2)
-
-
-Running test: testFourLimitTwoPromises
-foo1 (test.js:11:2)
-
-foo2 (test.js:15:2)
-
-
-Running test: testSixLimitTwoPromises
-foo1 (test.js:11:2)
-
-foo2 (test.js:15:2)
--- Promise.resolve --
-twoPromises (test.js:35:2)
-(anonymous) (expr.js:0:0)
-
-
-Running test: testTwoLimitTwoSetTimeouts
-foo1 (test.js:11:2)
-
-foo2 (test.js:15:2)
--- setTimeout --
-twoSetTimeout (test.js:41:2)
-(anonymous) (expr.js:0:0)
-
-
-Running test: testThreeLimitTwoSetTimeouts
-foo1 (test.js:11:2)
--- setTimeout --
-twoSetTimeout (test.js:40:2)
-(anonymous) (expr.js:0:0)
-
-foo2 (test.js:15:2)
--- setTimeout --
-twoSetTimeout (test.js:41:2)
-(anonymous) (expr.js:0:0)
-
-
-Running test: testTenLimitTwentySetTimeouts
-foo1 (:0:17)
-(anonymous) (:0:28)
-
-foo2 (:0:17)
-(anonymous) (:0:28)
-
-foo3 (:0:17)
-(anonymous) (:0:28)
-
-foo4 (:0:17)
-(anonymous) (:0:28)
-
-foo5 (:0:17)
-(anonymous) (:0:28)
-
-foo6 (:0:17)
-(anonymous) (:0:28)
-
-foo7 (:0:17)
-(anonymous) (:0:28)
-
-foo8 (:0:17)
-(anonymous) (:0:28)
-
-foo9 (:0:17)
-(anonymous) (:0:28)
-
-foo10 (:0:18)
-(anonymous) (:0:29)
-
-foo11 (:0:18)
-(anonymous) (:0:29)
--- setTimeout --
-twentySetTimeout (test.js:49:4)
-(anonymous) (expr.js:0:0)
-
-foo12 (:0:18)
-(anonymous) (:0:29)
--- setTimeout --
-twentySetTimeout (test.js:49:4)
-(anonymous) (expr.js:0:0)
-
-foo13 (:0:18)
-(anonymous) (:0:29)
--- setTimeout --
-twentySetTimeout (test.js:49:4)
-(anonymous) (expr.js:0:0)
-
-foo14 (:0:18)
-(anonymous) (:0:29)
--- setTimeout --
-twentySetTimeout (test.js:49:4)
-(anonymous) (expr.js:0:0)
-
-foo15 (:0:18)
-(anonymous) (:0:29)
--- setTimeout --
-twentySetTimeout (test.js:49:4)
-(anonymous) (expr.js:0:0)
-
-foo16 (:0:18)
-(anonymous) (:0:29)
--- setTimeout --
-twentySetTimeout (test.js:49:4)
-(anonymous) (expr.js:0:0)
-
-foo17 (:0:18)
-(anonymous) (:0:29)
--- setTimeout --
-twentySetTimeout (test.js:49:4)
-(anonymous) (expr.js:0:0)
-
-foo18 (:0:18)
-(anonymous) (:0:29)
--- setTimeout --
-twentySetTimeout (test.js:49:4)
-(anonymous) (expr.js:0:0)
-
-foo19 (:0:18)
-(anonymous) (:0:29)
--- setTimeout --
-twentySetTimeout (test.js:49:4)
-(anonymous) (expr.js:0:0)
-
diff --git a/deps/v8/test/inspector/debugger/async-stacks-limit.js b/deps/v8/test/inspector/debugger/async-stacks-limit.js
deleted file mode 100644
index 62206750df..0000000000
--- a/deps/v8/test/inspector/debugger/async-stacks-limit.js
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-InspectorTest.log('Checks that async stacks works good with different limits');
-
-InspectorTest.addScript(`
-var resolveTest;
-
-function foo1() {
- debugger;
-}
-
-function foo2() {
- debugger;
- if (resolveTest) resolveTest();
-}
-
-function promise() {
- var resolve1;
- var p1 = new Promise(resolve => resolve1 = resolve);
- var p2 = p1.then(foo1);
- resolve1();
- return p2;
-}
-
-function twoPromises() {
- var resolve1;
- var resolve2;
- var p1 = new Promise(resolve => resolve1 = resolve);
- var p2 = new Promise(resolve => resolve2 = resolve);
- var p3 = p1.then(foo1);
- var p4 = p2.then(foo2);
- resolve1();
- resolve2();
- return Promise.all([p3, p4]);
-}
-
-function twoSetTimeout() {
- setTimeout(foo1, 0);
- setTimeout(foo2, 0);
- return new Promise(resolve => resolveTest = resolve);
-}
-
-function twentySetTimeout() {
- var resolve1;
- var p1 = new Promise(resolve => resolve1 = resolve);
- for (var i = 1; i <= 19; ++i)
- setTimeout('(function foo' + i + '(){debugger;})()',0);
- setTimeout(resolve1, 0);
- return p1;
-}
-
-//# sourceURL=test.js`, 7, 26);
-
-InspectorTest.setupScriptMap();
-Protocol.Debugger.onPaused(message => {
- InspectorTest.logCallFrames(message.params.callFrames);
- var asyncStackTrace = message.params.asyncStackTrace;
- while (asyncStackTrace) {
- InspectorTest.log(`-- ${asyncStackTrace.description} --`);
- InspectorTest.logCallFrames(asyncStackTrace.callFrames);
- asyncStackTrace = asyncStackTrace.parent;
- }
- InspectorTest.log('');
- Protocol.Debugger.resume();
-});
-
-Protocol.Debugger.enable();
-Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 128 });
-InspectorTest.runTestSuite([
- function testZeroLimit(next) {
- Protocol.Runtime.evaluate({
- expression: 'setMaxAsyncTaskStacks(0)//# sourceURL=expr.js'})
- .then(() => Protocol.Runtime.evaluate({
- expression: 'promise()//# sourceURL=expr.js', awaitPromise: true
- }))
- .then(() => cancelAllAsyncTasks())
- .then(next);
- },
-
- function testTwoLimit(next) {
- // we need one stack for parent task and one for next task.
- Protocol.Runtime
- .evaluate({expression: 'setMaxAsyncTaskStacks(2)//# sourceURL=expr.js'})
- .then(() => Protocol.Runtime.evaluate({
- expression: 'promise()//# sourceURL=expr.js',
- awaitPromise: true
- }))
- .then(() => cancelAllAsyncTasks())
- .then(next);
- },
-
- function testOneLimitTwoPromises(next) {
- // Should be no async stacks because when first microtask is finished
- // it will resolve and schedule p3 - will remove async stack for scheduled
- // p2.
- Protocol.Runtime.evaluate({
- expression: 'setMaxAsyncTaskStacks(1)//# sourceURL=expr.js'})
- .then(() => Protocol.Runtime.evaluate({
- expression: 'twoPromises()//# sourceURL=expr.js', awaitPromise: true
- }))
- .then(() => cancelAllAsyncTasks())
- .then(next);
- },
-
- function testFourLimitTwoPromises(next) {
- Protocol.Runtime
- .evaluate({expression: 'setMaxAsyncTaskStacks(4)//# sourceURL=expr.js'})
- .then(() => Protocol.Runtime.evaluate({
- expression: 'twoPromises()//# sourceURL=expr.js',
- awaitPromise: true
- }))
- .then(() => cancelAllAsyncTasks())
- .then(next);
- },
-
- function testSixLimitTwoPromises(next) {
- Protocol.Runtime
- .evaluate({expression: 'setMaxAsyncTaskStacks(6)//# sourceURL=expr.js'})
- .then(() => Protocol.Runtime.evaluate({
- expression: 'twoPromises()//# sourceURL=expr.js',
- awaitPromise: true
- }))
- .then(() => cancelAllAsyncTasks())
- .then(next);
- },
-
- function testTwoLimitTwoSetTimeouts(next) {
- Protocol.Runtime.evaluate({
- expression: 'setMaxAsyncTaskStacks(2)//# sourceURL=expr.js'})
- .then(() => Protocol.Runtime.evaluate({
- expression: 'twoSetTimeout()//# sourceURL=expr.js', awaitPromise: true
- }))
- .then(() => cancelAllAsyncTasks())
- .then(next);
- },
-
- function testThreeLimitTwoSetTimeouts(next) {
- Protocol.Runtime.evaluate({
- expression: 'setMaxAsyncTaskStacks(3)//# sourceURL=expr.js'})
- .then(() => Protocol.Runtime.evaluate({
- expression: 'twoSetTimeout()//# sourceURL=expr.js', awaitPromise: true
- }))
- .then(() => cancelAllAsyncTasks())
- .then(next);
- },
-
- function testTenLimitTwentySetTimeouts(next) {
- Protocol.Runtime.evaluate({
- expression: 'setMaxAsyncTaskStacks(10)//# sourceURL=expr.js'})
- .then(() => Protocol.Runtime.evaluate({
- expression: 'twentySetTimeout()//# sourceURL=expr.js',
- awaitPromise: true
- }))
- .then(() => cancelAllAsyncTasks())
- .then(next);
- }
-]);
-
-function cancelAllAsyncTasks() {
- return Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 0 })
- .then(() => Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 128 }));
-}
diff --git a/deps/v8/test/inspector/debugger/break-on-exception-expected.txt b/deps/v8/test/inspector/debugger/break-on-exception-expected.txt
index 92a38f0e83..f4d70f09b9 100644
--- a/deps/v8/test/inspector/debugger/break-on-exception-expected.txt
+++ b/deps/v8/test/inspector/debugger/break-on-exception-expected.txt
@@ -9,4 +9,4 @@ Running test: breakOnCaughtException
paused in throwUncaughtException
paused in throwCaughtException
-Running test: noBreakInEvaluateInSilentMode
\ No newline at end of file
+Running test: noBreakInEvaluateInSilentMode
diff --git a/deps/v8/test/inspector/debugger/break-on-exception.js b/deps/v8/test/inspector/debugger/break-on-exception.js
index d9a208dadf..6f47ec86d7 100644
--- a/deps/v8/test/inspector/debugger/break-on-exception.js
+++ b/deps/v8/test/inspector/debugger/break-on-exception.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Check that inspector correctly change break on exception state.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Check that inspector correctly change break on exception state.");
-InspectorTest.addScript(`
+contextGroup.addScript(`
function scheduleUncaughtException()
{
setTimeout(throwUncaughtException, 0);
diff --git a/deps/v8/test/inspector/debugger/call-frame-function-location-expected.txt b/deps/v8/test/inspector/debugger/call-frame-function-location-expected.txt
index 8a34f45272..211352c2a2 100644
--- a/deps/v8/test/inspector/debugger/call-frame-function-location-expected.txt
+++ b/deps/v8/test/inspector/debugger/call-frame-function-location-expected.txt
@@ -1,3 +1,4 @@
+Tests that function location in call frames is correct
Paused on 'debugger;'
Top frame location: {"scriptId":"42","lineNumber":3,"columnNumber":4}
Top frame functionLocation: {"scriptId":"42","lineNumber":0,"columnNumber":21}
diff --git a/deps/v8/test/inspector/debugger/call-frame-function-location.js b/deps/v8/test/inspector/debugger/call-frame-function-location.js
index c91164a037..4775b30f6d 100644
--- a/deps/v8/test/inspector/debugger/call-frame-function-location.js
+++ b/deps/v8/test/inspector/debugger/call-frame-function-location.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.addScript(
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that function location in call frames is correct');
+
+contextGroup.addScript(
`function testFunction()
{
var a = 2;
diff --git a/deps/v8/test/inspector/debugger/caught-exception-from-framework-inside-async.js b/deps/v8/test/inspector/debugger/caught-exception-from-framework-inside-async.js
index 3bc7b6efa8..f59e71fda3 100644
--- a/deps/v8/test/inspector/debugger/caught-exception-from-framework-inside-async.js
+++ b/deps/v8/test/inspector/debugger/caught-exception-from-framework-inside-async.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Async caught exception prediction and blackboxing.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Async caught exception prediction and blackboxing.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function constructorThrow() {
return new Promise((resolve, reject) =>
Promise.resolve().then(() =>
@@ -18,7 +18,7 @@ function dotCatch(producer) {
}
//# sourceURL=framework.js`);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
(async function test() {
Protocol.Debugger.enable();
Protocol.Debugger.setBlackboxPatterns({patterns: ['framework\.js']});
@@ -33,6 +33,6 @@ InspectorTest.setupScriptMap();
async function waitPauseAndDumpLocation() {
var message = await Protocol.Debugger.oncePaused();
InspectorTest.log('paused at:');
- InspectorTest.logSourceLocation(message.params.callFrames[0].location);
+ session.logSourceLocation(message.params.callFrames[0].location);
return message;
}
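The test above relies on blackboxing: frames from scripts whose URL matches a blackbox pattern are skipped when reporting pauses and while stepping. A minimal sketch of the call, mirroring the setBlackboxPatterns use in the test:

    // Skip frames from any script whose sourceURL matches framework.js.
    await Protocol.Debugger.setBlackboxPatterns({patterns: ['framework\\.js']});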
diff --git a/deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js b/deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js
index d049cf0da0..38b622d3db 100644
--- a/deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js
+++ b/deps/v8/test/inspector/debugger/caught-uncaught-exceptions.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Check that inspector correctly passes caught/uncaught information.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Check that inspector correctly passes caught/uncaught information.");
-InspectorTest.addScript(
+contextGroup.addScript(
`function throwCaught() { try { throw new Error(); } catch (_) {} }
function throwUncaught() { throw new Error(); }
function schedule(f) { setTimeout(f, 0); }
diff --git a/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks-expected.txt b/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks-expected.txt
new file mode 100644
index 0000000000..c114e34012
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks-expected.txt
@@ -0,0 +1,37 @@
+Checks that we collect obsolete async tasks with async stacks.
+Async stacks count: 2
+Scheduled async tasks: 1
+Created async tasks: 1
+Async tasks with parent: 0
+Recurring async tasks: 1
+
+Async stacks count: 0
+Scheduled async tasks: 0
+Created async tasks: 0
+Async tasks with parent: 0
+Recurring async tasks: 0
+
+Async stacks count: 2
+Scheduled async tasks: 0
+Created async tasks: 2
+Async tasks with parent: 2
+Recurring async tasks: 0
+
+Async stacks count: 0
+Scheduled async tasks: 0
+Created async tasks: 0
+Async tasks with parent: 0
+Recurring async tasks: 0
+
+Async stacks count: 1
+Scheduled async tasks: 1
+Created async tasks: 0
+Async tasks with parent: 0
+Recurring async tasks: 0
+
+Async stacks count: 0
+Scheduled async tasks: 0
+Created async tasks: 0
+Async tasks with parent: 0
+Recurring async tasks: 0
+
diff --git a/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks.js b/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks.js
new file mode 100644
index 0000000000..cfef345d19
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/collect-obsolete-async-tasks.js
@@ -0,0 +1,35 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that we collect obsolete async tasks with async stacks.');
+
+contextGroup.addScript(`
+function test() {
+ inspector.setMaxAsyncTaskStacks(128);
+ var p = Promise.resolve();
+
+ inspector.dumpAsyncTaskStacksStateForTest();
+ inspector.setMaxAsyncTaskStacks(128);
+ inspector.dumpAsyncTaskStacksStateForTest();
+
+ p.then(() => 42).then(() => 239);
+
+ inspector.dumpAsyncTaskStacksStateForTest();
+ inspector.setMaxAsyncTaskStacks(128);
+ inspector.dumpAsyncTaskStacksStateForTest();
+
+ setTimeout(() => 42, 0);
+
+ inspector.dumpAsyncTaskStacksStateForTest();
+ inspector.setMaxAsyncTaskStacks(128);
+ inspector.dumpAsyncTaskStacksStateForTest();
+}
+`);
+
+(async function test() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 128});
+ await Protocol.Runtime.evaluate({expression: 'test()'});
+ InspectorTest.completeTest();
+})()
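The helpers exercised here, inspector.setMaxAsyncTaskStacks and inspector.dumpAsyncTaskStacksStateForTest, are test-only hooks injected into the context by the harness. A short sketch of the measurement step, using only names from the test above:

    inspector.setMaxAsyncTaskStacks(128);        // cap the stored async task stacks
    setTimeout(() => 42, 0);                     // schedule one async task
    inspector.dumpAsyncTaskStacksStateForTest(); // print the bookkeeping counters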
diff --git a/deps/v8/test/inspector/debugger/collect-old-async-call-chains-expected.txt b/deps/v8/test/inspector/debugger/collect-old-async-call-chains-expected.txt
new file mode 100644
index 0000000000..64fef4af77
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/collect-old-async-call-chains-expected.txt
@@ -0,0 +1,211 @@
+Checks that we drop old async call chains.
+
+Running test: testInfrastructure
+inspector.setMaxAsyncTaskStacks(1024)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(1024)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(1024)
+Run expression 'console.trace(42)' with async chain len: 5
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(1024)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(1024)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 2
+
+inspector.setMaxAsyncTaskStacks(1024)
+Run expression 'console.trace(42)' with async chain len: 5
+actual async chain len: 5
+
+
+Running test: testZeroLimit
+inspector.setMaxAsyncTaskStacks(0)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 0
+
+inspector.setMaxAsyncTaskStacks(0)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 0
+
+inspector.setMaxAsyncTaskStacks(0)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 0
+
+inspector.setMaxAsyncTaskStacks(0)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 0
+
+
+Running test: testOneLimit
+inspector.setMaxAsyncTaskStacks(1)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(1)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(1)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(1)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 1
+
+
+Running test: testTwoLimit
+inspector.setMaxAsyncTaskStacks(2)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(2)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 0
+
+inspector.setMaxAsyncTaskStacks(2)
+Run expression 'console.trace(42)' with async chain len: 3
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(2)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(2)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 2
+
+inspector.setMaxAsyncTaskStacks(2)
+Run expression 'console.trace(42)' with async chain len: 3
+actual async chain len: 1
+
+
+Running test: testMoreThanTwoLimit
+inspector.setMaxAsyncTaskStacks(3)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(3)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(3)
+Run expression 'console.trace(42)' with async chain len: 3
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(3)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(3)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 2
+
+inspector.setMaxAsyncTaskStacks(3)
+Run expression 'console.trace(42)' with async chain len: 3
+actual async chain len: 3
+
+inspector.setMaxAsyncTaskStacks(4)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(4)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(4)
+Run expression 'console.trace(42)' with async chain len: 3
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(4)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(4)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 2
+
+inspector.setMaxAsyncTaskStacks(4)
+Run expression 'console.trace(42)' with async chain len: 3
+actual async chain len: 3
+
+inspector.setMaxAsyncTaskStacks(5)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(5)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(5)
+Run expression 'console.trace(42)' with async chain len: 3
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(5)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(5)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 2
+
+inspector.setMaxAsyncTaskStacks(5)
+Run expression 'console.trace(42)' with async chain len: 3
+actual async chain len: 3
+
+inspector.setMaxAsyncTaskStacks(6)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(6)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(6)
+Run expression 'console.trace(42)' with async chain len: 3
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(6)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(6)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 2
+
+inspector.setMaxAsyncTaskStacks(6)
+Run expression 'console.trace(42)' with async chain len: 3
+actual async chain len: 3
+
+inspector.setMaxAsyncTaskStacks(7)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(7)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(7)
+Run expression 'console.trace(42)' with async chain len: 3
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(7)
+Run expression 'console.trace(42)' with async chain len: 1
+actual async chain len: 1
+
+inspector.setMaxAsyncTaskStacks(7)
+Run expression 'console.trace(42)' with async chain len: 2
+actual async chain len: 2
+
+inspector.setMaxAsyncTaskStacks(7)
+Run expression 'console.trace(42)' with async chain len: 3
+actual async chain len: 3
+
diff --git a/deps/v8/test/inspector/debugger/collect-old-async-call-chains.js b/deps/v8/test/inspector/debugger/collect-old-async-call-chains.js
new file mode 100644
index 0000000000..7ac822534b
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/collect-old-async-call-chains.js
@@ -0,0 +1,171 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that we drop old async call chains.');
+
+Protocol.Debugger.enable();
+Protocol.Runtime.enable();
+InspectorTest.runAsyncTestSuite([
+ async function testInfrastructure() {
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 128});
+ await setMaxAsyncTaskStacks(1024);
+ runWithAsyncChainPromise(1, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(1024);
+ runWithAsyncChainPromise(2, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(1024);
+ runWithAsyncChainPromise(5, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(1024);
+ runWithAsyncChainSetTimeout(1, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(1024);
+ runWithAsyncChainSetTimeout(2, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(1024);
+ runWithAsyncChainSetTimeout(5, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+ },
+
+ async function testZeroLimit() {
+ const limit = 0;
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 128});
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainPromise(1, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainPromise(2, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainSetTimeout(1, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainSetTimeout(2, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+ },
+
+ async function testOneLimit() {
+ const limit = 1;
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 128});
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainPromise(1, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainPromise(2, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainSetTimeout(1, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainSetTimeout(2, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+ },
+
+ async function testTwoLimit() {
+ const limit = 2;
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 128});
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainPromise(1, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainPromise(2, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainPromise(3, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainSetTimeout(1, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainSetTimeout(2, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainSetTimeout(3, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+ },
+
+ async function testMoreThanTwoLimit() {
+ for (let limit = 3; limit <= 7; ++limit) {
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 128});
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainPromise(1, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainPromise(2, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainPromise(3, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainSetTimeout(1, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainSetTimeout(2, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(limit);
+ runWithAsyncChainSetTimeout(3, 'console.trace(42)');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+ }
+ },
+]);
+
+function runWithAsyncChainPromise(len, source) {
+ InspectorTest.log(`Run expression '${source}' with async chain len: ${len}`);
+ let then = '.then(() => 1)';
+ let pause = `.then(() => { ${source} })`;
+ Protocol.Runtime.evaluate({
+ expression: `Promise.resolve()${then.repeat(len - 1)}${pause}`
+ });
+}
+
+function runWithAsyncChainSetTimeout(len, source) {
+ InspectorTest.log(`Run expression '${source}' with async chain len: ${len}`);
+ let setTimeoutPrefix = '() => setTimeout(';
+ let setTimeoutSuffix = ', 0)';
+ Protocol.Runtime.evaluate({
+ expression: `setTimeout(${setTimeoutPrefix.repeat(len - 1)}'${source}'${setTimeoutSuffix.repeat(len - 1)}, 0)`
+ });
+}
+
+function dumpAsyncChainLength(message) {
+ let stackTrace = message.params.asyncStackTrace || message.params.stackTrace.parent;
+ let asyncChainCount = 0;
+ while (stackTrace) {
+ ++asyncChainCount;
+ stackTrace = stackTrace.parent;
+ }
+ InspectorTest.log(`actual async chain len: ${asyncChainCount}\n`);
+}
+
+async function setMaxAsyncTaskStacks(max) {
+ let expression = `inspector.setMaxAsyncTaskStacks(${max})`;
+ InspectorTest.log(expression);
+ await Protocol.Runtime.evaluate({expression});
+}
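dumpAsyncChainLength above measures chain length by following parent links: a console message carries either asyncStackTrace or stackTrace.parent, and each async segment points at the stack that created it. A standalone sketch of the walk:

    // Count async segments by walking creation-time parent links.
    function chainLength(stackTrace) {
      let count = 0;
      for (let st = stackTrace; st; st = st.parent) ++count;
      return count;
    }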
diff --git a/deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt b/deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt
index 821ce46cd3..338b136c8f 100644
--- a/deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt
+++ b/deps/v8/test/inspector/debugger/command-line-api-with-bound-function-expected.txt
@@ -20,4 +20,4 @@ paused in boo
function boo called
> debug and unmonitor bar
> call bar
-paused in boo \ No newline at end of file
+paused in boo
diff --git a/deps/v8/test/inspector/debugger/command-line-api-with-bound-function.js b/deps/v8/test/inspector/debugger/command-line-api-with-bound-function.js
index d857d4191d..3c00585d21 100644
--- a/deps/v8/test/inspector/debugger/command-line-api-with-bound-function.js
+++ b/deps/v8/test/inspector/debugger/command-line-api-with-bound-function.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Check that debug and monitor methods from Command Line API works with bound function.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Check that debug and monitor methods from Command Line API works with bound function.");
-InspectorTest.addScript(`
+contextGroup.addScript(`
function foo() {}
function boo() {}
var bar = boo.bind(null);
diff --git a/deps/v8/test/inspector/debugger/continue-to-location-expected.txt b/deps/v8/test/inspector/debugger/continue-to-location-expected.txt
index d0c6ce715a..948edd85fb 100644
--- a/deps/v8/test/inspector/debugger/continue-to-location-expected.txt
+++ b/deps/v8/test/inspector/debugger/continue-to-location-expected.txt
@@ -1,3 +1,4 @@
+Tests Debugger.continueToLocation
Paused on debugger statement
Paused after continueToLocation
Stopped on line 8, expected 8, requested 8, (0-based numbers).
@@ -28,4 +29,3 @@ Paused after continueToLocation
Stopped on line 17, expected 17, requested 17, (0-based numbers).
Control parameter 'step' calculation result: 6, expected: 6
SUCCESS
-
diff --git a/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt b/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt
new file mode 100644
index 0000000000..1f41dcfc6d
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames-expected.txt
@@ -0,0 +1,81 @@
+Check that continue-to-location works with different strategies.
+
+Running test: testAwaitAny
+(anonymous) (expr.js:0:0)
+
+asyncFact (test.js:9:2)
+(anonymous) (expr.js:0:0)
+
+asyncFact (test.js:11:2)
+-- async function --
+asyncFact (test.js:8:24)
+asyncFact (test.js:10:20)
+asyncFact (test.js:10:20)
+asyncFact (test.js:10:20)
+(anonymous) (expr.js:0:0)
+
+
+Running test: testAwaitCurrent
+(anonymous) (expr.js:0:0)
+
+asyncFact (test.js:9:2)
+(anonymous) (expr.js:0:0)
+
+asyncFact (test.js:11:2)
+-- async function --
+asyncFact (test.js:8:24)
+(anonymous) (expr.js:0:0)
+
+
+Running test: testAny
+(anonymous) (expr.js:0:0)
+
+fact (test.js:16:2)
+(anonymous) (expr.js:0:0)
+
+fact (test.js:18:2)
+fact (test.js:17:14)
+fact (test.js:17:14)
+fact (test.js:17:14)
+(anonymous) (expr.js:0:0)
+
+
+Running test: testCurrent
+(anonymous) (expr.js:0:0)
+
+fact (test.js:16:2)
+(anonymous) (expr.js:0:0)
+
+fact (test.js:18:2)
+(anonymous) (expr.js:0:0)
+
+
+Running test: testTopLevelAny
+(anonymous) (expr.js:0:0)
+
+topLevel (test.js:23:2)
+(anonymous) (expr.js:0:0)
+
+(anonymous) (:1:10)
+topLevel (test.js:23:2)
+(anonymous) (expr.js:0:0)
+
+(anonymous) (:4:10)
+topLevel (test.js:23:2)
+(anonymous) (expr.js:0:0)
+
+
+Running test: testTopLevelCurrent
+(anonymous) (expr.js:0:0)
+
+topLevel (test.js:23:2)
+(anonymous) (expr.js:0:0)
+
+(anonymous) (:1:10)
+topLevel (test.js:23:2)
+(anonymous) (expr.js:0:0)
+
+(anonymous) (:4:10)
+topLevel (test.js:23:2)
+(anonymous) (expr.js:0:0)
+
diff --git a/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames.js b/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames.js
new file mode 100644
index 0000000000..c33ff6b93c
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/continue-to-location-target-call-frames.js
@@ -0,0 +1,139 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Check that continue-to-location works with different strategies.');
+
+contextGroup.addScript(`
+async function asyncFact(n) {
+ if (n == 0) return 1;
+ let r = n * await asyncFact(n - 1);
+ console.log(r);
+ return r;
+}
+
+function fact(n) {
+ if (n == 0) return 1;
+ let r = n * fact(n - 1);
+ console.log(r);
+ return r;
+}
+
+function topLevel() {
+ eval(` + '`' + `
+ var a = 1;
+ var b = 2;
+ fact(3);
+ console.log(a + b);
+ ` + '`' + `);
+}
+
+//# sourceURL=test.js`, 7, 26);
+
+session.setupScriptMap();
+InspectorTest.runAsyncTestSuite([
+ async function testAwaitAny() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 128 });
+ Protocol.Debugger.pause();
+ Protocol.Runtime.evaluate({expression: 'asyncFact(4)//# sourceURL=expr.js'});
+ await pausedAndDumpStack();
+ Protocol.Debugger.stepInto();
+ let message = await pausedAndDumpStack();
+ let location = message.params.callFrames[0].location;
+ location.lineNumber = 11;
+ Protocol.Debugger.continueToLocation({location, targetCallFrames: 'any'});
+ await pausedAndDumpStack();
+ Protocol.Debugger.disable();
+ },
+
+ async function testAwaitCurrent() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 128 });
+ Protocol.Debugger.pause();
+ Protocol.Runtime.evaluate({expression: 'asyncFact(4)//# sourceURL=expr.js'});
+ await pausedAndDumpStack();
+ Protocol.Debugger.stepInto();
+ let message = await pausedAndDumpStack();
+ let location = message.params.callFrames[0].location;
+ location.lineNumber = 11;
+ Protocol.Debugger.continueToLocation({location, targetCallFrames: 'current'});
+ await pausedAndDumpStack();
+ await Protocol.Debugger.resume();
+ Protocol.Debugger.disable();
+ },
+
+ async function testAny() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 128 });
+ Protocol.Debugger.pause();
+ Protocol.Runtime.evaluate({expression: 'fact(4)//# sourceURL=expr.js'});
+ await pausedAndDumpStack();
+ Protocol.Debugger.stepInto();
+ let message = await pausedAndDumpStack();
+ let location = message.params.callFrames[0].location;
+ location.lineNumber = 18;
+ Protocol.Debugger.continueToLocation({location, targetCallFrames: 'any'});
+ await pausedAndDumpStack();
+ Protocol.Debugger.disable();
+ },
+
+ async function testCurrent() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 128 });
+ Protocol.Debugger.pause();
+ Protocol.Runtime.evaluate({expression: 'fact(4)//# sourceURL=expr.js'});
+ await pausedAndDumpStack();
+ Protocol.Debugger.stepInto();
+ let message = await pausedAndDumpStack();
+ let location = message.params.callFrames[0].location;
+ location.lineNumber = 18;
+ Protocol.Debugger.continueToLocation({location, targetCallFrames: 'current'});
+ await pausedAndDumpStack();
+ await Protocol.Debugger.resume();
+ Protocol.Debugger.disable();
+ },
+
+ async function testTopLevelAny() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 128 });
+ Protocol.Debugger.pause();
+ Protocol.Runtime.evaluate({expression: 'topLevel()//# sourceURL=expr.js'});
+ await pausedAndDumpStack();
+ Protocol.Debugger.stepInto();
+ await pausedAndDumpStack();
+ Protocol.Debugger.stepInto();
+ let message = await pausedAndDumpStack();
+ let location = message.params.callFrames[0].location;
+ location.lineNumber = 4;
+ Protocol.Debugger.continueToLocation({location, targetCallFrames: 'any'});
+ await pausedAndDumpStack();
+ Protocol.Debugger.disable();
+ },
+
+ async function testTopLevelCurrent() {
+ Protocol.Debugger.enable();
+ Protocol.Debugger.setAsyncCallStackDepth({ maxDepth: 128 });
+ Protocol.Debugger.pause();
+ Protocol.Runtime.evaluate({expression: 'topLevel()//# sourceURL=expr.js'});
+ await pausedAndDumpStack();
+ Protocol.Debugger.stepInto();
+ await pausedAndDumpStack();
+ Protocol.Debugger.stepInto();
+ let message = await pausedAndDumpStack();
+ let location = message.params.callFrames[0].location;
+ location.lineNumber = 4;
+ Protocol.Debugger.continueToLocation({location, targetCallFrames: 'current'});
+ await pausedAndDumpStack();
+ await Protocol.Debugger.resume();
+ Protocol.Debugger.disable();
+ }
+]);
+
+async function pausedAndDumpStack() {
+ let message = await Protocol.Debugger.oncePaused();
+ session.logCallFrames(message.params.callFrames);
+ session.logAsyncStackTrace(message.params.asyncStackTrace);
+ InspectorTest.log('');
+ return message;
+}
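These cases drive Debugger.continueToLocation with two strategies: targetCallFrames 'any' stops in whichever frame reaches the location first (for the recursive fact, the deepest call), while 'current' stops only once the originally paused frame gets there. A minimal sketch assuming an already-paused session:

    // Assuming a pause has been reported: run to a later line of the top frame.
    let {params} = await Protocol.Debugger.oncePaused();
    let location = params.callFrames[0].location;   // {scriptId, lineNumber, columnNumber}
    location.lineNumber = 18;                       // target line in the same script
    await Protocol.Debugger.continueToLocation({location, targetCallFrames: 'current'});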
diff --git a/deps/v8/test/inspector/debugger/continue-to-location.js b/deps/v8/test/inspector/debugger/continue-to-location.js
index b72c8585e6..878499d1fe 100644
--- a/deps/v8/test/inspector/debugger/continue-to-location.js
+++ b/deps/v8/test/inspector/debugger/continue-to-location.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.addScript(
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests Debugger.continueToLocation');
+
+contextGroup.addScript(
`function statementsExample()
{
var self = arguments.callee;
diff --git a/deps/v8/test/inspector/debugger/destory-in-break-program-expected.txt b/deps/v8/test/inspector/debugger/destory-in-break-program-expected.txt
new file mode 100644
index 0000000000..c0ce88ecbc
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/destory-in-break-program-expected.txt
@@ -0,0 +1 @@
+Check destroying agent inside of breakProgram
diff --git a/deps/v8/test/inspector/debugger/destory-in-break-program.js b/deps/v8/test/inspector/debugger/destory-in-break-program.js
new file mode 100644
index 0000000000..e002328aa1
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/destory-in-break-program.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Check destroying agent inside of breakProgram');
+
+(async function test(){
+ await Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({expression: 'inspector.breakProgram(\'\', \'{}\')'});
+ await Protocol.Debugger.oncePaused();
+ session.disconnect();
+ InspectorTest.quitImmediately();
+})();
diff --git a/deps/v8/test/inspector/debugger/doesnt-step-into-injected-script.js b/deps/v8/test/inspector/debugger/doesnt-step-into-injected-script.js
index 45ca0ee0dd..98c7bf2cff 100644
--- a/deps/v8/test/inspector/debugger/doesnt-step-into-injected-script.js
+++ b/deps/v8/test/inspector/debugger/doesnt-step-into-injected-script.js
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Check that stepInto at then end of the script go to next user script instead InjectedScriptSource.js.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Check that stepInto at then end of the script go to next user script instead InjectedScriptSource.js.');
(async function test() {
- InspectorTest.setupScriptMap();
+ session.setupScriptMap();
await Protocol.Debugger.enable();
Protocol.Runtime.evaluate({expression: '(function boo() { setTimeout(() => 239, 0); debugger; })()\n'});
await waitPauseAndDumpLocation();
@@ -22,6 +22,6 @@ InspectorTest.log('Check that stepInto at then end of the script go to next user
async function waitPauseAndDumpLocation() {
var message = await Protocol.Debugger.oncePaused();
InspectorTest.log('paused at:');
- InspectorTest.logSourceLocation(message.params.callFrames[0].location);
+ session.logSourceLocation(message.params.callFrames[0].location);
return message;
}
diff --git a/deps/v8/test/inspector/debugger/es6-module-script-parsed.js b/deps/v8/test/inspector/debugger/es6-module-script-parsed.js
index e8fd8c69d1..94b623d0e9 100644
--- a/deps/v8/test/inspector/debugger/es6-module-script-parsed.js
+++ b/deps/v8/test/inspector/debugger/es6-module-script-parsed.js
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Debugger.scriptParsed and Debugger.scriptFailedToParse with ES6 module');
+let {session, contextGroup, Protocol} = InspectorTest.start('Debugger.scriptParsed and Debugger.scriptFailedToParse with ES6 module');
let moduleSource = `
export function foo() {
return 42;
}`;
-InspectorTest.addModule(moduleSource, 'module1.js');
-InspectorTest.addModule('}', 'module-with-syntax-error-1.js');
+contextGroup.addModule(moduleSource, 'module1.js');
+contextGroup.addModule('}', 'module-with-syntax-error-1.js');
Protocol.Debugger.onScriptParsed(InspectorTest.logMessage);
Protocol.Debugger.onScriptFailedToParse(InspectorTest.logMessage);
@@ -21,8 +21,8 @@ InspectorTest.runTestSuite([
},
function testScriptEventsWhenDebuggerIsEnabled(next) {
- InspectorTest.addModule(moduleSource, 'module2.js');
- InspectorTest.addModule('}', 'module-with-syntax-error-2.js');
- InspectorTest.waitPendingTasks().then(next);
+ contextGroup.addModule(moduleSource, 'module2.js');
+ contextGroup.addModule('}', 'module-with-syntax-error-2.js');
+ InspectorTest.waitForPendingTasks().then(next);
}
]);
diff --git a/deps/v8/test/inspector/debugger/es6-module-set-script-source.js b/deps/v8/test/inspector/debugger/es6-module-set-script-source.js
index 5e191ca8a8..81d97b6d78 100644
--- a/deps/v8/test/inspector/debugger/es6-module-set-script-source.js
+++ b/deps/v8/test/inspector/debugger/es6-module-set-script-source.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks that Debugger.setScriptSource doesn\'t crash with modules');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that Debugger.setScriptSource doesn\'t crash with modules');
var module1 = `
export function foo() {
@@ -25,9 +25,9 @@ Protocol.Debugger.onScriptParsed(message => {
module1Id = message.params.scriptId;
});
Protocol.Debugger.enable()
- .then(() => InspectorTest.addModule(module1, 'module1'))
- .then(() => InspectorTest.addModule(module2, 'module2'))
- .then(() => InspectorTest.waitPendingTasks())
+ .then(() => contextGroup.addModule(module1, 'module1'))
+ .then(() => contextGroup.addModule(module2, 'module2'))
+ .then(() => InspectorTest.waitForPendingTasks())
.then(() => Protocol.Debugger.setScriptSource({ scriptId: module1Id, scriptSource: editedModule1 }))
.then(InspectorTest.logMessage)
.then(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/debugger/eval-scopes-expected.txt b/deps/v8/test/inspector/debugger/eval-scopes-expected.txt
index af27f113ec..71d6618c8e 100644
--- a/deps/v8/test/inspector/debugger/eval-scopes-expected.txt
+++ b/deps/v8/test/inspector/debugger/eval-scopes-expected.txt
@@ -1,3 +1,4 @@
+Tests that variables introduced in eval scopes are accessible
{
id : <messageId>
result : {
@@ -16,4 +17,4 @@
}
]
}
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/inspector/debugger/eval-scopes.js b/deps/v8/test/inspector/debugger/eval-scopes.js
index 46e0c4b5a7..e5a0f131b4 100644
--- a/deps/v8/test/inspector/debugger/eval-scopes.js
+++ b/deps/v8/test/inspector/debugger/eval-scopes.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.addScript(
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that variables introduced in eval scopes are accessible');
+
+contextGroup.addScript(
`function testNonEmptyEvalScope() {
eval("'use strict'; var hest = 420; debugger;");
}
diff --git a/deps/v8/test/inspector/debugger/framework-break-expected.txt b/deps/v8/test/inspector/debugger/framework-break-expected.txt
index 4339645cc8..e858e836e8 100644
--- a/deps/v8/test/inspector/debugger/framework-break-expected.txt
+++ b/deps/v8/test/inspector/debugger/framework-break-expected.txt
@@ -43,13 +43,13 @@ Running test: testDebuggerStatement
Running test: testSyncDOMBreakpoint
> all frames in framework:
> mixed, top frame in framework:
-syncDOMBreakpoint (framework.js:33:2)
+syncDOMBreakpoint (framework.js:33:12)
(anonymous) (user.js:0:0)
Running test: testSyncDOMBreakpointWithInlinedUserFrame
> mixed, top frame in framework:
-syncDOMBreakpoint (framework.js:33:2)
+syncDOMBreakpoint (framework.js:33:12)
userFunction (user.js:70:2)
inlinedWrapper (framework.js:64:4)
syncDOMBreakpointWithInlinedUserFrame (framework.js:67:2)
diff --git a/deps/v8/test/inspector/debugger/framework-break.js b/deps/v8/test/inspector/debugger/framework-break.js
index 1566c264a2..9b9fee6f3e 100644
--- a/deps/v8/test/inspector/debugger/framework-break.js
+++ b/deps/v8/test/inspector/debugger/framework-break.js
@@ -3,9 +3,9 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax
-InspectorTest.log('Checks that breaks in framework code correctly processed.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that breaks in framework code correctly processed.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function frameworkAssert() {
console.assert(false);
}
@@ -30,7 +30,7 @@ function debuggerStatement() {
}
function syncDOMBreakpoint() {
- breakProgram('', '');
+ inspector.breakProgram('', '');
}
function asyncDOMBreakpoint() {
@@ -69,7 +69,7 @@ function syncDOMBreakpointWithInlinedUserFrame() {
//# sourceURL=framework.js`, 8, 26);
-InspectorTest.addScript(`
+contextGroup.addScript(`
function throwUserException() {
throw new Error();
}
@@ -80,9 +80,9 @@ function userFunction() {
//# sourceURL=user.js`, 64, 26)
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
- InspectorTest.logCallFrames(message.params.callFrames);
+ session.logCallFrames(message.params.callFrames);
InspectorTest.log('');
Protocol.Debugger.resume();
});
@@ -188,16 +188,16 @@ InspectorTest.runTestSuite([
},
function testAsyncDOMBreakpoint(next) {
- utils.schedulePauseOnNextStatement('', '');
+ contextGroup.schedulePauseOnNextStatement('', '');
InspectorTest.log('> all frames in framework:');
Protocol.Runtime
.evaluate(
{expression: 'asyncDOMBreakpoint()//# sourceURL=framework.js'})
- .then(() => utils.cancelPauseOnNextStatement())
+ .then(() => contextGroup.cancelPauseOnNextStatement())
.then(
() => Protocol.Runtime.evaluate(
{expression: '42//# sourceURL=user.js'}))
- .then(() => utils.schedulePauseOnNextStatement('', ''))
+ .then(() => contextGroup.schedulePauseOnNextStatement('', ''))
.then(
() => Protocol.Runtime.evaluate(
{expression: 'asyncDOMBreakpoint()//# sourceURL=user.js'}))
diff --git a/deps/v8/test/inspector/debugger/framework-nested-scheduled-break-expected.txt b/deps/v8/test/inspector/debugger/framework-nested-scheduled-break-expected.txt
index a7ab22229e..f9901f099b 100644
--- a/deps/v8/test/inspector/debugger/framework-nested-scheduled-break-expected.txt
+++ b/deps/v8/test/inspector/debugger/framework-nested-scheduled-break-expected.txt
@@ -3,10 +3,10 @@ break reason: framework-break
break aux data: {
"data": "data for framework-break"
}
-doFrameworkBreak (framework.js:20:2)
-doFrameworkWork (framework.js:15:2)
-frameworkCall (framework.js:9:2)
-testFunction (user.js:27:2)
+doFrameworkBreak (framework.js:20:12)
+doFrameworkWork (framework.js:15:12)
+frameworkCall (framework.js:9:12)
+testFunction (user.js:27:12)
(anonymous) (expr.js:0:0)
break reason: ambiguous
@@ -25,17 +25,17 @@ break aux data: {
}
callback (user.js:31:17)
doFrameworkWork (framework.js:16:2)
-frameworkCall (framework.js:9:2)
-testFunction (user.js:27:2)
+frameworkCall (framework.js:9:12)
+testFunction (user.js:27:12)
(anonymous) (expr.js:0:0)
break reason: user-break
break aux data: {
"data": "data for user-break"
}
-callback (user.js:32:2)
+callback (user.js:32:12)
doFrameworkWork (framework.js:16:2)
-frameworkCall (framework.js:9:2)
-testFunction (user.js:27:2)
+frameworkCall (framework.js:9:12)
+testFunction (user.js:27:12)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/framework-nested-scheduled-break.js b/deps/v8/test/inspector/debugger/framework-nested-scheduled-break.js
index bb1e47199c..3e6299154d 100644
--- a/deps/v8/test/inspector/debugger/framework-nested-scheduled-break.js
+++ b/deps/v8/test/inspector/debugger/framework-nested-scheduled-break.js
@@ -2,44 +2,44 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks nested scheduled break in framework code.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks nested scheduled break in framework code.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function frameworkCall(callback) {
- callWithScheduledBreak(doFrameworkWork.bind(null, callback),
+ inspector.callWithScheduledBreak(doFrameworkWork.bind(null, callback),
'top-framework-scheduled-break',
JSON.stringify({ data: 'data for top-framework-scheduled-break' }));
}
function doFrameworkWork(callback) {
- callWithScheduledBreak(doFrameworkBreak, 'should-not-be-a-reason', '');
+ inspector.callWithScheduledBreak(doFrameworkBreak, 'should-not-be-a-reason', '');
callback();
}
function doFrameworkBreak() {
- breakProgram('framework-break', JSON.stringify({ data: 'data for framework-break' }));
+ inspector.breakProgram('framework-break', JSON.stringify({ data: 'data for framework-break' }));
}
//# sourceURL=framework.js`, 7, 26);
-InspectorTest.addScript(`
+contextGroup.addScript(`
function testFunction() {
- callWithScheduledBreak(frameworkCall.bind(null, callback),
+ inspector.callWithScheduledBreak(frameworkCall.bind(null, callback),
'top-scheduled-break', '');
}
function callback() {
- breakProgram('user-break', JSON.stringify({ data: 'data for user-break' }));
+ inspector.breakProgram('user-break', JSON.stringify({ data: 'data for user-break' }));
return 42;
}
//# sourceURL=user.js`, 25, 26);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
InspectorTest.log('break reason: ' + message.params.reason);
InspectorTest.log('break aux data: ' + JSON.stringify(message.params.data || {}, null, ' '));
- InspectorTest.logCallFrames(message.params.callFrames);
+ session.logCallFrames(message.params.callFrames);
InspectorTest.log('');
Protocol.Debugger.resume();
});
diff --git a/deps/v8/test/inspector/debugger/framework-precise-ranges.js b/deps/v8/test/inspector/debugger/framework-precise-ranges.js
index 3b23cf50df..4f76033a96 100644
--- a/deps/v8/test/inspector/debugger/framework-precise-ranges.js
+++ b/deps/v8/test/inspector/debugger/framework-precise-ranges.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks framework debugging with blackboxed ranges.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks framework debugging with blackboxed ranges.');
-InspectorTest.addScript(
+contextGroup.addScript(
`
function foo() {
return boo();
@@ -18,9 +18,9 @@ function testFunction() {
//# sourceURL=test.js`,
7, 26);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
- InspectorTest.logCallFrames(message.params.callFrames);
+ session.logCallFrames(message.params.callFrames);
InspectorTest.log('');
Protocol.Debugger.stepInto();
});
@@ -64,7 +64,7 @@ var testSuite = [
];
function testPositions(positions) {
- utils.schedulePauseOnNextStatement('', '');
+ contextGroup.schedulePauseOnNextStatement('', '');
return Protocol.Debugger
.setBlackboxedRanges({scriptId: scriptId, positions: positions})
.then(InspectorTest.logMessage)
diff --git a/deps/v8/test/inspector/debugger/framework-stepping-expected.txt b/deps/v8/test/inspector/debugger/framework-stepping-expected.txt
index aee4bf3eaf..fa2bb35f5a 100644
--- a/deps/v8/test/inspector/debugger/framework-stepping-expected.txt
+++ b/deps/v8/test/inspector/debugger/framework-stepping-expected.txt
@@ -67,7 +67,7 @@ testStepFromUser (user.js:31:2)
Executing resume...
Running test: testStepIntoFromFramework
-frameworkBreakAndCall (framework.js:14:2)
+frameworkBreakAndCall (framework.js:14:12)
testStepFromFramework (user.js:35:2)
(anonymous) (expr.js:0:0)
@@ -80,7 +80,7 @@ testStepFromFramework (user.js:35:2)
Executing resume...
Running test: testStepOverFromFramework
-frameworkBreakAndCall (framework.js:14:2)
+frameworkBreakAndCall (framework.js:14:12)
testStepFromFramework (user.js:35:2)
(anonymous) (expr.js:0:0)
@@ -91,7 +91,7 @@ testStepFromFramework (user.js:36:0)
Executing resume...
Running test: testStepOutFromFramework
-frameworkBreakAndCall (framework.js:14:2)
+frameworkBreakAndCall (framework.js:14:12)
testStepFromFramework (user.js:35:2)
(anonymous) (expr.js:0:0)
diff --git a/deps/v8/test/inspector/debugger/framework-stepping.js b/deps/v8/test/inspector/debugger/framework-stepping.js
index 6c6ae9caa0..f91c06ba52 100644
--- a/deps/v8/test/inspector/debugger/framework-stepping.js
+++ b/deps/v8/test/inspector/debugger/framework-stepping.js
@@ -2,22 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks stepping with blackboxed frames on stack');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks stepping with blackboxed frames on stack');
-InspectorTest.addScript(
+contextGroup.addScript(
`
function frameworkCall(funcs) {
for (var f of funcs) f();
}
function frameworkBreakAndCall(funcs) {
- breakProgram('', '');
+ inspector.breakProgram('', '');
for (var f of funcs) f();
}
//# sourceURL=framework.js`,
8, 4);
-InspectorTest.addScript(
+contextGroup.addScript(
`
function userFoo() {
return 1;
@@ -37,7 +37,7 @@ function testStepFromFramework() {
//# sourceURL=user.js`,
21, 4);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.enable()
.then(
@@ -47,7 +47,7 @@ Protocol.Debugger.enable()
var testSuite = [
function testStepIntoFromUser(next) {
- utils.schedulePauseOnNextStatement('', '');
+ contextGroup.schedulePauseOnNextStatement('', '');
test('testStepFromUser()', [
'print', // before testStepFromUser call
'stepInto', 'stepInto', 'print', // userFoo
@@ -57,7 +57,7 @@ var testSuite = [
},
function testStepOverFromUser(next) {
- utils.schedulePauseOnNextStatement('', '');
+ contextGroup.schedulePauseOnNextStatement('', '');
test('testStepFromUser()', [
'print', // before testStepFromUser call
'stepInto', 'stepInto', 'print', // userFoo
@@ -67,7 +67,7 @@ var testSuite = [
},
function testStepOutFromUser(next) {
- utils.schedulePauseOnNextStatement('', '');
+ contextGroup.schedulePauseOnNextStatement('', '');
test('testStepFromUser()', [
'print', // before testStepFromUser call
'stepInto', 'stepInto', 'print', // userFoo
@@ -101,7 +101,7 @@ function test(entryExpression, actions) {
Protocol.Debugger.onPaused(message => {
var action = actions.shift() || 'resume';
if (action === 'print') {
- InspectorTest.logCallFrames(message.params.callFrames);
+ session.logCallFrames(message.params.callFrames);
InspectorTest.log('');
action = actions.shift() || 'resume';
}
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal-expected.txt
index eff9225d66..c5d8c2466c 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal-expected.txt
@@ -1,3 +1,4 @@
+Tests possible breakpoints in array literal
{
id : <messageId>
result : {
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal.js
index 13e2920cc7..a5aeeff34b 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal.js
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-array-literal.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests possible breakpoints in array literal');
+
Protocol.Debugger.enable();
Protocol.Debugger.onceScriptParsed().then(message => message.params.scriptId)
@@ -9,4 +11,4 @@ Protocol.Debugger.onceScriptParsed().then(message => message.params.scriptId)
.then(InspectorTest.logMessage)
.then(InspectorTest.completeTest);
-InspectorTest.addScript("() => []");
+contextGroup.addScript("() => []");
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt
new file mode 100644
index 0000000000..9c65ba2325
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error-expected.txt
@@ -0,0 +1,26 @@
+getPossibleBreakpoints should not crash during lazy compilation (crbug.com/715334)
+{
+ method : Debugger.scriptParsed
+ params : {
+ endColumn : 21
+ endLine : 2
+ executionContextId : <executionContextId>
+ hasSourceURL : true
+ hash : FA2A959297747012766FE9C5006E7F522D88FA72
+ isLiveEdit : false
+ isModule : false
+ length : 52
+ scriptId : <scriptId>
+ sourceMapURL :
+ startColumn : 0
+ startLine : 0
+ url : test.js
+ }
+}
+{
+ id : <messageId>
+ result : {
+ locations : [
+ ]
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error.js
new file mode 100644
index 0000000000..d1326e967f
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-lazy-error.js
@@ -0,0 +1,28 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('getPossibleBreakpoints should not crash during lazy compilation (crbug.com/715334)');
+
+contextGroup.addScript(`
+function test() { continue; }
+//# sourceURL=test.js`);
+
+(async function test() {
+ Protocol.Debugger.enable();
+ let script = await Protocol.Debugger.onceScriptParsed();
+ InspectorTest.logMessage(script);
+ let scriptId = script.params.scriptId;
+ Protocol.Debugger.onScriptFailedToParse(msg => {
+ InspectorTest.logMessage(msg);
+ if (msg.params.scriptId !== script.params.scriptId) {
+ InspectorTest.log('Failed script to parse event has different scriptId');
+ } else {
+ InspectorTest.log('One script is reported twice');
+ }
+ });
+ let response = await Protocol.Debugger.getPossibleBreakpoints({
+ start: {scriptId, lineNumber: 0, columnNumber: 0}});
+ InspectorTest.logMessage(response);
+ InspectorTest.completeTest();
+})();
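Here getPossibleBreakpoints is queried over the whole script by supplying only a start location. A sketch of the query shape used above; for a script whose body later fails lazy compilation, the expectation is an empty locations list rather than a crash:

    // Query every candidate break location from the top of the script.
    let response = await Protocol.Debugger.getPossibleBreakpoints({
      start: {scriptId, lineNumber: 0, columnNumber: 0}});
    InspectorTest.logMessage(response);  // expected here: { locations: [] }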
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
index 7a118e6c3d..d83a4aa137 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master-expected.txt
@@ -183,7 +183,7 @@ function testCaughtException() {
|R|}
function testClasses() {
- |_|class Cat {
+ class Cat {
constructor(name) {
|_|this.name = name;
|R|}
@@ -191,7 +191,7 @@ function testClasses() {
speak() {
|R|}
}
- |_|class Lion extends Cat {
+ class Lion extends Cat {
constructor(name) {
|C|super(name);
|R|}
@@ -204,17 +204,17 @@ function testClasses() {
|R|}
async function asyncFoo() {
- |_|await Promise.resolve().then(v => v |_|* 2|R|);
+ |_|await Promise.|C|resolve().|C|then(v => v |_|* 2|R|);
|C|return42();
|_|await |C|asyncBoo();
|R|}
async function asyncBoo() {
- |_|await Promise.resolve();
+ |_|await Promise.|C|resolve();
|R|}
async function testAsyncAwait() {
- |_|await asyncFoo();
+ |_|await |C|asyncFoo();
|_|await |C|awaitBoo();
|R|}
@@ -247,7 +247,7 @@ async function testPromiseComplex() {
var testPromise = |C|new Promise(resolve => nextTest |_|= resolve|R|);
async function main() {
async function foo() {
- |_|await Promise.resolve();
+ |_|await Promise.|C|resolve();
|_|return 42;
|R|}
var x = |_|1;
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master.js
index 482f68f87f..07f90e7a25 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-master.js
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-master.js
@@ -4,10 +4,10 @@
// Flags: --turbo
-InspectorTest.log('Checks Debugger.getPossibleBreakpoints');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks Debugger.getPossibleBreakpoints');
var source = utils.read('test/inspector/debugger/resources/break-locations.js');
-InspectorTest.addScript(source);
+contextGroup.addScript(source);
Protocol.Debugger.onceScriptParsed()
.then(message => Protocol.Debugger.getPossibleBreakpoints({ start: { lineNumber: 0, columnNumber : 0, scriptId: message.params.scriptId }}))
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints-restrict-to-function.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints-restrict-to-function.js
index f5020eb64b..27c65fe290 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints-restrict-to-function.js
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints-restrict-to-function.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks Debugger.getPossibleBreakpoints with ignoreNestedFunctions');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks Debugger.getPossibleBreakpoints with ignoreNestedFunctions');
var source = `
function test() {
@@ -17,7 +17,7 @@ function test() {
nested2();
}
//# sourceURL=test.js`;
-InspectorTest.addScript(source);
+contextGroup.addScript(source);
var scriptId;
Protocol.Debugger.onceScriptParsed().then(message => {
@@ -25,7 +25,7 @@ Protocol.Debugger.onceScriptParsed().then(message => {
scriptId = message.params.scriptId;
}).then(() => InspectorTest.runTestSuite(tests));
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(dumpBreakLocationInSourceAndResume);
Protocol.Debugger.enable();
@@ -103,7 +103,7 @@ function dumpAllLocations(message) {
}
function dumpBreakLocationInSourceAndResume(message) {
- InspectorTest.logCallFrames([ message.params.callFrames[0] ]);
+ session.logCallFrames([ message.params.callFrames[0] ]);
var location = message.params.callFrames[0].location;
var sourceLines = source.split('\n')
diff --git a/deps/v8/test/inspector/debugger/get-possible-breakpoints.js b/deps/v8/test/inspector/debugger/get-possible-breakpoints.js
index ef996e910b..3232d59574 100644
--- a/deps/v8/test/inspector/debugger/get-possible-breakpoints.js
+++ b/deps/v8/test/inspector/debugger/get-possible-breakpoints.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Test for Debugger.getPossibleBreakpoints');
+let {session, contextGroup, Protocol} = InspectorTest.start('Test for Debugger.getPossibleBreakpoints');
Protocol.Runtime.enable();
Protocol.Debugger.enable();
@@ -153,7 +153,7 @@ function foo6() { Promise.resolve().then(() => 42) }`;
function compileScript(source, origin) {
var promise = Protocol.Debugger.onceScriptParsed().then(message => message.params.scriptId);
if (!origin) origin = { name: '', line_offset: 0, column_offset: 0 };
- utils.compileAndRunWithOrigin(source, origin.name, origin.line_offset, origin.column_offset, false);
+ contextGroup.addScript(source, origin.line_offset, origin.column_offset, origin.name);
return promise;
}
diff --git a/deps/v8/test/inspector/debugger/inspector-break-api-expected.txt b/deps/v8/test/inspector/debugger/inspector-break-api-expected.txt
index f3b019d058..1677b5923a 100644
--- a/deps/v8/test/inspector/debugger/inspector-break-api-expected.txt
+++ b/deps/v8/test/inspector/debugger/inspector-break-api-expected.txt
@@ -2,7 +2,7 @@ Checks breakProgram,(schedule|cancel)PauseOnNextStatement test API
Running test: testBreakProgram
Stack:
-callBreakProgram (:9:2)
+callBreakProgram (:9:12)
(anonymous) (:0:0)
Other data:
{
diff --git a/deps/v8/test/inspector/debugger/inspector-break-api.js b/deps/v8/test/inspector/debugger/inspector-break-api.js
index 009bf717a4..dc39924200 100644
--- a/deps/v8/test/inspector/debugger/inspector-break-api.js
+++ b/deps/v8/test/inspector/debugger/inspector-break-api.js
@@ -2,21 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Checks breakProgram,(schedule|cancel)PauseOnNextStatement test API");
+let {session, contextGroup, Protocol} = InspectorTest.start("Checks breakProgram,(schedule|cancel)PauseOnNextStatement test API");
-InspectorTest.addScript(`
+contextGroup.addScript(`
function callBreakProgram() {
- breakProgram('reason', JSON.stringify({a: 42}));
+ inspector.breakProgram('reason', JSON.stringify({a: 42}));
}
function foo() {
return 42;
}`, 7, 26);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
InspectorTest.log('Stack:');
- InspectorTest.logCallFrames(message.params.callFrames);
+ session.logCallFrames(message.params.callFrames);
delete message.params.callFrames;
InspectorTest.log('Other data:');
InspectorTest.logMessage(message);
@@ -33,17 +33,17 @@ InspectorTest.runTestSuite([
},
function testSchedulePauseOnNextStatement(next) {
- utils.schedulePauseOnNextStatement('reason', JSON.stringify({a: 42}));
+ contextGroup.schedulePauseOnNextStatement('reason', JSON.stringify({a: 42}));
Protocol.Runtime.evaluate({ expression: 'foo()//# sourceURL=expr1.js'})
.then(() => Protocol.Runtime.evaluate({
expression: 'foo()//# sourceURL=expr2.js'}))
- .then(() => utils.cancelPauseOnNextStatement())
+ .then(() => contextGroup.cancelPauseOnNextStatement())
.then(next);
},
function testCancelPauseOnNextStatement(next) {
- utils.schedulePauseOnNextStatement('reason', JSON.stringify({a: 42}));
- utils.cancelPauseOnNextStatement();
+ contextGroup.schedulePauseOnNextStatement('reason', JSON.stringify({a: 42}));
+ contextGroup.cancelPauseOnNextStatement();
Protocol.Runtime.evaluate({ expression: 'foo()'})
.then(next);
}
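The schedule/cancel pair shown above arms a pause that fires before the next statement executed in the context; cancelling it first lets the following evaluation run without breaking. A compact sketch using the names from the test:

    // Armed pause fires on the next statement...
    contextGroup.schedulePauseOnNextStatement('reason', JSON.stringify({a: 42}));
    // ...unless it is cancelled before anything runs.
    contextGroup.cancelPauseOnNextStatement();
    await Protocol.Runtime.evaluate({expression: 'foo()'});  // completes without a pause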
diff --git a/deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt b/deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt
new file mode 100644
index 0000000000..0c421da607
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/max-async-call-chain-depth-expected.txt
@@ -0,0 +1,91 @@
+Checks that we trim async call chains correctly.
+set async chain depth to 8
+
+Running test: testDebuggerPaused
+Run expression 'debugger;' with async chain len: 4
+actual async chain len: 1
+Run expression 'debugger;' with async chain len: 8
+actual async chain len: 1
+Run expression 'debugger;' with async chain len: 9
+actual async chain len: 1
+Run expression 'debugger;' with async chain len: 32
+actual async chain len: 1
+
+Running test: testConsoleTrace
+Run expression 'console.trace(42);' with async chain len: 4
+actual async chain len: 1
+Run expression 'console.trace(42);' with async chain len: 8
+actual async chain len: 1
+Run expression 'console.trace(42);' with async chain len: 9
+actual async chain len: 1
+Run expression 'console.trace(42);' with async chain len: 32
+actual async chain len: 1
+
+Running test: testDebuggerPausedSetTimeout
+Run expression 'debugger;' with async chain len: 4
+actual async chain len: 4
+Run expression 'debugger;' with async chain len: 8
+actual async chain len: 8
+Run expression 'debugger;' with async chain len: 9
+actual async chain len: 8
+Run expression 'debugger;' with async chain len: 32
+actual async chain len: 8
+
+Running test: testConsoleTraceSetTimeout
+Run expression 'console.trace(42);' with async chain len: 4
+actual async chain len: 4
+Run expression 'console.trace(42);' with async chain len: 8
+actual async chain len: 8
+Run expression 'console.trace(42);' with async chain len: 9
+actual async chain len: 8
+Run expression 'console.trace(42);' with async chain len: 32
+actual async chain len: 8
+
+Running test: testConsoleTraceWithEmptySync
+{
+ callFrames : [
+ [0] : {
+ columnNumber : 66
+ functionName : Promise.then
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 47
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.resolve
+ }
+}
+
+Running test: testDebuggerPausedThenableJob
+Run expression 'debugger;' with async chain len: 4
+actual async chain len: 1
+Run expression 'debugger;' with async chain len: 8
+actual async chain len: 1
+Run expression 'debugger;' with async chain len: 9
+actual async chain len: 1
+Run expression 'debugger;' with async chain len: 32
+actual async chain len: 1
+
+Running test: testConsoleTraceThenableJob
+Run expression 'console.trace(42);' with async chain len: 4
+actual async chain len: 1
+Run expression 'console.trace(42);' with async chain len: 8
+actual async chain len: 1
+Run expression 'console.trace(42);' with async chain len: 9
+actual async chain len: 1
+Run expression 'console.trace(42);' with async chain len: 32
+actual async chain len: 1
+
+Running test: twoConsoleAssert
+actual async chain len: 1
+actual async chain len: 2
diff --git a/deps/v8/test/inspector/debugger/max-async-call-chain-depth.js b/deps/v8/test/inspector/debugger/max-async-call-chain-depth.js
new file mode 100644
index 0000000000..ae98f55037
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/max-async-call-chain-depth.js
@@ -0,0 +1,162 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(kozyatinskiy): fix or remove it later.
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that we trim async call chains correctly.');
+
+Protocol.Debugger.enable();
+InspectorTest.log('set async chain depth to 8');
+Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 8});
+InspectorTest.runAsyncTestSuite([
+ async function testDebuggerPaused() {
+ runWithAsyncChain(4, 'debugger;');
+ dumpAsyncChainLength(await Protocol.Debugger.oncePaused());
+ await Protocol.Debugger.resume();
+
+ runWithAsyncChain(8, 'debugger;');
+ dumpAsyncChainLength(await Protocol.Debugger.oncePaused());
+ await Protocol.Debugger.resume();
+
+ runWithAsyncChain(9, 'debugger;');
+ dumpAsyncChainLength(await Protocol.Debugger.oncePaused());
+ await Protocol.Debugger.resume();
+
+ runWithAsyncChain(32, 'debugger;');
+ dumpAsyncChainLength(await Protocol.Debugger.oncePaused());
+ await Protocol.Debugger.resume();
+ },
+
+ async function testConsoleTrace() {
+ Protocol.Runtime.enable();
+ runWithAsyncChain(4, 'console.trace(42);');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ runWithAsyncChain(8, 'console.trace(42);');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ runWithAsyncChain(9, 'console.trace(42);');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ runWithAsyncChain(32, 'console.trace(42);');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+ },
+
+ async function testDebuggerPausedSetTimeout() {
+ runWithAsyncChainSetTimeout(4, 'debugger;');
+ dumpAsyncChainLength(await Protocol.Debugger.oncePaused());
+ await Protocol.Debugger.resume();
+
+ runWithAsyncChainSetTimeout(8, 'debugger;');
+ dumpAsyncChainLength(await Protocol.Debugger.oncePaused());
+ await Protocol.Debugger.resume();
+
+ runWithAsyncChainSetTimeout(9, 'debugger;');
+ dumpAsyncChainLength(await Protocol.Debugger.oncePaused());
+ await Protocol.Debugger.resume();
+
+ runWithAsyncChainSetTimeout(32, 'debugger;');
+ dumpAsyncChainLength(await Protocol.Debugger.oncePaused());
+ await Protocol.Debugger.resume();
+ },
+
+ async function testConsoleTraceSetTimeout() {
+ runWithAsyncChainSetTimeout(4, 'console.trace(42);');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ runWithAsyncChainSetTimeout(8, 'console.trace(42);');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ runWithAsyncChainSetTimeout(9, 'console.trace(42);');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ runWithAsyncChainSetTimeout(32, 'console.trace(42);');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+ },
+
+ async function testConsoleTraceWithEmptySync() {
+ Protocol.Runtime.evaluate({
+ expression: 'new Promise(resolve => setTimeout(resolve, 0)).then(() => console.trace(42))'
+ });
+ InspectorTest.logMessage((await Protocol.Runtime.onceConsoleAPICalled()).params.stackTrace);
+ },
+
+ async function testDebuggerPausedThenableJob() {
+ runWithThenableJob(4, 'debugger;');
+ dumpAsyncChainLength(await Protocol.Debugger.oncePaused());
+ await Protocol.Debugger.resume();
+
+ runWithThenableJob(8, 'debugger;');
+ dumpAsyncChainLength(await Protocol.Debugger.oncePaused());
+ await Protocol.Debugger.resume();
+
+ runWithThenableJob(9, 'debugger;');
+ dumpAsyncChainLength(await Protocol.Debugger.oncePaused());
+ await Protocol.Debugger.resume();
+
+ runWithThenableJob(32, 'debugger;');
+ dumpAsyncChainLength(await Protocol.Debugger.oncePaused());
+ await Protocol.Debugger.resume();
+ },
+
+ async function testConsoleTraceThenableJob() {
+ runWithThenableJob(4, 'console.trace(42);');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ runWithThenableJob(8, 'console.trace(42);');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ runWithThenableJob(9, 'console.trace(42);');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+
+ runWithThenableJob(32, 'console.trace(42);');
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+ },
+
+ async function twoConsoleAssert() {
+ Protocol.Runtime.evaluate({
+ expression: 'setTimeout(' +
+ 'setTimeout.bind(null, ' +
+ 'setTimeout.bind(null, () => { console.assert(); setTimeout(console.assert, 0) }, 0), 0), 0)'
+ });
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+ dumpAsyncChainLength(await Protocol.Runtime.onceConsoleAPICalled());
+ }
+]);
+
+function runWithAsyncChain(len, source) {
+ InspectorTest.log(`Run expression '${source}' with async chain len: ${len}`);
+ let then = '.then(() => 1)';
+ let pause = `.then(() => { ${source} })`;
+ Protocol.Runtime.evaluate({
+ expression: `Promise.resolve()${then.repeat(len - 1)}${pause}`
+ });
+}
+
+function runWithAsyncChainSetTimeout(len, source) {
+ InspectorTest.log(`Run expression '${source}' with async chain len: ${len}`);
+ let setTimeout = 'setTimeout(() => {';
+ let suffix = '}, 0)';
+ Protocol.Runtime.evaluate({
+ expression: `${setTimeout.repeat(len)}${source}${suffix.repeat(len)}`
+ });
+}
+
+function runWithThenableJob(len, source) {
+ InspectorTest.log(`Run expression '${source}' with async chain len: ${len}`);
+ let then = '.then(Promise.resolve.bind(Promise, 0))';
+ let pause = `.then(() => { ${source} })`;
+ Protocol.Runtime.evaluate({
+ expression: `Promise.resolve()${then.repeat(len - 1)}${pause}`
+ });
+}
+
+function dumpAsyncChainLength(message) {
+ let stackTrace = message.params.asyncStackTrace || message.params.stackTrace.parent;
+ let asyncChainCount = 0;
+ while (stackTrace) {
+ ++asyncChainCount;
+ stackTrace = stackTrace.parent;
+ }
+ InspectorTest.log(`actual async chain len: ${asyncChainCount}`);
+}
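dumpAsyncChainLength above measures depth by following parent links, starting from params.asyncStackTrace for pauses or params.stackTrace.parent for console calls. A minimal standalone sketch of the same walk, assuming an input shaped like the protocol's Runtime.StackTrace:

// Count async links by walking the parent chain; each async hop in the
// reported stack hangs off the previous one via `parent`.
function asyncChainLength(stackTrace) {
  let length = 0;
  for (let link = stackTrace; link; link = link.parent) ++length;
  return length;
}

// A chain with one async parent reports 2.
console.log(asyncChainLength({callFrames: [], parent: {callFrames: []}}));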
diff --git a/deps/v8/test/inspector/debugger/object-preview-internal-properties.js b/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
index 78d4d8326e..442ca8149a 100644
--- a/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
+++ b/deps/v8/test/inspector/debugger/object-preview-internal-properties.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Check internal properties reported in object preview.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Check internal properties reported in object preview.");
Protocol.Debugger.enable();
Protocol.Runtime.enable();
diff --git a/deps/v8/test/inspector/debugger/pause-expected.txt b/deps/v8/test/inspector/debugger/pause-expected.txt
index 29b7e14082..a51bc8d0bc 100644
--- a/deps/v8/test/inspector/debugger/pause-expected.txt
+++ b/deps/v8/test/inspector/debugger/pause-expected.txt
@@ -19,6 +19,9 @@ Running test: testSkipOtherContext2
paused at:
#var a = 239;
+paused at:
+var a = #239;
+
Running test: testWithNativeBreakpoint
paused at:
diff --git a/deps/v8/test/inspector/debugger/pause-on-oom.js b/deps/v8/test/inspector/debugger/pause-on-oom.js
index e36d251b26..fe5d61b492 100644
--- a/deps/v8/test/inspector/debugger/pause-on-oom.js
+++ b/deps/v8/test/inspector/debugger/pause-on-oom.js
@@ -4,9 +4,9 @@
// Flags: --max-old-space-size=8
-InspectorTest.log('Check pause on OOM');
+let {session, contextGroup, Protocol} = InspectorTest.start('Check pause on OOM');
-InspectorTest.addScript(`
+contextGroup.addScript(`
var arr = [];
var stop = false;
function generateGarbage() {
diff --git a/deps/v8/test/inspector/debugger/pause.js b/deps/v8/test/inspector/debugger/pause.js
index 33f76f9dce..33ebeb830c 100644
--- a/deps/v8/test/inspector/debugger/pause.js
+++ b/deps/v8/test/inspector/debugger/pause.js
@@ -3,87 +3,99 @@
// found in the LICENSE file.
InspectorTest.log('Checks Debugger.pause');
+let contextGroup1 = new InspectorTest.ContextGroup();
+let session1 = contextGroup1.connect();
+let Protocol1 = session1.Protocol;
-InspectorTest.setupScriptMap();
-Protocol.Debugger.enable();
+session1.setupScriptMap();
+Protocol1.Debugger.enable();
InspectorTest.runAsyncTestSuite([
async function testPause() {
- Protocol.Debugger.pause();
- Protocol.Runtime.evaluate({expression: 'var a = 42;'});
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
+ Protocol1.Debugger.pause();
+ Protocol1.Runtime.evaluate({expression: 'var a = 42;'});
+ await waitPauseAndDumpLocation(session1);
+ await Protocol1.Debugger.resume();
},
async function testSkipFrameworks() {
- Protocol.Debugger.setBlackboxPatterns({patterns: ['framework\.js']});
- Protocol.Debugger.pause();
- Protocol.Runtime.evaluate({expression: 'var a = 42; //# sourceURL=framework.js'});
- Protocol.Runtime.evaluate({expression: 'var a = 239;'});
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
+ Protocol1.Debugger.setBlackboxPatterns({patterns: ['framework\.js']});
+ Protocol1.Debugger.pause();
+ Protocol1.Runtime.evaluate({expression: 'var a = 42; //# sourceURL=framework.js'});
+ Protocol1.Runtime.evaluate({expression: 'var a = 239;'});
+ await waitPauseAndDumpLocation(session1);
+ await Protocol1.Debugger.resume();
},
async function testSkipOtherContext1() {
- let contextGroupId = utils.createContextGroup();
- Protocol.Debugger.enable({}, contextGroupId);
- Protocol.Debugger.pause();
- Protocol.Runtime.evaluate({expression: 'var a = 42; //# sourceURL=framework.js'});
- Protocol.Runtime.evaluate({expression: 'var a = 239;'}, contextGroupId);
- Protocol.Runtime.evaluate({expression: 'var a = 1;'});
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
- await Protocol.Debugger.disable({}, contextGroupId);
+ let contextGroup2 = new InspectorTest.ContextGroup();
+ let session2 = contextGroup2.connect();
+ let Protocol2 = session2.Protocol;
+ Protocol2.Debugger.enable({});
+ Protocol1.Debugger.pause();
+ Protocol1.Runtime.evaluate({expression: 'var a = 42; //# sourceURL=framework.js'});
+ Protocol2.Runtime.evaluate({expression: 'var a = 239;'});
+ Protocol1.Runtime.evaluate({expression: 'var a = 1;'});
+ await waitPauseAndDumpLocation(session1);
+ await Protocol1.Debugger.resume();
+ await Protocol2.Debugger.disable({});
},
async function testSkipOtherContext2() {
- let contextGroupId = utils.createContextGroup();
- Protocol.Debugger.enable({}, contextGroupId);
- Protocol.Debugger.pause({}, contextGroupId);
- Protocol.Runtime.evaluate({expression: 'var a = 42; //# sourceURL=framework.js'});
- Protocol.Runtime.evaluate({expression: 'var a = 239;'}, contextGroupId);
- Protocol.Runtime.evaluate({expression: 'var a = 1;'});
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
- await Protocol.Debugger.disable({}, contextGroupId);
+ let contextGroup2 = new InspectorTest.ContextGroup();
+ let session2 = contextGroup2.connect();
+ let Protocol2 = session2.Protocol;
+ session2.setupScriptMap();
+ Protocol2.Debugger.enable({});
+ Protocol2.Debugger.pause({});
+ Protocol1.Runtime.evaluate({expression: 'var a = 42; //# sourceURL=framework.js'});
+ Protocol2.Runtime.evaluate({expression: 'var a = 239;'});
+ Protocol1.Runtime.evaluate({expression: 'var a = 1;'});
+ await waitPauseAndDumpLocation(session2);
+  // Should not resume a pause from a different context group id.
+ Protocol1.Debugger.resume();
+ Protocol2.Debugger.stepOver({});
+ await waitPauseAndDumpLocation(session2);
+ await Protocol2.Debugger.resume({});
+ await Protocol2.Debugger.disable({});
},
async function testWithNativeBreakpoint() {
- utils.schedulePauseOnNextStatement('', '');
- await Protocol.Debugger.pause();
- utils.cancelPauseOnNextStatement();
- Protocol.Runtime.evaluate({expression: 'var a = 42;'});
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
+ contextGroup1.schedulePauseOnNextStatement('', '');
+ await Protocol1.Debugger.pause();
+ contextGroup1.cancelPauseOnNextStatement();
+ Protocol1.Runtime.evaluate({expression: 'var a = 42;'});
+ await waitPauseAndDumpLocation(session1);
+ await Protocol1.Debugger.resume();
- await Protocol.Debugger.pause();
- utils.schedulePauseOnNextStatement('', '');
- utils.cancelPauseOnNextStatement();
- Protocol.Runtime.evaluate({expression: 'var a = 42;'});
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
+ await Protocol1.Debugger.pause();
+ contextGroup1.schedulePauseOnNextStatement('', '');
+ contextGroup1.cancelPauseOnNextStatement();
+ Protocol1.Runtime.evaluate({expression: 'var a = 42;'});
+ await waitPauseAndDumpLocation(session1);
+ await Protocol1.Debugger.resume();
- utils.schedulePauseOnNextStatement('', '');
- utils.cancelPauseOnNextStatement();
- await Protocol.Debugger.pause();
- Protocol.Runtime.evaluate({expression: 'var a = 42;'});
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
+ contextGroup1.schedulePauseOnNextStatement('', '');
+ contextGroup1.cancelPauseOnNextStatement();
+ await Protocol1.Debugger.pause();
+ Protocol1.Runtime.evaluate({expression: 'var a = 42;'});
+ await waitPauseAndDumpLocation(session1);
+ await Protocol1.Debugger.resume();
},
async function testDisableBreaksShouldCancelPause() {
- await Protocol.Debugger.pause();
- await Protocol.Debugger.setBreakpointsActive({active: false});
- Protocol.Runtime.evaluate({expression: 'var a = 42;'})
- .then(() => Protocol.Debugger.setBreakpointsActive({active: true}))
- .then(() => Protocol.Runtime.evaluate({expression: 'debugger'}));
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.resume();
+ await Protocol1.Debugger.pause();
+ await Protocol1.Debugger.setBreakpointsActive({active: false});
+ Protocol1.Runtime.evaluate({expression: 'var a = 42;'})
+ .then(() => Protocol1.Debugger.setBreakpointsActive({active: true}))
+ .then(() => Protocol1.Runtime.evaluate({expression: 'debugger'}));
+ await waitPauseAndDumpLocation(session1);
+ await Protocol1.Debugger.resume();
}
]);
-async function waitPauseAndDumpLocation() {
- var message = await Protocol.Debugger.oncePaused();
+async function waitPauseAndDumpLocation(session) {
+ var message = await session.Protocol.Debugger.oncePaused();
InspectorTest.log('paused at:');
- await InspectorTest.logSourceLocation(message.params.callFrames[0].location);
+ await session.logSourceLocation(message.params.callFrames[0].location);
return message;
}
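The pause.js hunk above shows the shape of the migration applied throughout this patch: the utils.createContextGroup() id plumbing gives way to explicit ContextGroup and session objects, each session carrying its own Protocol. Distilled from the lines above (harness-internal names, not a public API):

// One context group per isolate context; each connected session gets its
// own Protocol object, so two groups can pause and step independently.
let contextGroup = new InspectorTest.ContextGroup();
let session = contextGroup.connect();
let Protocol = session.Protocol;
session.setupScriptMap();  // enables session.logSourceLocation / logCallFrames
Protocol.Debugger.enable();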
diff --git a/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt b/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt
new file mode 100644
index 0000000000..57357ab15a
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit-expected.txt
@@ -0,0 +1,235 @@
+Tests how async promise chains behave when reaching the limit of stacks
+Checks correctness of promise chains when the limit is hit
+inspector.setMaxAsyncTaskStacks(3)
+Run expression 'console.trace()' with async chain len: 3
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : console.trace
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 67
+ functionName : Promise.resolve.then.then.then
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 46
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ }
+ }
+ timestamp : <timestamp>
+ type : trace
+ }
+}
+inspector.setMaxAsyncTaskStacks(4)
+Run expression 'console.trace()' with async chain len: 3
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : console.trace
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 67
+ functionName : Promise.resolve.then.then.then
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 46
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.resolve
+ }
+ }
+ timestamp : <timestamp>
+ type : trace
+ }
+}
+inspector.setMaxAsyncTaskStacks(5)
+Run expression 'console.trace()' with async chain len: 3
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : console.trace
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 67
+ functionName : Promise.resolve.then.then.then
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 46
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.resolve
+ }
+ }
+ timestamp : <timestamp>
+ type : trace
+ }
+}
+inspector.setMaxAsyncTaskStacks(6)
+Run expression 'console.trace()' with async chain len: 3
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : console.trace
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 67
+ functionName : Promise.resolve.then.then.then
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 46
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.resolve
+ }
+ }
+ timestamp : <timestamp>
+ type : trace
+ }
+}
+inspector.setMaxAsyncTaskStacks(7)
+Run expression 'console.trace()' with async chain len: 3
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : console.trace
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 67
+ functionName : Promise.resolve.then.then.then
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 46
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.resolve
+ }
+ }
+ timestamp : <timestamp>
+ type : trace
+ }
+}
+inspector.setMaxAsyncTaskStacks(8)
+Run expression 'console.trace()' with async chain len: 3
+{
+ method : Runtime.consoleAPICalled
+ params : {
+ args : [
+ [0] : {
+ type : string
+ value : console.trace
+ }
+ ]
+ executionContextId : <executionContextId>
+ stackTrace : {
+ callFrames : [
+ [0] : {
+ columnNumber : 67
+ functionName : Promise.resolve.then.then.then
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ parent : {
+ callFrames : [
+ [0] : {
+ columnNumber : 46
+ functionName :
+ lineNumber : 0
+ scriptId : <scriptId>
+ url :
+ }
+ ]
+ description : Promise.resolve
+ }
+ }
+ timestamp : <timestamp>
+ type : trace
+ }
+}
diff --git a/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit.js b/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit.js
new file mode 100644
index 0000000000..072af732c4
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/promise-chain-when-limit-hit.js
@@ -0,0 +1,54 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(kozyatinskiy): fix or remove it later.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests how async promise chains behave when reaching the limit of stacks');
+
+(async function test(){
+  InspectorTest.log('Checks correctness of promise chains when the limit is hit');
+ await Protocol.Runtime.enable();
+ await Protocol.Debugger.enable();
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: 128});
+
+ await setMaxAsyncTaskStacks(3);
+ runWithAsyncChainPromise(3, 'console.trace()');
+ InspectorTest.logMessage(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(4);
+ runWithAsyncChainPromise(3, 'console.trace()');
+ InspectorTest.logMessage(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(5);
+ runWithAsyncChainPromise(3, 'console.trace()');
+ InspectorTest.logMessage(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(6);
+ runWithAsyncChainPromise(3, 'console.trace()');
+ InspectorTest.logMessage(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(7);
+ runWithAsyncChainPromise(3, 'console.trace()');
+ InspectorTest.logMessage(await Protocol.Runtime.onceConsoleAPICalled());
+
+ await setMaxAsyncTaskStacks(8);
+ runWithAsyncChainPromise(3, 'console.trace()');
+ InspectorTest.logMessage(await Protocol.Runtime.onceConsoleAPICalled());
+
+ InspectorTest.completeTest();
+})();
+
+function runWithAsyncChainPromise(len, source) {
+ InspectorTest.log(`Run expression '${source}' with async chain len: ${len}`);
+ let then = '.then(() => 1)';
+ let pause = `.then(() => { ${source} })`;
+ Protocol.Runtime.evaluate({
+ expression: `Promise.resolve()${then.repeat(len - 1)}${pause}`
+ });
+}
+
+async function setMaxAsyncTaskStacks(max) {
+ let expression = `inspector.setMaxAsyncTaskStacks(${max})`;
+ InspectorTest.log(expression);
+ await Protocol.Runtime.evaluate({expression});
+}
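runWithAsyncChainPromise only stitches strings together; for illustration, this plain snippet (runnable outside the harness) prints the exact page-side expression it builds for len = 3:

// Two pass-through .then links from repeat(len - 1), then the probe link.
const src = 'console.trace()';
const len = 3;
const then = '.then(() => 1)';
const pause = `.then(() => { ${src} })`;
console.log(`Promise.resolve()${then.repeat(len - 1)}${pause}`);
// Promise.resolve().then(() => 1).then(() => 1).then(() => { console.trace() })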
diff --git a/deps/v8/test/inspector/debugger/protocol-string-to-double-locale-expected.txt b/deps/v8/test/inspector/debugger/protocol-string-to-double-locale-expected.txt
new file mode 100644
index 0000000000..52919c1d18
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/protocol-string-to-double-locale-expected.txt
@@ -0,0 +1,8 @@
+Tests that double numbers are parsed and serialized correctly in different locales
+This test verifies that we correctly parse doubles with a non-US locale
+{
+ a : 0.5
+}
+{
+ a : 1
+}
diff --git a/deps/v8/test/inspector/debugger/protocol-string-to-double-locale.js b/deps/v8/test/inspector/debugger/protocol-string-to-double-locale.js
new file mode 100644
index 0000000000..89b6d826ff
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/protocol-string-to-double-locale.js
@@ -0,0 +1,24 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that double numbers are parsed and serialized correctly in different locales');
+
+(async function() {
+  InspectorTest.log('This test verifies that we correctly parse doubles with a non-US locale');
+ utils.setlocale("fr_CA.UTF-8");
+ Protocol.Debugger.enable();
+ Protocol.Runtime.evaluate({
+ expression: 'inspector.breakProgram(\'\', JSON.stringify({a: 0.5}))'});
+ let message = await Protocol.Debugger.oncePaused();
+ InspectorTest.logObject(message.params.data || {});
+ Protocol.Debugger.resume();
+
+ Protocol.Runtime.evaluate({
+ expression: 'inspector.breakProgram(\'\', JSON.stringify({a: 1}))'});
+ message = await Protocol.Debugger.oncePaused();
+ InspectorTest.logObject(message.params.data || {});
+ Protocol.Debugger.resume();
+
+ InspectorTest.completeTest();
+})();
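The failure mode this test guards against lives in the C++ string-to-double path: under fr_CA the decimal separator is a comma, and a locale-sensitive conversion would corrupt protocol JSON. A background sketch, assuming an ICU-enabled runtime:

// Locale formatting uses a comma, while JSON stays locale-independent.
console.log((0.5).toLocaleString('fr-CA'));  // "0,5"
console.log(JSON.stringify({a: 0.5}));       // {"a":0.5}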
diff --git a/deps/v8/test/inspector/debugger/restore-breakpoint.js b/deps/v8/test/inspector/debugger/restore-breakpoint.js
index 3a10f5378e..e0d2b84766 100644
--- a/deps/v8/test/inspector/debugger/restore-breakpoint.js
+++ b/deps/v8/test/inspector/debugger/restore-breakpoint.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks that debugger agent uses source content to restore breakpoints.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that debugger agent uses source content to restore breakpoints.');
Protocol.Debugger.enable();
InspectorTest.runTestSuite([
diff --git a/deps/v8/test/inspector/debugger/return-break-locations.js b/deps/v8/test/inspector/debugger/return-break-locations.js
index 73e0416b0d..b253b88417 100644
--- a/deps/v8/test/inspector/debugger/return-break-locations.js
+++ b/deps/v8/test/inspector/debugger/return-break-locations.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Return break locations within function');
+let {session, contextGroup, Protocol} = InspectorTest.start('Return break locations within function');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function fib(x) {
if (x < 0) return;
if (x === 0) return 1;
diff --git a/deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout.js b/deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout.js
index f6ffe6e0a7..f2171a5037 100644
--- a/deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout.js
+++ b/deps/v8/test/inspector/debugger/schedule-step-into-async-set-timeout.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks Debugger.scheduleStepIntoAsync with setTimeout.');
-InspectorTest.setupScriptMap();
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks Debugger.scheduleStepIntoAsync with setTimeout.');
+session.setupScriptMap();
Protocol.Debugger.enable();
InspectorTest.runAsyncTestSuite([
async function testSetTimeout() {
@@ -42,7 +42,7 @@ InspectorTest.runAsyncTestSuite([
Protocol.Debugger.stepOver();
await waitPauseAndDumpLocation();
await Protocol.Debugger.resume();
- await InspectorTest.waitPendingTasks();
+ await InspectorTest.waitForPendingTasks();
},
async function testSetTimeoutWithoutJS() {
@@ -70,6 +70,6 @@ InspectorTest.runAsyncTestSuite([
async function waitPauseAndDumpLocation() {
var message = await Protocol.Debugger.oncePaused();
InspectorTest.log('paused at:');
- await InspectorTest.logSourceLocation(message.params.callFrames[0].location);
+ await session.logSourceLocation(message.params.callFrames[0].location);
return message;
}
diff --git a/deps/v8/test/inspector/debugger/schedule-step-into-async.js b/deps/v8/test/inspector/debugger/schedule-step-into-async.js
index 1556e8a55a..c4dfb73992 100644
--- a/deps/v8/test/inspector/debugger/schedule-step-into-async.js
+++ b/deps/v8/test/inspector/debugger/schedule-step-into-async.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks Debugger.scheduleStepIntoAsync.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks Debugger.scheduleStepIntoAsync.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function testNoScheduledTask() {
debugger;
return 42;
@@ -47,7 +47,7 @@ function testBlackboxedCreatePromise() {
}
//# sourceURL=test.js`);
-InspectorTest.addScript(`
+contextGroup.addScript(`
function createPromise() {
return Promise.resolve().then(v => v * 3).then(v => v * 4);
@@ -55,7 +55,7 @@ function createPromise() {
//# sourceURL=framework.js`)
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.enable();
InspectorTest.runAsyncTestSuite([
@@ -154,6 +154,6 @@ InspectorTest.runAsyncTestSuite([
async function waitPauseAndDumpLocation() {
var message = await Protocol.Debugger.oncePaused();
InspectorTest.log('paused at:');
- InspectorTest.logSourceLocation(message.params.callFrames[0].location);
+ session.logSourceLocation(message.params.callFrames[0].location);
return message;
}
diff --git a/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt b/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt
index b3dce305d8..626f9787c3 100644
--- a/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt
+++ b/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name-expected.txt
@@ -1,3 +1,4 @@
+Tests that scopes do not report variables with empty names
{
id : <messageId>
result : {
@@ -16,4 +17,4 @@
}
]
}
-}
\ No newline at end of file
+}
diff --git a/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name.js b/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name.js
index e2b38d8ec9..72cbeeefcb 100644
--- a/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name.js
+++ b/deps/v8/test/inspector/debugger/scope-skip-variables-with-empty-name.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.addScript(
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that scopes do not report variables with empty names');
+
+contextGroup.addScript(
`function testFunction()
{
for (var a of [1]) {
diff --git a/deps/v8/test/inspector/debugger/script-end-location.js b/deps/v8/test/inspector/debugger/script-end-location.js
index cdfff8cda6..57d12350e0 100644
--- a/deps/v8/test/inspector/debugger/script-end-location.js
+++ b/deps/v8/test/inspector/debugger/script-end-location.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks that we report correct endLine, endColumn and source for scripts.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that we report correct endLine, endColumn and source for scripts.');
var sources = [
'',
@@ -27,7 +27,7 @@ var sources = [
(async function test() {
Protocol.Debugger.enable();
for (let source of sources) {
- InspectorTest.addScript(source);
+ contextGroup.addScript(source);
var message = await Protocol.Debugger.onceScriptParsed();
var inspectorSource = (await Protocol.Debugger.getScriptSource({ scriptId: message.params.scriptId })).result.scriptSource;
var lines = source.split('\n');
diff --git a/deps/v8/test/inspector/debugger/script-on-after-compile.js b/deps/v8/test/inspector/debugger/script-on-after-compile.js
index 6a5a55b5ff..544dbaaae2 100644
--- a/deps/v8/test/inspector/debugger/script-on-after-compile.js
+++ b/deps/v8/test/inspector/debugger/script-on-after-compile.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Checks that inspector correctly process compiled scripts");
+let {session, contextGroup, Protocol} = InspectorTest.start("Checks that inspector correctly processes compiled scripts");
function addScripts() {
// sourceURL in the same line
diff --git a/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate.js b/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate.js
index 300821baa3..bebf64bddc 100644
--- a/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate.js
+++ b/deps/v8/test/inspector/debugger/script-parsed-for-runtime-evaluate.js
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Checks that inspector reports script compiled in Runtime.evaluate, " +
+let {session, contextGroup, Protocol} = InspectorTest.start("Checks that inspector reports scripts compiled in Runtime.evaluate, " +
"Runtime.callFunctionOn and Runtime.compileScript");
-InspectorTest.addScript(`
+contextGroup.addScript(`
function fooTop() {
eval(\`
function foo() {
@@ -15,7 +15,7 @@ function fooTop() {
}
//# sourceURL=top-frame.js`, 8, 26);
-InspectorTest.addScript(`
+contextGroup.addScript(`
function fooTopFail() {
eval(\`
function fooFail() {
diff --git a/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt b/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt
index 20fdb859fd..8836266f77 100644
--- a/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt
+++ b/deps/v8/test/inspector/debugger/script-parsed-hash-expected.txt
@@ -1,3 +1,4 @@
+Tests scripts hashing
Hash received: 1C6D2E82E4E4F1BA4CB5762843D429DC872EBA18
Hash received: EBF1ECD351E7A3294CB5762843D429DC872EBA18
-Hash received: 86A31E7131896CF01BA837945C2894385F369F24
\ No newline at end of file
+Hash received: 86A31E7131896CF01BA837945C2894385F369F24
diff --git a/deps/v8/test/inspector/debugger/script-parsed-hash.js b/deps/v8/test/inspector/debugger/script-parsed-hash.js
index 5dd1dfacee..15c82ad7a8 100644
--- a/deps/v8/test/inspector/debugger/script-parsed-hash.js
+++ b/deps/v8/test/inspector/debugger/script-parsed-hash.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests scripts hashing');
+
var hashes = new Set(["1C6D2E82E4E4F1BA4CB5762843D429DC872EBA18",
"EBF1ECD351E7A3294CB5762843D429DC872EBA18",
"86A31E7131896CF01BA837945C2894385F369F24"]);
diff --git a/deps/v8/test/inspector/debugger/script-with-negative-offset-expected.txt b/deps/v8/test/inspector/debugger/script-with-negative-offset-expected.txt
new file mode 100644
index 0000000000..ebdf8f3ee1
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/script-with-negative-offset-expected.txt
@@ -0,0 +1,19 @@
+Locations in script with negative offset.
+[
+ [0] : {
+ columnNumber : 16
+ lineNumber : 0
+ scriptId : <scriptId>
+ type : debuggerStatement
+ }
+ [1] : {
+ columnNumber : 26
+ lineNumber : 0
+ scriptId : <scriptId>
+ type : return
+ }
+]
+foo (:-1:16)
+(anonymous) (:0:0)
+boo (:0:16)
+(anonymous) (:0:0)
diff --git a/deps/v8/test/inspector/debugger/script-with-negative-offset.js b/deps/v8/test/inspector/debugger/script-with-negative-offset.js
new file mode 100644
index 0000000000..fbc0b34302
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/script-with-negative-offset.js
@@ -0,0 +1,31 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} =
+ InspectorTest.start('Locations in script with negative offset.');
+
+(async function test() {
+ contextGroup.addScript(`function foo() { debugger; }
+function boo(){ debugger; }
+`, -1, -1);
+ session.setupScriptMap();
+ Protocol.Debugger.enable();
+ let {params:{scriptId}} = await Protocol.Debugger.onceScriptParsed();
+ let {result:{locations}} = await Protocol.Debugger.getPossibleBreakpoints({
+ start: {scriptId, lineNumber: 0, columnNumber: 0}
+ });
+ InspectorTest.logMessage(locations);
+
+ Protocol.Runtime.evaluate({expression: 'foo()'});
+ var {params:{callFrames}} = await Protocol.Debugger.oncePaused();
+ session.logCallFrames(callFrames);
+ await Protocol.Debugger.resume();
+
+ Protocol.Runtime.evaluate({expression: 'boo()'});
+ var {params:{callFrames}} = await Protocol.Debugger.oncePaused();
+ session.logCallFrames(callFrames);
+ await Protocol.Debugger.resume();
+
+ InspectorTest.completeTest();
+})();
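The expected output is consistent with a simple rule: the line offset shifts every reported line, while the column offset applies only to the script's first line. foo's debugger statement (physical line 0, column 17) therefore surfaces as :-1:16, boo's (line 1, column 16) as :0:16, and the breakpoint listing, which starts at line 0, never reaches foo's negative line. A hypothetical helper capturing that rule:

// Assumed mapping from physical positions to reported positions; the
// column offset is ignored past the first line.
function reportedLocation(line, column, lineOffset, columnOffset) {
  return {
    lineNumber: line + lineOffset,
    columnNumber: line === 0 ? column + columnOffset : column,
  };
}
console.log(reportedLocation(0, 17, -1, -1));  // foo's debugger -> :-1:16
console.log(reportedLocation(1, 16, -1, -1));  // boo's debugger -> :0:16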
diff --git a/deps/v8/test/inspector/debugger/set-async-call-stack-depth-expected.txt b/deps/v8/test/inspector/debugger/set-async-call-stack-depth-expected.txt
new file mode 100644
index 0000000000..98fccebe68
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-async-call-stack-depth-expected.txt
@@ -0,0 +1,37 @@
+Checks that we report not more than maxDepth call chains.
+
+Running test: testPaused
+Actual call chain length: 8
+setAsyncCallStackDepth(maxDepth): 16
+reported: 1
+
+Actual call chain length: 8
+setAsyncCallStackDepth(maxDepth): 8
+reported: 1
+
+Actual call chain length: 8
+setAsyncCallStackDepth(maxDepth): 7
+reported: 1
+
+Actual call chain length: 8
+setAsyncCallStackDepth(maxDepth): 0
+reported: 0
+
+
+Running test: testConsoleTrace
+Actual call chain length: 8
+setAsyncCallStackDepth(maxDepth): 16
+reported: 1
+
+Actual call chain length: 8
+setAsyncCallStackDepth(maxDepth): 8
+reported: 1
+
+Actual call chain length: 8
+setAsyncCallStackDepth(maxDepth): 7
+reported: 1
+
+Actual call chain length: 8
+setAsyncCallStackDepth(maxDepth): 0
+reported: 0
+
diff --git a/deps/v8/test/inspector/debugger/set-async-call-stack-depth.js b/deps/v8/test/inspector/debugger/set-async-call-stack-depth.js
new file mode 100644
index 0000000000..0c7567f499
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/set-async-call-stack-depth.js
@@ -0,0 +1,79 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// TODO(kozyatinskiy): fix or remove it later; with new stack traces it's
+// almost impossible to hit the limit.
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that we report not more than maxDepth call chains.');
+
+contextGroup.addScript(`
+function promisesChain(num) {
+ var p = Promise.resolve();
+ for (var i = 0; i < num - 1; ++i) {
+ p = p.then(() => 42);
+ }
+ return p;
+}
+`);
+
+Protocol.Debugger.enable();
+InspectorTest.runAsyncTestSuite([
+ async function testPaused() {
+ let callback = '() => { debugger; }';
+ startTest({ generated: 8, limit: 16, callback});
+ dumpCaptured((await Protocol.Debugger.oncePaused()).params.asyncStackTrace);
+ await Protocol.Debugger.resume();
+
+ startTest({ generated: 8, limit: 8, callback});
+ dumpCaptured((await Protocol.Debugger.oncePaused()).params.asyncStackTrace);
+ await Protocol.Debugger.resume();
+
+ startTest({ generated: 8, limit: 7, callback});
+ dumpCaptured((await Protocol.Debugger.oncePaused()).params.asyncStackTrace);
+ await Protocol.Debugger.resume();
+
+ startTest({ generated: 8, limit: 0, callback});
+ dumpCaptured((await Protocol.Debugger.oncePaused()).params.asyncStackTrace);
+ await Protocol.Debugger.resume();
+ },
+
+ async function testConsoleTrace() {
+ await Protocol.Runtime.enable();
+ let callback = '() => { console.trace(42); }';
+ startTest({ generated: 8, limit: 16, callback});
+ let msg = await Protocol.Runtime.onceConsoleAPICalled();
+ dumpCaptured(msg.params.stackTrace.parent);
+
+ startTest({ generated: 8, limit: 8, callback});
+ msg = await Protocol.Runtime.onceConsoleAPICalled();
+ dumpCaptured(msg.params.stackTrace.parent);
+
+ startTest({ generated: 8, limit: 7, callback});
+ msg = await Protocol.Runtime.onceConsoleAPICalled();
+ dumpCaptured(msg.params.stackTrace.parent);
+
+ startTest({ generated: 8, limit: 0, callback});
+ msg = await Protocol.Runtime.onceConsoleAPICalled();
+ dumpCaptured(msg.params.stackTrace.parent);
+
+ await Protocol.Runtime.disable();
+ }
+]);
+
+function startTest(params) {
+ InspectorTest.log('Actual call chain length: ' + params.generated);
+ InspectorTest.log('setAsyncCallStackDepth(maxDepth): ' + params.limit);
+
+ Protocol.Debugger.setAsyncCallStackDepth({maxDepth: params.limit});
+ Protocol.Runtime.evaluate({expression:
+ `promisesChain(${params.generated}).then(${params.callback})`});
+}
+
+function dumpCaptured(stack) {
+ let count = 0;
+ while (stack) {
+ ++count;
+ stack = stack.parent;
+ }
+ InspectorTest.log('reported: ' + count + '\n');
+}
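startTest above is also pure string assembly; for a given parameter set this is the entire page-side program it evaluates, shown here as a plain illustration runnable outside the harness:

const params = {generated: 8, limit: 7, callback: '() => { debugger; }'};
// promisesChain(8) builds an eight-link chain in the page; maxDepth: 7
// caps how many async parents the protocol is allowed to report.
console.log(`promisesChain(${params.generated}).then(${params.callback})`);
// promisesChain(8).then(() => { debugger; })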
diff --git a/deps/v8/test/inspector/debugger/set-blackbox-patterns-expected.txt b/deps/v8/test/inspector/debugger/set-blackbox-patterns-expected.txt
index fb54163107..bb3055a62d 100644
--- a/deps/v8/test/inspector/debugger/set-blackbox-patterns-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-blackbox-patterns-expected.txt
@@ -1,3 +1,4 @@
+Tests blackboxing by patterns
Pattern parser error: Uncaught SyntaxError: Invalid regular expression: /(foo([)/: Unterminated character class
Paused in
(...):1
diff --git a/deps/v8/test/inspector/debugger/set-blackbox-patterns.js b/deps/v8/test/inspector/debugger/set-blackbox-patterns.js
index 12e9e214d3..d060c90a9d 100644
--- a/deps/v8/test/inspector/debugger/set-blackbox-patterns.js
+++ b/deps/v8/test/inspector/debugger/set-blackbox-patterns.js
@@ -2,13 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.addScript(
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests blackboxing by patterns');
+
+contextGroup.addScript(
`function bar()
{
return 42;
}`);
-InspectorTest.addScript(
+contextGroup.addScript(
`function foo()
{
var a = bar();
@@ -16,7 +18,7 @@ InspectorTest.addScript(
}
//# sourceURL=foo.js`);
-InspectorTest.addScript(
+contextGroup.addScript(
`function qwe()
{
var a = foo();
@@ -24,7 +26,7 @@ InspectorTest.addScript(
}
//# sourceURL=qwe.js`);
-InspectorTest.addScript(
+contextGroup.addScript(
`function baz()
{
var a = qwe();
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
index e4fdd95d5f..26017349ef 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling-expected.txt
@@ -1,3 +1,4 @@
+Tests that setting a breakpoint before enabling the debugger produces an error
setBreakpointByUrl error: undefined
setBreakpoint error: {
"code": -32602,
diff --git a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js
index 8480aa6f75..84541be37d 100644
--- a/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js
+++ b/deps/v8/test/inspector/debugger/set-breakpoint-before-enabling.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that setting a breakpoint before enabling the debugger produces an error');
+
Protocol.Debugger.setBreakpointByUrl({ url: "http://example.com", lineNumber: 10 }).then(didSetBreakpointByUrlBeforeEnable);
function didSetBreakpointByUrlBeforeEnable(message)
diff --git a/deps/v8/test/inspector/debugger/set-script-source-exception.js b/deps/v8/test/inspector/debugger/set-script-source-exception.js
index d3082789f2..627aa7ec1e 100644
--- a/deps/v8/test/inspector/debugger/set-script-source-exception.js
+++ b/deps/v8/test/inspector/debugger/set-script-source-exception.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Check that setScriptSource completes correctly when an exception is thrown.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Check that setScriptSource completes correctly when an exception is thrown.');
Protocol.Debugger.enable();
@@ -18,6 +18,6 @@ InspectorTest.runTestSuite([
.then(message => Protocol.Debugger.setScriptSource({ scriptId: message.params.scriptId, scriptSource: 'a # b' }))
.then(InspectorTest.logMessage)
.then(next);
- InspectorTest.addScript('function foo() {}');
+ contextGroup.addScript('function foo() {}');
}
]);
diff --git a/deps/v8/test/inspector/debugger/set-script-source-expected.txt b/deps/v8/test/inspector/debugger/set-script-source-expected.txt
index 1b76ec5f95..e77aafd690 100644
--- a/deps/v8/test/inspector/debugger/set-script-source-expected.txt
+++ b/deps/v8/test/inspector/debugger/set-script-source-expected.txt
@@ -1,3 +1,4 @@
+Tests Debugger.setScriptSource
Function evaluate: {"type":"number","value":6,"description":"6"}
PASS, result value: 6
Function evaluate: {"type":"number","value":8,"description":"8"}
@@ -5,4 +6,3 @@ PASS, result value: 8
Has error reported: PASS
Reported error is a compile error: PASS
PASS, result value: 1
-
diff --git a/deps/v8/test/inspector/debugger/set-script-source.js b/deps/v8/test/inspector/debugger/set-script-source.js
index 36944cca6c..f13a3a39bb 100644
--- a/deps/v8/test/inspector/debugger/set-script-source.js
+++ b/deps/v8/test/inspector/debugger/set-script-source.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.addScript(
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests Debugger.setScriptSource');
+
+contextGroup.addScript(
`function TestExpression(a, b) {
return a + b;
}`);
diff --git a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt
index d77870e737..6d113861dd 100644
--- a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt
+++ b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate-expected.txt
@@ -1,3 +1,4 @@
+Tests side-effect-free evaluation
Paused on 'debugger;'
f() returns 1
g() returns 2
diff --git a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js
index 705901ad78..34e3a25981 100644
--- a/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js
+++ b/deps/v8/test/inspector/debugger/side-effect-free-debug-evaluate.js
@@ -3,8 +3,9 @@
// found in the LICENSE file.
// Flags: --ignition --turbo
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests side-effect-free evaluation');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function testFunction()
{
var o = 0;
diff --git a/deps/v8/test/inspector/debugger/step-into-expected.txt b/deps/v8/test/inspector/debugger/step-into-expected.txt
index b912b82fa2..8be36948b2 100644
--- a/deps/v8/test/inspector/debugger/step-into-expected.txt
+++ b/deps/v8/test/inspector/debugger/step-into-expected.txt
@@ -747,16 +747,6 @@ break at:
Running test: testClasses
break at:
-function testClasses() {
- #class Cat {
- constructor(name) {
-
-break at:
- }
- #class Lion extends Cat {
- constructor(name) {
-
-break at:
}
#new Lion().speak();
}
@@ -810,11 +800,21 @@ break at:
Running test: testAsyncAwait
break at:
async function testAsyncAwait() {
+ #await asyncFoo();
+ await awaitBoo();
+
+break at:
+async function testAsyncAwait() {
await #asyncFoo();
await awaitBoo();
break at:
async function asyncFoo() {
+ #await Promise.resolve().then(v => v * 2);
+ return42();
+
+break at:
+async function asyncFoo() {
await Promise.resolve().#then(v => v * 2);
return42();
@@ -845,6 +845,11 @@ break at:
break at:
async function asyncBoo() {
+ #await Promise.resolve();
+}
+
+break at:
+async function asyncBoo() {
await Promise.#resolve();
}
@@ -882,6 +887,11 @@ break at:
break at:
setTimeout(returnCall, 0);
+ #await foo();
+ await foo();
+
+break at:
+ setTimeout(returnCall, 0);
await #foo();
await foo();
@@ -907,6 +917,11 @@ break at:
break at:
setTimeout(resolveNested, 0);
+ #await p;
+ }
+
+break at:
+ setTimeout(resolveNested, 0);
await #p;
}
@@ -937,6 +952,11 @@ break at:
break at:
setTimeout(resolveNested, 0);
+ #await p;
+ }
+
+break at:
+ setTimeout(resolveNested, 0);
await #p;
}
@@ -1064,6 +1084,11 @@ break at:
break at:
async function foo() {
+ #await Promise.resolve();
+ return 42;
+
+break at:
+ async function foo() {
await Promise.#resolve();
return 42;
diff --git a/deps/v8/test/inspector/debugger/step-into-nested-arrow.js b/deps/v8/test/inspector/debugger/step-into-nested-arrow.js
index 0b0307a5e6..305796754a 100644
--- a/deps/v8/test/inspector/debugger/step-into-nested-arrow.js
+++ b/deps/v8/test/inspector/debugger/step-into-nested-arrow.js
@@ -2,18 +2,18 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log(
+let {session, contextGroup, Protocol} = InspectorTest.start(
'Checks that stepInto nested arrow function doesn\'t produce crash.');
-InspectorTest.setupScriptMap();
-InspectorTest.addScript(`
+session.setupScriptMap();
+contextGroup.addScript(`
const rec = (x) => (y) =>
rec();
//# sourceURL=test.js`);
Protocol.Debugger.onPaused(message => {
InspectorTest.log("paused");
- InspectorTest.logCallFrames(message.params.callFrames);
+ session.logCallFrames(message.params.callFrames);
Protocol.Debugger.stepInto();
})
diff --git a/deps/v8/test/inspector/debugger/step-into-next-script.js b/deps/v8/test/inspector/debugger/step-into-next-script.js
index 37436be1d5..80e9a9180f 100644
--- a/deps/v8/test/inspector/debugger/step-into-next-script.js
+++ b/deps/v8/test/inspector/debugger/step-into-next-script.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Debugger breaks in next script after stepOut from previous one.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Debugger breaks in next script after stepOut from previous one.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function test() {
setTimeout('var a = 1;//# sourceURL=timeout1.js', 0);
setTimeout(foo, 0);
@@ -13,16 +13,16 @@ function test() {
}
//# sourceURL=foo.js`, 7, 26);
-InspectorTest.addScript(`
+contextGroup.addScript(`
function foo() {
return 42;
}
//# sourceURL=timeout2.js`)
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
var stepAction;
Protocol.Debugger.onPaused(message => {
- InspectorTest.logCallFrames(message.params.callFrames);
+ session.logCallFrames(message.params.callFrames);
InspectorTest.log('');
Protocol.Debugger[stepAction]();
});
@@ -31,21 +31,21 @@ InspectorTest.runTestSuite([
function testStepOut(next) {
stepAction = 'stepOut';
Protocol.Runtime.evaluate({ expression: 'test()' })
- .then(() => InspectorTest.waitPendingTasks())
+ .then(() => InspectorTest.waitForPendingTasks())
.then(next);
},
function testStepOver(next) {
stepAction = 'stepOver';
Protocol.Runtime.evaluate({ expression: 'test()' })
- .then(() => InspectorTest.waitPendingTasks())
+ .then(() => InspectorTest.waitForPendingTasks())
.then(next);
},
function testStepInto(next) {
stepAction = 'stepInto';
Protocol.Runtime.evaluate({ expression: 'test()' })
- .then(() => InspectorTest.waitPendingTasks())
+ .then(() => InspectorTest.waitForPendingTasks())
.then(next);
}
]);
diff --git a/deps/v8/test/inspector/debugger/step-into.js b/deps/v8/test/inspector/debugger/step-into.js
index e08707c9fc..b0b83b3d59 100644
--- a/deps/v8/test/inspector/debugger/step-into.js
+++ b/deps/v8/test/inspector/debugger/step-into.js
@@ -4,9 +4,9 @@
// Flags: --turbo
-InspectorTest.log('Checks possible break locations.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks possible break locations.');
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.onPaused(message => {
var frames = message.params.callFrames;
if (frames.length === 1) {
@@ -15,11 +15,11 @@ Protocol.Debugger.onPaused(message => {
}
var scriptId = frames[0].location.scriptId;
InspectorTest.log('break at:');
- InspectorTest.logSourceLocation(frames[0].location)
+ session.logSourceLocation(frames[0].location)
.then(() => Protocol.Debugger.stepInto());
});
-InspectorTest.loadScript('test/inspector/debugger/resources/break-locations.js');
+contextGroup.loadScript('test/inspector/debugger/resources/break-locations.js');
Protocol.Debugger.enable();
Protocol.Runtime.evaluate({ expression: 'Object.keys(this).filter(name => name.indexOf(\'test\') === 0)', returnByValue: true })
diff --git a/deps/v8/test/inspector/debugger/step-out-async-await.js b/deps/v8/test/inspector/debugger/step-out-async-await.js
index 3b249dc7f3..ff83b82e78 100644
--- a/deps/v8/test/inspector/debugger/step-out-async-await.js
+++ b/deps/v8/test/inspector/debugger/step-out-async-await.js
@@ -6,9 +6,9 @@
// of async generator we should break at next instruction of resumed generator
// instead of next scheduled microtask.
-InspectorTest.log('StepOut from return position of async function.');
+let {session, contextGroup, Protocol} = InspectorTest.start('StepOut from return position of async function.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
async function testFunction() {
async function foo() {
var p = Promise.resolve();
@@ -21,7 +21,7 @@ InspectorTest.addScript(`
}
`);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.enable();
InspectorTest.runAsyncTestSuite([
async function testStepInto() {
@@ -68,5 +68,5 @@ InspectorTest.runAsyncTestSuite([
]);
function logPauseLocation(message) {
- return InspectorTest.logSourceLocation(message.params.callFrames[0].location);
+ return session.logSourceLocation(message.params.callFrames[0].location);
}
diff --git a/deps/v8/test/inspector/debugger/step-over-another-context-group.js b/deps/v8/test/inspector/debugger/step-over-another-context-group.js
index c860ef9f46..758ff84697 100644
--- a/deps/v8/test/inspector/debugger/step-over-another-context-group.js
+++ b/deps/v8/test/inspector/debugger/step-over-another-context-group.js
@@ -4,27 +4,33 @@
InspectorTest.log('Checks stepping with more than one context group.');
+var contextGroup1 = new InspectorTest.ContextGroup();
+var session1 = contextGroup1.connect();
+session1.setupScriptMap();
+
+let contextGroup2 = new InspectorTest.ContextGroup();
+let session2 = contextGroup2.connect();
+session2.setupScriptMap();
+
(async function test() {
- InspectorTest.setupScriptMap();
- await Protocol.Debugger.enable();
- let contextGroupId = utils.createContextGroup();
- await Protocol.Debugger.enable({}, contextGroupId);
- Protocol.Runtime.evaluate({expression: 'debugger'});
- Protocol.Runtime.evaluate({expression: 'setTimeout(() => { debugger }, 0)'}, contextGroupId);
- Protocol.Runtime.evaluate({expression: 'setTimeout(() => 42, 0)'});
- await waitPauseAndDumpLocation();
- Protocol.Debugger.stepOver();
- await Protocol.Debugger.oncePaused();
- Protocol.Debugger.stepOver();
- await waitPauseAndDumpLocation();
- await Protocol.Debugger.disable({}, contextGroupId);
- await Protocol.Debugger.disable();
+ await session1.Protocol.Debugger.enable();
+ await session2.Protocol.Debugger.enable({});
+ session1.Protocol.Runtime.evaluate({expression: 'debugger'});
+ session2.Protocol.Runtime.evaluate({expression: 'setTimeout(() => { debugger }, 0)'});
+ session1.Protocol.Runtime.evaluate({expression: 'setTimeout(() => 42, 0)'});
+ await waitPauseAndDumpLocation(session1);
+ session1.Protocol.Debugger.stepOver();
+ await session1.Protocol.Debugger.oncePaused();
+ session1.Protocol.Debugger.stepOver();
+ await waitPauseAndDumpLocation(session1);
+ await session2.Protocol.Debugger.disable({});
+ await session1.Protocol.Debugger.disable();
InspectorTest.completeTest();
})();
-async function waitPauseAndDumpLocation() {
- var message = await Protocol.Debugger.oncePaused();
+async function waitPauseAndDumpLocation(session) {
+ var message = await session.Protocol.Debugger.oncePaused();
InspectorTest.log('paused at:');
- await InspectorTest.logSourceLocation(message.params.callFrames[0].location);
+ await session.logSourceLocation(message.params.callFrames[0].location);
return message;
}
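
The hunk above captures the core of the harness migration: per-test global state (`utils.createContextGroup()`, one implicit `Protocol`) gives way to explicit `InspectorTest.ContextGroup` objects, each of which hands out sessions carrying their own `Protocol` binding. A minimal sketch of the new pattern, using only harness calls visible in this patch (`ContextGroup`, `connect()`, `session.Protocol`, `session.logSourceLocation`); it is an illustration, not part of the commit:

// Sketch (not in the patch): two isolated debugger sessions, one per group.
let groupA = new InspectorTest.ContextGroup();
let groupB = new InspectorTest.ContextGroup();
let sessionA = groupA.connect();
let sessionB = groupB.connect();
sessionA.setupScriptMap();
sessionB.setupScriptMap();

(async function() {
  await sessionA.Protocol.Debugger.enable();
  await sessionB.Protocol.Debugger.enable();
  // Pausing in group A does not disturb group B.
  sessionA.Protocol.Runtime.evaluate({expression: 'debugger'});
  let {params} = await sessionA.Protocol.Debugger.oncePaused();
  await sessionA.logSourceLocation(params.callFrames[0].location);
  await sessionA.Protocol.Debugger.disable();
  await sessionB.Protocol.Debugger.disable();
  InspectorTest.completeTest();
})();
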
diff --git a/deps/v8/test/inspector/debugger/step-over-caught-exception-expected.txt b/deps/v8/test/inspector/debugger/step-over-caught-exception-expected.txt
index a18b0934cb..5b72d4cce0 100644
--- a/deps/v8/test/inspector/debugger/step-over-caught-exception-expected.txt
+++ b/deps/v8/test/inspector/debugger/step-over-caught-exception-expected.txt
@@ -1,4 +1,5 @@
+Tests that stepping over caught exception will pause when asked for
testFunction:9
testFunction:11
testFunction:9
-testFunction:11 \ No newline at end of file
+testFunction:11
diff --git a/deps/v8/test/inspector/debugger/step-over-caught-exception.js b/deps/v8/test/inspector/debugger/step-over-caught-exception.js
index e00dcf27dc..c8e711b0be 100644
--- a/deps/v8/test/inspector/debugger/step-over-caught-exception.js
+++ b/deps/v8/test/inspector/debugger/step-over-caught-exception.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.addScript(
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that stepping over caught exception will pause when asked for');
+
+contextGroup.addScript(
`function testFunction()
{
function foo()
diff --git a/deps/v8/test/inspector/debugger/step-snapshot-expected.txt b/deps/v8/test/inspector/debugger/step-snapshot-expected.txt
new file mode 100644
index 0000000000..7853ed7370
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/step-snapshot-expected.txt
@@ -0,0 +1,35 @@
+Embedding script 'function c(f, ...args) { return f(...args); }'
+Tests that stepping works on snapshotted function
+paused
+ }
+ #debugger;
+ c(f, 2);
+
+paused
+ debugger;
+ #c(f, 2);
+}
+
+paused
+function c(f, ...args) { #return f(...args); }
+
+paused
+ function f(x) {
+ #return x * 2;
+ }
+
+paused
+ return x * 2;
+ #}
+ debugger;
+
+paused
+function c(f, ...args) { return f(...args); #}
+
+paused
+ c(f, 2);
+#}
+
+paused
+test(#)
+
diff --git a/deps/v8/test/inspector/debugger/step-snapshot.js b/deps/v8/test/inspector/debugger/step-snapshot.js
new file mode 100644
index 0000000000..a4ecbf2f28
--- /dev/null
+++ b/deps/v8/test/inspector/debugger/step-snapshot.js
@@ -0,0 +1,31 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Embed a user function in the snapshot and step through it.
+
+// Flags: --embed 'function c(f, ...args) { return f(...args); }'
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that stepping works on snapshotted function');
+session.setupScriptMap();
+
+contextGroup.addScript(`
+function test() {
+ function f(x) {
+ return x * 2;
+ }
+ debugger;
+ c(f, 2);
+}
+//# sourceURL=test.js`);
+
+Protocol.Debugger.onPaused(message => {
+ InspectorTest.log("paused");
+ var frames = message.params.callFrames;
+ session.logSourceLocation(frames[0].location);
+ Protocol.Debugger.stepInto();
+})
+
+Protocol.Debugger.enable()
+ .then(() => Protocol.Runtime.evaluate({ expression: 'test()' }))
+ .then(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints-expected.txt b/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints-expected.txt
index abe85f5c80..67f38301fd 100644
--- a/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints-expected.txt
+++ b/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints-expected.txt
@@ -1,3 +1,4 @@
+Tests that stepping works after calling getPossibleBreakpoints
-- call boo:
(top)
(top)
diff --git a/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints.js b/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints.js
index 7d6577b82a..c36a36cb07 100644
--- a/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints.js
+++ b/deps/v8/test/inspector/debugger/stepping-after-get-possible-breakpoints.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.addScript(`
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that stepping works after calling getPossibleBreakpoints');
+
+contextGroup.addScript(`
function boo() {}
boo();
function foo() {}
diff --git a/deps/v8/test/inspector/debugger/stepping-and-break-program-api-expected.txt b/deps/v8/test/inspector/debugger/stepping-and-break-program-api-expected.txt
index cd7c214b75..438177ade6 100644
--- a/deps/v8/test/inspector/debugger/stepping-and-break-program-api-expected.txt
+++ b/deps/v8/test/inspector/debugger/stepping-and-break-program-api-expected.txt
@@ -2,16 +2,16 @@ Checks that stepping is cleared after breakProgram.
paused at:
function callBreakProgram() {
#debugger;
- breakProgram('reason', '');
+ inspector.breakProgram('reason', '');
paused at:
debugger;
- #breakProgram('reason', '');
+ #inspector.breakProgram('reason', '');
}
paused at:
debugger;
- #breakProgram('reason', '');
+ inspector.#breakProgram('reason', '');
}
paused at:
diff --git a/deps/v8/test/inspector/debugger/stepping-and-break-program-api.js b/deps/v8/test/inspector/debugger/stepping-and-break-program-api.js
index 4900843fc5..f54bad827c 100644
--- a/deps/v8/test/inspector/debugger/stepping-and-break-program-api.js
+++ b/deps/v8/test/inspector/debugger/stepping-and-break-program-api.js
@@ -2,15 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks that stepping is cleared after breakProgram.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that stepping is cleared after breakProgram.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function callBreakProgram() {
debugger;
- breakProgram('reason', '');
+ inspector.breakProgram('reason', '');
}`);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
(async function test() {
Protocol.Debugger.enable();
Protocol.Runtime.evaluate({expression: 'callBreakProgram();'});
@@ -29,6 +29,6 @@ InspectorTest.setupScriptMap();
async function waitPauseAndDumpLocation() {
var message = await Protocol.Debugger.oncePaused();
InspectorTest.log('paused at:');
- InspectorTest.logSourceLocation(message.params.callFrames[0].location);
+ session.logSourceLocation(message.params.callFrames[0].location);
return message;
}
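
Several files in this series apply the same rename: helpers the test runner used to inject as bare globals (`breakProgram` here, `createObjectWithStrictCheck` in the heap-profiler test further down) are now grouped under a single `inspector` host object. A minimal sketch of the convention, assuming only what the hunks show:

// Sketch (not in the patch): host helpers are now called through `inspector.*`.
contextGroup.addScript(`
  function callBreakProgram() {
    debugger;
    inspector.breakProgram('reason', '');  // was: breakProgram('reason', '')
  }`);
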
diff --git a/deps/v8/test/inspector/debugger/stepping-ignores-injected-script-expected.txt b/deps/v8/test/inspector/debugger/stepping-ignores-injected-script-expected.txt
index e4557d5cf7..5a63493dc7 100644
--- a/deps/v8/test/inspector/debugger/stepping-ignores-injected-script-expected.txt
+++ b/deps/v8/test/inspector/debugger/stepping-ignores-injected-script-expected.txt
@@ -1 +1,2 @@
+Tests that stepping ignores injected script
InjectedScriptSource was not reached
diff --git a/deps/v8/test/inspector/debugger/stepping-ignores-injected-script.js b/deps/v8/test/inspector/debugger/stepping-ignores-injected-script.js
index 31c958084a..9021664a96 100644
--- a/deps/v8/test/inspector/debugger/stepping-ignores-injected-script.js
+++ b/deps/v8/test/inspector/debugger/stepping-ignores-injected-script.js
@@ -2,8 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that stepping ignores injected script');
+
Protocol.Debugger.onPaused(message => {
- let url = InspectorTest._scriptMap.get(message.params.callFrames[0].location.scriptId).url;
+ let url = session._scriptMap.get(message.params.callFrames[0].location.scriptId).url;
if (url !== 'test.js') {
InspectorTest.log('InjectedScriptSource on stack.');
InspectorTest.completeTest();
@@ -11,7 +13,7 @@ Protocol.Debugger.onPaused(message => {
Protocol.Debugger.stepInto();
});
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.enable();
Protocol.Debugger.pause();
Protocol.Runtime.evaluate({expression: 'console.log(42)//# sourceURL=test.js'})
diff --git a/deps/v8/test/inspector/debugger/stepping-tail-call.js b/deps/v8/test/inspector/debugger/stepping-tail-call.js
index 763b23b8a6..797df7d675 100644
--- a/deps/v8/test/inspector/debugger/stepping-tail-call.js
+++ b/deps/v8/test/inspector/debugger/stepping-tail-call.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks stepping over tail calls.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks stepping over tail calls.');
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
InspectorTest.logProtocolCommandCalls('Debugger.pause');
InspectorTest.logProtocolCommandCalls('Debugger.stepInto');
InspectorTest.logProtocolCommandCalls('Debugger.stepOver');
@@ -76,6 +76,6 @@ InspectorTest.runAsyncTestSuite([
]);
function logPauseLocation(message) {
- InspectorTest.logCallFrames(message.params.callFrames);
- return InspectorTest.logSourceLocation(message.params.callFrames[0].location);
+ session.logCallFrames(message.params.callFrames);
+ return session.logSourceLocation(message.params.callFrames[0].location);
}
diff --git a/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges-expected.txt b/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges-expected.txt
index acea22fd5f..7e23082c4b 100644
--- a/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges-expected.txt
+++ b/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges-expected.txt
@@ -1,3 +1,4 @@
+Tests that blackboxed ranges are respected while stepping
foo: 8:4
blackboxedBoo: 3:12
notBlackboxedFoo: 3:12
@@ -52,3 +53,4 @@ notBlackboxedFoo: 3:12
blackboxedFoo: 10:12
notBlackboxedBoo: 17:12
testFunction: 3:4
+
diff --git a/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js b/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js
index 65b694b566..c1029a8785 100644
--- a/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js
+++ b/deps/v8/test/inspector/debugger/stepping-with-blackboxed-ranges.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.addScript(
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that blackboxed ranges are respected while stepping');
+
+contextGroup.addScript(
`function blackboxedBoo()
{
var a = 42;
@@ -11,7 +13,7 @@ InspectorTest.addScript(
}
//# sourceURL=blackboxed-script.js`);
-InspectorTest.addScript(
+contextGroup.addScript(
`function notBlackboxedFoo()
{
var a = 42;
@@ -34,7 +36,7 @@ function notBlackboxedBoo()
}
//# sourceURL=mixed-source.js`);
-InspectorTest.addScript(
+contextGroup.addScript(
`function testFunction()
{
notBlackboxedBoo(); // for setup ranges and stepOut
diff --git a/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script-expected.txt b/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script-expected.txt
index 8fa52f1916..65c32c3ec9 100644
--- a/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script-expected.txt
+++ b/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script-expected.txt
@@ -1 +1,2 @@
+Tests that stepping does not ignore injected script when passed a flag
InjectedScriptSource on stack.
diff --git a/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script.js b/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script.js
index 499611c897..d608137c81 100644
--- a/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script.js
+++ b/deps/v8/test/inspector/debugger/stepping-with-exposed-injected-script.js
@@ -3,8 +3,10 @@
// found in the LICENSE file.
// Flags: --expose-inspector-scripts
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that stepping does not ignore injected script when passed a flag');
+
Protocol.Debugger.onPaused(message => {
- let url = InspectorTest._scriptMap.get(message.params.callFrames[0].location.scriptId).url;
+ let url = session._scriptMap.get(message.params.callFrames[0].location.scriptId).url;
if (url !== 'test.js') {
InspectorTest.log('InjectedScriptSource on stack.');
InspectorTest.completeTest();
@@ -12,7 +14,7 @@ Protocol.Debugger.onPaused(message => {
Protocol.Debugger.stepInto();
});
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
Protocol.Debugger.enable();
Protocol.Debugger.pause();
Protocol.Runtime.evaluate({expression: 'console.log(42)//# sourceURL=test.js'})
diff --git a/deps/v8/test/inspector/debugger/stepping-with-natives-and-frameworks.js b/deps/v8/test/inspector/debugger/stepping-with-natives-and-frameworks.js
index 1be1acdd28..c6648ec932 100644
--- a/deps/v8/test/inspector/debugger/stepping-with-natives-and-frameworks.js
+++ b/deps/v8/test/inspector/debugger/stepping-with-natives-and-frameworks.js
@@ -2,16 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Stepping with natives and frameworks.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Stepping with natives and frameworks.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function callAll() {
for (var f of arguments)
f();
}
//# sourceURL=framework.js`);
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
InspectorTest.logProtocolCommandCalls('Debugger.pause');
InspectorTest.logProtocolCommandCalls('Debugger.stepInto');
InspectorTest.logProtocolCommandCalls('Debugger.stepOver');
@@ -296,5 +296,5 @@ InspectorTest.runAsyncTestSuite([
]);
function logPauseLocation(message) {
- return InspectorTest.logSourceLocation(message.params.callFrames[0].location);
+ return session.logSourceLocation(message.params.callFrames[0].location);
}
diff --git a/deps/v8/test/inspector/debugger/suspended-generator-scopes-expected.txt b/deps/v8/test/inspector/debugger/suspended-generator-scopes-expected.txt
index ed60c3e43c..f529c7ee24 100644
--- a/deps/v8/test/inspector/debugger/suspended-generator-scopes-expected.txt
+++ b/deps/v8/test/inspector/debugger/suspended-generator-scopes-expected.txt
@@ -1,3 +1,5 @@
+Tests that suspended generators produce scopes
+
Running test: testScopesPaused
{
id : <messageId>
diff --git a/deps/v8/test/inspector/debugger/suspended-generator-scopes.js b/deps/v8/test/inspector/debugger/suspended-generator-scopes.js
index edce5951ff..f7d389072a 100644
--- a/deps/v8/test/inspector/debugger/suspended-generator-scopes.js
+++ b/deps/v8/test/inspector/debugger/suspended-generator-scopes.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.addScript(
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that suspended generators produce scopes');
+
+contextGroup.addScript(
`function *gen(a) {
var b = 42;
yield a;
diff --git a/deps/v8/test/inspector/debugger/update-call-frame-scopes-expected.txt b/deps/v8/test/inspector/debugger/update-call-frame-scopes-expected.txt
index ed52d231c2..acad9cea39 100644
--- a/deps/v8/test/inspector/debugger/update-call-frame-scopes-expected.txt
+++ b/deps/v8/test/inspector/debugger/update-call-frame-scopes-expected.txt
@@ -1,7 +1,7 @@
+Tests updating call frame scopes
Paused on 'debugger;'
Variable value changed
Stacktrace re-read again
Scope variables downloaded anew
New variable is 55, expected is 55, old was: 2
SUCCESS
-
diff --git a/deps/v8/test/inspector/debugger/update-call-frame-scopes.js b/deps/v8/test/inspector/debugger/update-call-frame-scopes.js
index f4a0f12397..e0b7307b92 100644
--- a/deps/v8/test/inspector/debugger/update-call-frame-scopes.js
+++ b/deps/v8/test/inspector/debugger/update-call-frame-scopes.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.addScript(
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests updating call frame scopes');
+
+contextGroup.addScript(
`function TestFunction()
{
var a = 2;
diff --git a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt
index 7f869bf040..96c7a64bd4 100644
--- a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations-expected.txt
@@ -1,3 +1,4 @@
+Tests breakable locations in wasm
Running testFunction...
Script nr 0 parsed. URL: v8://test/setup
Script nr 1 parsed. URL: v8://test/runTestFunction
diff --git a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js
index eb70b29a82..ea2a116be5 100644
--- a/deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js
+++ b/deps/v8/test/inspector/debugger/wasm-get-breakable-locations.js
@@ -4,6 +4,8 @@
// Flags: --expose-wasm
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests breakable locations in wasm');
+
utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
diff --git a/deps/v8/test/inspector/debugger/wasm-imports-expected.txt b/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
index 0a53bdc521..a98b9d29b4 100644
--- a/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-imports-expected.txt
@@ -1,3 +1,4 @@
+Tests imports in wasm
Installing code and global variable.
Calling instantiate function for module A.
Waiting for wasm script to be parsed.
diff --git a/deps/v8/test/inspector/debugger/wasm-imports.js b/deps/v8/test/inspector/debugger/wasm-imports.js
index e2a87867e3..dbe96ce671 100644
--- a/deps/v8/test/inspector/debugger/wasm-imports.js
+++ b/deps/v8/test/inspector/debugger/wasm-imports.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests imports in wasm');
+
utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
@@ -39,7 +41,7 @@ function instantiate(bytes, imp) {
var evalWithUrl = (code, url) => Protocol.Runtime.evaluate(
{'expression': code + '\n//# sourceURL=v8://test/' + url});
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
// Main promise chain:
Protocol.Debugger.enable()
@@ -61,7 +63,7 @@ Protocol.Debugger.enable()
url =>
Protocol.Debugger.setBreakpointByUrl({lineNumber: 1, url: url}))
.then(printFailure)
- .then(msg => InspectorTest.logSourceLocations(msg.result.locations))
+ .then(msg => session.logSourceLocations(msg.result.locations))
.then(() => InspectorTest.log('Calling instantiate function for module B.'))
.then(
() =>
@@ -84,7 +86,7 @@ Protocol.Debugger.oncePaused()
(InspectorTest.log(
'Paused at ' + loc.lineNumber + ':' + loc.columnNumber + '.'),
loc))
- .then(InspectorTest.logSourceLocation)
+ .then(session.logSourceLocation.bind(session))
.then(
() => InspectorTest.log(
'Getting current stack trace via "new Error().stack".'))
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt b/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
index 41d2c0b380..5d23605566 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-scripts-expected.txt
@@ -1,3 +1,4 @@
+Tests how wasm scripts are reported
Check that inspector gets two wasm scripts at module creation time.
Script #0 parsed. URL: v8://test/testFunction
Script #1 parsed. URL: v8://test/runTestRunction
diff --git a/deps/v8/test/inspector/debugger/wasm-scripts.js b/deps/v8/test/inspector/debugger/wasm-scripts.js
index 9fe8c26e6a..0993f11b53 100644
--- a/deps/v8/test/inspector/debugger/wasm-scripts.js
+++ b/deps/v8/test/inspector/debugger/wasm-scripts.js
@@ -4,6 +4,8 @@
// Flags: --expose-wasm
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests how wasm scripts are reported');
+
utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
@@ -27,9 +29,8 @@ function testFunction(bytes) {
new WebAssembly.Module(buffer);
}
-InspectorTest.addScriptWithUrl(
- testFunction.toString(), 'v8://test/testFunction');
-InspectorTest.addScript('var module_bytes = ' + JSON.stringify(module_bytes));
+contextGroup.addScript(testFunction.toString(), 0, 0, 'v8://test/testFunction');
+contextGroup.addScript('var module_bytes = ' + JSON.stringify(module_bytes));
Protocol.Debugger.enable();
Protocol.Debugger.onScriptParsed(handleScriptParsed);
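
Note the signature change above: `addScriptWithUrl(source, url)` is folded into `addScript`, which now takes optional line, column, and url arguments. A one-line sketch of the merged form, assuming the (source, line, column, url) order shown in the hunk; the url here is a made-up example:

// Sketch (not in the patch): the merged addScript signature.
contextGroup.addScript('var module_bytes = [];', 0, 0, 'v8://test/example');
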
diff --git a/deps/v8/test/inspector/debugger/wasm-source-expected.txt b/deps/v8/test/inspector/debugger/wasm-source-expected.txt
index 8e2c125676..b140be0876 100644
--- a/deps/v8/test/inspector/debugger/wasm-source-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-source-expected.txt
@@ -1,3 +1,4 @@
+Tests how wasm scripts report the source
Check that inspector gets disassembled wasm code
Paused on debugger!
Number of frames: 5
diff --git a/deps/v8/test/inspector/debugger/wasm-source.js b/deps/v8/test/inspector/debugger/wasm-source.js
index cadd44f909..bf7bab735c 100644
--- a/deps/v8/test/inspector/debugger/wasm-source.js
+++ b/deps/v8/test/inspector/debugger/wasm-source.js
@@ -4,6 +4,8 @@
// Flags: --expose-wasm
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests how wasm scripts report the source');
+
utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
@@ -45,8 +47,8 @@ function testFunction(bytes) {
instance.exports.main();
}
-InspectorTest.addScript(testFunction.toString());
-InspectorTest.addScript('var module_bytes = ' + JSON.stringify(module_bytes));
+contextGroup.addScript(testFunction.toString());
+contextGroup.addScript('var module_bytes = ' + JSON.stringify(module_bytes));
Protocol.Debugger.enable();
Protocol.Debugger.onPaused(handleDebuggerPaused);
diff --git a/deps/v8/test/inspector/debugger/wasm-stack-expected.txt b/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
index df7d3a3f5a..c3226f5631 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stack-expected.txt
@@ -1,3 +1,4 @@
+Tests call stack in wasm scripts
Running testFunction with generated WASM bytes...
Paused on 'debugger;'
Number of frames: 5
diff --git a/deps/v8/test/inspector/debugger/wasm-stack.js b/deps/v8/test/inspector/debugger/wasm-stack.js
index 45ab6097f7..2603c31784 100644
--- a/deps/v8/test/inspector/debugger/wasm-stack.js
+++ b/deps/v8/test/inspector/debugger/wasm-stack.js
@@ -4,6 +4,8 @@
// Flags: --expose-wasm
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests call stack in wasm scripts');
+
utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
@@ -40,7 +42,7 @@ function testFunction(bytes) {
instance.exports.main();
}
-InspectorTest.addScript(testFunction.toString());
+contextGroup.addScript(testFunction.toString());
Protocol.Debugger.enable();
Protocol.Debugger.onPaused(handleDebuggerPaused);
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt b/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt
index a2df3e47b9..793552f7f5 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt
+++ b/deps/v8/test/inspector/debugger/wasm-stepping-expected.txt
@@ -1,3 +1,4 @@
+Tests stepping through wasm scripts
+Installing code and global variable.
Calling instantiate function.
Waiting for two wasm scripts to be parsed.
@@ -32,52 +33,298 @@ Setting breakpoint on line 7 (on the setlocal before the call), url wasm://wasm/
scriptId : <scriptId>
}
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >set_local 0
-Step action: stepInto
+at wasm_B (7:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 4 (number)
+ stack: {"0":3} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:8:6: >call 0
-Step action: stepInto
+at wasm_B (8:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 3 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:1:2: >nop
-Step action: stepOver
+at wasm_A (1:2):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ stack: {} (Object)
+at wasm_B (8:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 3 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepOver called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:2:2: >nop
-Step action: stepOut
+at wasm_A (2:2):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ stack: {} (Object)
+at wasm_B (8:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 3 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepOut called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:9:6: >br 1
-Step action: stepOut
+at wasm_B (9:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 3 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepOut called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >set_local 0
-Step action: stepOver
+at wasm_B (7:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 3 (number)
+ stack: {"0":2} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepOver called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:8:6: >call 0
-Step action: stepOver
+at wasm_B (8:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 2 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepOver called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:9:6: >br 1
-Step action: resume
+at wasm_B (9:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 2 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.resume called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >set_local 0
-Step action: stepInto
+at wasm_B (7:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 2 (number)
+ stack: {"0":1} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:8:6: >call 0
-Step action: stepInto
+at wasm_B (8:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 1 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:1:2: >nop
-Step action: stepOut
+at wasm_A (1:2):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ stack: {} (Object)
+at wasm_B (8:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 1 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepOut called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:9:6: >br 1
-Step action: stepInto
+at wasm_B (9:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 1 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:1:2: >loop
-Step action: stepInto
+at wasm_B (1:2):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 1 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:2:4: >get_local 0
-Step action: stepInto
+at wasm_B (2:4):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 1 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:3:4: >if
-Step action: stepInto
+at wasm_B (3:4):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 1 (number)
+ stack: {"0":1} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:4:6: >get_local 0
-Step action: stepInto
+at wasm_B (4:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 1 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:5:6: >i32.const 1
-Step action: stepInto
+at wasm_B (5:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 1 (number)
+ stack: {"0":1} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:6:6: >i32.sub
-Step action: stepInto
+at wasm_B (6:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 1 (number)
+ stack: {"0":1,"1":1} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:7:6: >set_local 0
-Step action: stepInto
+at wasm_B (7:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 1 (number)
+ stack: {"0":0} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:8:6: >call 0
-Step action: stepInto
+at wasm_B (8:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 0 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:1:2: >nop
-Step action: stepInto
+at wasm_A (1:2):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ stack: {} (Object)
+at wasm_B (8:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 0 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:2:2: >nop
-Step action: stepInto
+at wasm_A (2:2):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ stack: {} (Object)
+at wasm_B (8:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 0 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-0:3:0: >end
-Step action: stepInto
+at wasm_A (3:0):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ stack: {} (Object)
+at wasm_B (8:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 0 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.stepInto called
Paused at wasm://wasm/wasm-0c10a5fe/wasm-0c10a5fe-1:9:6: >br 1
-Step action: resume
+at wasm_B (9:6):
+ - scope (global):
+ -- skipped
+ - scope (local):
+ param#0: 0 (number)
+ stack: {} (Object)
+at (anonymous) (0:17):
+ - scope (global):
+ -- skipped
+Debugger.resume called
exports.main returned!
Finished!
diff --git a/deps/v8/test/inspector/debugger/wasm-stepping.js b/deps/v8/test/inspector/debugger/wasm-stepping.js
index 5f132df3ba..d3a2c64048 100644
--- a/deps/v8/test/inspector/debugger/wasm-stepping.js
+++ b/deps/v8/test/inspector/debugger/wasm-stepping.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests stepping through wasm scripts');
+
utils.load('test/mjsunit/wasm/wasm-constants.js');
utils.load('test/mjsunit/wasm/wasm-module-builder.js');
@@ -68,6 +70,9 @@ var step_actions = [
// then just resume.
'resume'
];
+for (var action of step_actions) {
+ InspectorTest.logProtocolCommandCalls('Debugger.' + action)
+}
var sources = {};
var urls = {};
var afterTwoSourcesCallback;
@@ -151,10 +156,48 @@ function printPauseLocation(scriptId, lineNr, columnNr) {
line);
}
+async function getValueString(value) {
+ if (value.type == 'object') {
+ var msg = await Protocol.Runtime.callFunctionOn({
+ objectId: value.objectId,
+ functionDeclaration: 'function () { return JSON.stringify(this); }'
+ });
+ printFailure(msg);
+ return msg.result.result.value + ' (' + value.description + ')';
+ }
+ return value.value + ' (' + value.type + ')';
+}
+
+async function dumpProperties(message) {
+ printFailure(message);
+ for (var value of message.result.result) {
+ var value_str = await getValueString(value.value);
+ InspectorTest.log(' ' + value.name + ': ' + value_str);
+ }
+}
+
+async function dumpScopeChainsOnPause(message) {
+ for (var frame of message.params.callFrames) {
+ var functionName = frame.functionName || '(anonymous)';
+ var lineNumber = frame.location ? frame.location.lineNumber : frame.lineNumber;
+ var columnNumber = frame.location ? frame.location.columnNumber : frame.columnNumber;
+ InspectorTest.log(`at ${functionName} (${lineNumber}:${columnNumber}):`);
+ for (var scope of frame.scopeChain) {
+ InspectorTest.logObject(' - scope (' + scope.type + '):');
+ if (scope.type == 'global') {
+ InspectorTest.logObject(' -- skipped');
+ } else {
+ var properties = await Protocol.Runtime.getProperties(
+ {'objectId': scope.object.objectId});
+ await dumpProperties(properties);
+ }
+ }
+ }
+}
+
function handlePaused(msg) {
var loc = msg.params.callFrames[0].location;
printPauseLocation(loc.scriptId, loc.lineNumber, loc.columnNumber);
- var action = step_actions.shift();
- InspectorTest.log('Step action: ' + action);
- Protocol.Debugger[action]();
+ dumpScopeChainsOnPause(msg)
+ .then(Protocol.Debugger[step_actions.shift() || 'resume']);
}
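
The rewritten pause handler above replaces the old "Step action:" log with `logProtocolCommandCalls` plus a full scope dump. The value-printing idiom it relies on, `Runtime.callFunctionOn` against a remote object's `objectId`, is worth isolating; a hedged restatement of the `getValueString` helper from the hunk, nothing beyond what it already does:

// Sketch (mirrors getValueString above): stringify a remote value.
async function describeRemoteValue(value) {
  if (value.type !== 'object') return value.value + ' (' + value.type + ')';
  // For objects, run JSON.stringify inside the inspected context.
  let msg = await Protocol.Runtime.callFunctionOn({
    objectId: value.objectId,
    functionDeclaration: 'function() { return JSON.stringify(this); }'
  });
  return msg.result.result.value + ' (' + value.description + ')';
}
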
diff --git a/deps/v8/test/inspector/heap-profiler/take-heap-snapshot-on-pause.js b/deps/v8/test/inspector/heap-profiler/take-heap-snapshot-on-pause.js
index bb8c211d5f..4e4bc400b5 100644
--- a/deps/v8/test/inspector/heap-profiler/take-heap-snapshot-on-pause.js
+++ b/deps/v8/test/inspector/heap-profiler/take-heap-snapshot-on-pause.js
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks that takeHeapSnapshot uses empty accessing_context for access \
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that takeHeapSnapshot uses empty accessing_context for access \
checks.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function testFunction() {
- var array = [ createObjectWithStrictCheck() ];
+ var array = [ inspector.createObjectWithStrictCheck() ];
debugger;
}
//# sourceURL=test.js`);
diff --git a/deps/v8/test/inspector/inspector-impl.cc b/deps/v8/test/inspector/inspector-impl.cc
index aa3b1447ff..664fdf8366 100644
--- a/deps/v8/test/inspector/inspector-impl.cc
+++ b/deps/v8/test/inspector/inspector-impl.cc
@@ -7,41 +7,35 @@
#include "include/v8.h"
#include "src/vector.h"
+#include "test/inspector/isolate-data.h"
+#include "test/inspector/task-runner.h"
namespace {
-const int kInspectorClientIndex = v8::Context::kDebugIdIndex + 1;
-
class ChannelImpl final : public v8_inspector::V8Inspector::Channel {
public:
- explicit ChannelImpl(InspectorClientImpl::FrontendChannel* frontend_channel)
- : frontend_channel_(frontend_channel) {}
+ ChannelImpl(InspectorClientImpl::FrontendChannel* frontend_channel,
+ int session_id)
+ : frontend_channel_(frontend_channel), session_id_(session_id) {}
virtual ~ChannelImpl() = default;
private:
void sendResponse(
int callId,
std::unique_ptr<v8_inspector::StringBuffer> message) override {
- frontend_channel_->SendMessageToFrontend(message->string());
+ frontend_channel_->SendMessageToFrontend(session_id_, message->string());
}
void sendNotification(
std::unique_ptr<v8_inspector::StringBuffer> message) override {
- frontend_channel_->SendMessageToFrontend(message->string());
+ frontend_channel_->SendMessageToFrontend(session_id_, message->string());
}
void flushProtocolNotifications() override {}
InspectorClientImpl::FrontendChannel* frontend_channel_;
+ int session_id_;
DISALLOW_COPY_AND_ASSIGN(ChannelImpl);
};
-InspectorClientImpl* InspectorClientFromContext(
- v8::Local<v8::Context> context) {
- InspectorClientImpl* inspector_client = static_cast<InspectorClientImpl*>(
- context->GetAlignedPointerFromEmbedderData(kInspectorClientIndex));
- CHECK(inspector_client);
- return inspector_client;
-}
-
v8::internal::Vector<uint16_t> ToVector(v8::Local<v8::String> str) {
v8::internal::Vector<uint16_t> buffer =
v8::internal::Vector<uint16_t>::New(str->Length());
@@ -55,7 +49,7 @@ void MessageHandler(v8::Local<v8::Message> message,
v8::Local<v8::Context> context = isolate->GetEnteredContext();
if (context.IsEmpty()) return;
v8_inspector::V8Inspector* inspector =
- InspectorClientImpl::InspectorFromContext(context);
+ IsolateData::FromContext(context)->inspector()->inspector();
v8::Local<v8::StackTrace> stack = message->GetStackTrace();
int script_id =
@@ -85,164 +79,111 @@ void MessageHandler(v8::Local<v8::Message> message,
inspector->createStackTrace(stack), script_id);
}
-} // namespace
-
-class ConnectTask : public TaskRunner::Task {
- public:
- ConnectTask(InspectorClientImpl* client, v8::base::Semaphore* ready_semaphore)
- : client_(client), ready_semaphore_(ready_semaphore) {}
- virtual ~ConnectTask() = default;
-
- bool is_inspector_task() final { return true; }
-
- void Run(v8::Isolate* isolate,
- const v8::Global<v8::Context>& global_context) {
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> context = global_context.Get(isolate);
- client_->connect(context);
- if (ready_semaphore_) ready_semaphore_->Signal();
- }
-
- private:
- InspectorClientImpl* client_;
- v8::base::Semaphore* ready_semaphore_;
-};
-
-class DisconnectTask : public TaskRunner::Task {
- public:
- explicit DisconnectTask(InspectorClientImpl* client, bool reset_inspector,
- v8::base::Semaphore* ready_semaphore)
- : client_(client),
- reset_inspector_(reset_inspector),
- ready_semaphore_(ready_semaphore) {}
- virtual ~DisconnectTask() = default;
-
- bool is_inspector_task() final { return true; }
-
- void Run(v8::Isolate* isolate,
- const v8::Global<v8::Context>& global_context) {
- client_->disconnect(reset_inspector_);
- if (ready_semaphore_) ready_semaphore_->Signal();
- }
-
- private:
- InspectorClientImpl* client_;
- bool reset_inspector_;
- v8::base::Semaphore* ready_semaphore_;
-};
-
-class CreateContextGroupTask : public TaskRunner::Task {
- public:
- CreateContextGroupTask(InspectorClientImpl* client,
- v8::ExtensionConfiguration* extensions,
- v8::base::Semaphore* ready_semaphore,
- int* context_group_id)
- : client_(client),
- extensions_(extensions),
- ready_semaphore_(ready_semaphore),
- context_group_id_(context_group_id) {}
- virtual ~CreateContextGroupTask() = default;
-
- bool is_inspector_task() final { return true; }
-
- void Run(v8::Isolate* isolate,
- const v8::Global<v8::Context>& global_context) {
- *context_group_id_ = client_->createContextGroup(extensions_);
- if (ready_semaphore_) ready_semaphore_->Signal();
- }
+v8::Local<v8::String> ToString(v8::Isolate* isolate,
+ const v8_inspector::StringView& string) {
+ if (string.is8Bit())
+ return v8::String::NewFromOneByte(isolate, string.characters8(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(string.length()))
+ .ToLocalChecked();
+ else
+ return v8::String::NewFromTwoByte(isolate, string.characters16(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(string.length()))
+ .ToLocalChecked();
+}
- private:
- InspectorClientImpl* client_;
- v8::ExtensionConfiguration* extensions_;
- v8::base::Semaphore* ready_semaphore_;
- int* context_group_id_;
-};
+void Print(v8::Isolate* isolate, const v8_inspector::StringView& string) {
+ v8::Local<v8::String> v8_string = ToString(isolate, string);
+ v8::String::Utf8Value utf8_string(v8_string);
+ fwrite(*utf8_string, sizeof(**utf8_string), utf8_string.length(), stdout);
+}
+} // namespace
-InspectorClientImpl::InspectorClientImpl(TaskRunner* task_runner,
- FrontendChannel* frontend_channel,
- v8::base::Semaphore* ready_semaphore)
- : isolate_(nullptr),
- task_runner_(task_runner),
+InspectorClientImpl::InspectorClientImpl(v8::Isolate* isolate,
+ TaskRunner* task_runner,
+ FrontendChannel* frontend_channel)
+ : task_runner_(task_runner),
+ isolate_(isolate),
frontend_channel_(frontend_channel) {
- task_runner_->Append(new ConnectTask(this, ready_semaphore));
+ isolate_->AddMessageListener(MessageHandler);
+ inspector_ = v8_inspector::V8Inspector::create(isolate_, this);
}
InspectorClientImpl::~InspectorClientImpl() {}
-void InspectorClientImpl::connect(v8::Local<v8::Context> context) {
- isolate_ = context->GetIsolate();
- isolate_->AddMessageListener(MessageHandler);
- channel_.reset(new ChannelImpl(frontend_channel_));
- inspector_ = v8_inspector::V8Inspector::create(isolate_, this);
-
- if (states_.empty()) {
- int context_group_id = TaskRunner::GetContextGroupId(context);
- v8_inspector::StringView state;
- sessions_[context_group_id] =
- inspector_->connect(context_group_id, channel_.get(), state);
- context->SetAlignedPointerInEmbedderData(kInspectorClientIndex, this);
- v8_inspector::V8ContextInfo info(context, context_group_id,
- v8_inspector::StringView());
- info.hasMemoryOnConsole = true;
- inspector_->contextCreated(info);
- } else {
- for (const auto& it : states_) {
- int context_group_id = it.first;
- v8::Local<v8::Context> context =
- task_runner_->GetContext(context_group_id);
- v8_inspector::StringView state = it.second->string();
- sessions_[context_group_id] =
- inspector_->connect(context_group_id, channel_.get(), state);
- context->SetAlignedPointerInEmbedderData(kInspectorClientIndex, this);
- v8_inspector::V8ContextInfo info(context, context_group_id,
- v8_inspector::StringView());
- info.hasMemoryOnConsole = true;
- inspector_->contextCreated(info);
- }
+int InspectorClientImpl::ConnectSession(int context_group_id,
+ const v8_inspector::StringView& state) {
+ int session_id = ++last_session_id_;
+ channels_[session_id].reset(new ChannelImpl(frontend_channel_, session_id));
+ sessions_[session_id] =
+ inspector_->connect(context_group_id, channels_[session_id].get(), state);
+ context_group_by_session_[sessions_[session_id].get()] = context_group_id;
+ return session_id;
+}
+
+std::unique_ptr<v8_inspector::StringBuffer>
+InspectorClientImpl::DisconnectSession(int session_id) {
+ auto it = sessions_.find(session_id);
+ CHECK(it != sessions_.end());
+ context_group_by_session_.erase(it->second.get());
+ std::unique_ptr<v8_inspector::StringBuffer> result = it->second->stateJSON();
+ sessions_.erase(it);
+ channels_.erase(session_id);
+ return result;
+}
+
+void InspectorClientImpl::SendMessage(int session_id,
+ const v8_inspector::StringView& message) {
+ auto it = sessions_.find(session_id);
+ if (it != sessions_.end()) it->second->dispatchProtocolMessage(message);
+}
+
+void InspectorClientImpl::BreakProgram(
+ int context_group_id, const v8_inspector::StringView& reason,
+ const v8_inspector::StringView& details) {
+ for (int session_id : GetSessionIds(context_group_id)) {
+ auto it = sessions_.find(session_id);
+ if (it != sessions_.end()) it->second->breakProgram(reason, details);
}
- states_.clear();
}
-void InspectorClientImpl::scheduleReconnect(
- v8::base::Semaphore* ready_semaphore) {
- task_runner_->Append(
- new DisconnectTask(this, /* reset_inspector */ true, nullptr));
- task_runner_->Append(new ConnectTask(this, ready_semaphore));
+void InspectorClientImpl::SchedulePauseOnNextStatement(
+ int context_group_id, const v8_inspector::StringView& reason,
+ const v8_inspector::StringView& details) {
+ for (int session_id : GetSessionIds(context_group_id)) {
+ auto it = sessions_.find(session_id);
+ if (it != sessions_.end())
+ it->second->schedulePauseOnNextStatement(reason, details);
+ }
}
-void InspectorClientImpl::scheduleDisconnect(
- v8::base::Semaphore* ready_semaphore) {
- task_runner_->Append(
- new DisconnectTask(this, /* reset_inspector */ false, ready_semaphore));
+void InspectorClientImpl::CancelPauseOnNextStatement(int context_group_id) {
+ for (int session_id : GetSessionIds(context_group_id)) {
+ auto it = sessions_.find(session_id);
+ if (it != sessions_.end()) it->second->cancelPauseOnNextStatement();
+ }
}
-void InspectorClientImpl::disconnect(bool reset_inspector) {
- for (const auto& it : sessions_) {
- states_[it.first] = it.second->stateJSON();
- }
- sessions_.clear();
- if (reset_inspector) inspector_.reset();
+void InspectorClientImpl::ContextCreated(v8::Local<v8::Context> context,
+ int context_group_id) {
+ v8_inspector::V8ContextInfo info(context, context_group_id,
+ v8_inspector::StringView());
+ info.hasMemoryOnConsole = true;
+ inspector_->contextCreated(info);
}
-void InspectorClientImpl::scheduleCreateContextGroup(
- v8::ExtensionConfiguration* extensions,
- v8::base::Semaphore* ready_semaphore, int* context_group_id) {
- task_runner_->Append(new CreateContextGroupTask(
- this, extensions, ready_semaphore, context_group_id));
+void InspectorClientImpl::ContextDestroyed(v8::Local<v8::Context> context) {
+ inspector_->contextDestroyed(context);
}
-int InspectorClientImpl::createContextGroup(
- v8::ExtensionConfiguration* extensions) {
- v8::HandleScope handle_scope(isolate_);
- v8::Local<v8::Context> context = task_runner_->NewContextGroup();
- context->SetAlignedPointerInEmbedderData(kInspectorClientIndex, this);
- int context_group_id = TaskRunner::GetContextGroupId(context);
- v8_inspector::StringView state;
- sessions_[context_group_id] =
- inspector_->connect(context_group_id, channel_.get(), state);
- inspector_->contextCreated(v8_inspector::V8ContextInfo(
- context, context_group_id, v8_inspector::StringView()));
- return context_group_id;
+std::vector<int> InspectorClientImpl::GetSessionIds(int context_group_id) {
+ std::vector<int> result;
+ for (auto& it : sessions_) {
+ if (context_group_by_session_[it.second.get()] == context_group_id)
+ result.push_back(it.first);
+ }
+ return result;
}
bool InspectorClientImpl::formatAccessorsAsProperties(
@@ -262,10 +203,10 @@ bool InspectorClientImpl::formatAccessorsAsProperties(
v8::Local<v8::Context> InspectorClientImpl::ensureDefaultContextInGroup(
int context_group_id) {
CHECK(isolate_);
- return task_runner_->GetContext(context_group_id);
+ return task_runner_->data()->GetContext(context_group_id);
}
-void InspectorClientImpl::setCurrentTimeMSForTest(double time) {
+void InspectorClientImpl::SetCurrentTimeMSForTest(double time) {
current_time_ = time;
current_time_set_for_test_ = true;
}
@@ -275,11 +216,15 @@ double InspectorClientImpl::currentTimeMS() {
return v8::base::OS::TimeCurrentMillis();
}
-void InspectorClientImpl::setMemoryInfoForTest(
+void InspectorClientImpl::SetMemoryInfoForTest(
v8::Local<v8::Value> memory_info) {
memory_info_.Reset(isolate_, memory_info);
}
+void InspectorClientImpl::SetLogConsoleApiMessageCalls(bool log) {
+ log_console_api_message_calls_ = log;
+}
+
v8::MaybeLocal<v8::Value> InspectorClientImpl::memoryInfo(
v8::Isolate* isolate, v8::Local<v8::Context>) {
if (memory_info_.IsEmpty()) return v8::MaybeLocal<v8::Value>();
@@ -294,72 +239,16 @@ void InspectorClientImpl::quitMessageLoopOnPause() {
task_runner_->QuitMessageLoop();
}
-v8_inspector::V8Inspector* InspectorClientImpl::InspectorFromContext(
- v8::Local<v8::Context> context) {
- return InspectorClientFromContext(context)->inspector_.get();
-}
-
-v8_inspector::V8InspectorSession* InspectorClientImpl::SessionFromContext(
- v8::Local<v8::Context> context) {
- int context_group_id = TaskRunner::GetContextGroupId(context);
- return InspectorClientFromContext(context)->sessions_[context_group_id].get();
-}
-
-v8_inspector::V8InspectorSession* InspectorClientImpl::session(
- int context_group_id) {
- if (context_group_id) {
- return sessions_[context_group_id].get();
- } else {
- return sessions_.begin()->second.get();
- }
-}
-
-class SendMessageToBackendTask : public TaskRunner::Task {
- public:
- explicit SendMessageToBackendTask(
- const v8::internal::Vector<uint16_t>& message, int context_group_id)
- : message_(message), context_group_id_(context_group_id) {}
-
- bool is_inspector_task() final { return true; }
-
- void Run(v8::Isolate* isolate,
- const v8::Global<v8::Context>& global_context) override {
- v8_inspector::V8InspectorSession* session = nullptr;
- {
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> context = global_context.Get(isolate);
- if (!context_group_id_) {
- session = InspectorClientImpl::SessionFromContext(context);
- } else {
- session = InspectorClientFromContext(context)
- ->sessions_[context_group_id_]
- .get();
- }
- if (!session) return;
- }
- v8_inspector::StringView message_view(message_.start(), message_.length());
- session->dispatchProtocolMessage(message_view);
- }
-
- private:
- v8::internal::Vector<uint16_t> message_;
- int context_group_id_;
-};
-
-TaskRunner* SendMessageToBackendExtension::backend_task_runner_ = nullptr;
-
-v8::Local<v8::FunctionTemplate>
-SendMessageToBackendExtension::GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name) {
- return v8::FunctionTemplate::New(
- isolate, SendMessageToBackendExtension::SendMessageToBackend);
-}
-
-void SendMessageToBackendExtension::SendMessageToBackend(
- const v8::FunctionCallbackInfo<v8::Value>& args) {
- CHECK(backend_task_runner_);
- CHECK(args.Length() == 2 && args[0]->IsString() && args[1]->IsInt32());
- v8::Local<v8::String> message = args[0].As<v8::String>();
- backend_task_runner_->Append(new SendMessageToBackendTask(
- ToVector(message), args[1].As<v8::Int32>()->Value()));
+void InspectorClientImpl::consoleAPIMessage(
+ int contextGroupId, v8::Isolate::MessageErrorLevel level,
+ const v8_inspector::StringView& message,
+ const v8_inspector::StringView& url, unsigned lineNumber,
+ unsigned columnNumber, v8_inspector::V8StackTrace* stack) {
+ if (!log_console_api_message_calls_) return;
+ Print(isolate_, message);
+ fprintf(stdout, " (");
+ Print(isolate_, url);
+ fprintf(stdout, ":%d:%d)", lineNumber, columnNumber);
+ Print(isolate_, stack->toString()->string());
+ fprintf(stdout, "\n");
}
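
The C++ rewrite above is what makes the JavaScript-side API possible: `InspectorClientImpl` now keys sessions and channels by a per-session id (`ConnectSession`/`DisconnectSession`/`SendMessage`) instead of holding a single channel per client, so several protocol sessions can coexist, including several on one context group. A test-side sketch of what that enables; attaching two sessions to one group via repeated `connect()` calls is an assumption extrapolated from this design, not a call shown elsewhere in the patch:

// Sketch (assumption: a group can hand out more than one session).
let group = new InspectorTest.ContextGroup();
let first = group.connect();
let second = group.connect();
(async function() {
  await first.Protocol.Runtime.enable();
  await second.Protocol.Runtime.enable();
  // Each session's messages travel over its own session-id channel.
  InspectorTest.completeTest();
})();
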
diff --git a/deps/v8/test/inspector/inspector-impl.h b/deps/v8/test/inspector/inspector-impl.h
index 89dd002e1c..edbec72cfb 100644
--- a/deps/v8/test/inspector/inspector-impl.h
+++ b/deps/v8/test/inspector/inspector-impl.h
@@ -5,11 +5,15 @@
#ifndef V8_TEST_INSPECTOR_PROTOCOL_INSPECTOR_IMPL_H_
#define V8_TEST_INSPECTOR_PROTOCOL_INSPECTOR_IMPL_H_
+#include <map>
+#include <vector>
+
#include "include/v8-inspector.h"
#include "include/v8.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
-#include "test/inspector/task-runner.h"
+
+class TaskRunner;
class InspectorClientImpl : public v8_inspector::V8InspectorClient {
public:
@@ -17,30 +21,30 @@ class InspectorClientImpl : public v8_inspector::V8InspectorClient {
public:
virtual ~FrontendChannel() = default;
virtual void SendMessageToFrontend(
- const v8_inspector::StringView& message) = 0;
+ int session_id, const v8_inspector::StringView& message) = 0;
};
- InspectorClientImpl(TaskRunner* task_runner,
- FrontendChannel* frontend_channel,
- v8::base::Semaphore* ready_semaphore);
+ InspectorClientImpl(v8::Isolate* isolate, TaskRunner* task_runner,
+ FrontendChannel* frontend_channel);
virtual ~InspectorClientImpl();
- void scheduleReconnect(v8::base::Semaphore* ready_semaphore);
- void scheduleDisconnect(v8::base::Semaphore* ready_semaphore);
- void scheduleCreateContextGroup(v8::ExtensionConfiguration* extensions,
- v8::base::Semaphore* ready_semaphore,
- int* context_group_id);
-
- static v8_inspector::V8Inspector* InspectorFromContext(
- v8::Local<v8::Context> context);
- static v8_inspector::V8InspectorSession* SessionFromContext(
- v8::Local<v8::Context> context);
-
- // context_group_id = 0 means default context group.
- v8_inspector::V8InspectorSession* session(int context_group_id = 0);
-
- void setCurrentTimeMSForTest(double time);
- void setMemoryInfoForTest(v8::Local<v8::Value> memory_info);
+ v8_inspector::V8Inspector* inspector() const { return inspector_.get(); }
+ int ConnectSession(int context_group_id,
+ const v8_inspector::StringView& state);
+ std::unique_ptr<v8_inspector::StringBuffer> DisconnectSession(int session_id);
+ void SendMessage(int session_id, const v8_inspector::StringView& message);
+ void BreakProgram(int context_group_id,
+ const v8_inspector::StringView& reason,
+ const v8_inspector::StringView& details);
+ void SchedulePauseOnNextStatement(int context_group_id,
+ const v8_inspector::StringView& reason,
+ const v8_inspector::StringView& details);
+ void CancelPauseOnNextStatement(int context_group_id);
+ void SetCurrentTimeMSForTest(double time);
+ void SetMemoryInfoForTest(v8::Local<v8::Value> memory_info);
+ void SetLogConsoleApiMessageCalls(bool log);
+ void ContextCreated(v8::Local<v8::Context> context, int context_group_id);
+ void ContextDestroyed(v8::Local<v8::Context> context);
private:
// V8InspectorClient implementation.
@@ -52,51 +56,29 @@ class InspectorClientImpl : public v8_inspector::V8InspectorClient {
v8::Local<v8::Context>) override;
void runMessageLoopOnPause(int context_group_id) override;
void quitMessageLoopOnPause() override;
+ void consoleAPIMessage(int contextGroupId,
+ v8::Isolate::MessageErrorLevel level,
+ const v8_inspector::StringView& message,
+ const v8_inspector::StringView& url,
+ unsigned lineNumber, unsigned columnNumber,
+ v8_inspector::V8StackTrace*) override;
- friend class SendMessageToBackendTask;
-
- friend class ConnectTask;
- void connect(v8::Local<v8::Context> context);
- friend class DisconnectTask;
- void disconnect(bool reset_inspector);
- friend class CreateContextGroupTask;
- int createContextGroup(v8::ExtensionConfiguration* extensions);
+ std::vector<int> GetSessionIds(int context_group_id);
std::unique_ptr<v8_inspector::V8Inspector> inspector_;
- std::unique_ptr<v8_inspector::V8Inspector::Channel> channel_;
-
+ int last_session_id_ = 0;
std::map<int, std::unique_ptr<v8_inspector::V8InspectorSession>> sessions_;
- std::map<int, std::unique_ptr<v8_inspector::StringBuffer>> states_;
-
+ std::map<v8_inspector::V8InspectorSession*, int> context_group_by_session_;
+ std::map<int, std::unique_ptr<v8_inspector::V8Inspector::Channel>> channels_;
+ TaskRunner* task_runner_;
v8::Isolate* isolate_;
v8::Global<v8::Value> memory_info_;
-
- TaskRunner* task_runner_;
FrontendChannel* frontend_channel_;
-
bool current_time_set_for_test_ = false;
double current_time_ = 0.0;
+ bool log_console_api_message_calls_ = false;
DISALLOW_COPY_AND_ASSIGN(InspectorClientImpl);
};
-class SendMessageToBackendExtension : public v8::Extension {
- public:
- SendMessageToBackendExtension()
- : v8::Extension("v8_inspector/frontend",
- "native function sendMessageToBackend();") {}
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name);
-
- static void set_backend_task_runner(TaskRunner* task_runner) {
- backend_task_runner_ = task_runner;
- }
-
- private:
- static void SendMessageToBackend(
- const v8::FunctionCallbackInfo<v8::Value>& args);
-
- static TaskRunner* backend_task_runner_;
-};
-
#endif // V8_TEST_INSPECTOR_PROTOCOL_INSPECTOR_IMPL_H_
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index b2dd6474bf..2e105c54d9 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -46,125 +46,256 @@ v8::internal::Vector<uint16_t> ToVector(v8::Local<v8::String> str) {
return buffer;
}
-class UtilsExtension : public v8::Extension {
+v8::Local<v8::String> ToV8String(v8::Isolate* isolate, const char* str) {
+ return v8::String::NewFromUtf8(isolate, str, v8::NewStringType::kNormal)
+ .ToLocalChecked();
+}
+
+v8::internal::Vector<uint16_t> ToVector(
+ const v8_inspector::StringView& string) {
+ v8::internal::Vector<uint16_t> buffer =
+ v8::internal::Vector<uint16_t>::New(static_cast<int>(string.length()));
+ for (size_t i = 0; i < string.length(); i++) {
+ if (string.is8Bit())
+ buffer[i] = string.characters8()[i];
+ else
+ buffer[i] = string.characters16()[i];
+ }
+ return buffer;
+}
+
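+// Runs on the backend task runner; writes the new context group id through
+// |context_group_id_| and signals |ready_semaphore_| so the posting thread
+// can block until the group exists.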
+class CreateContextGroupTask : public TaskRunner::Task {
public:
- UtilsExtension()
- : v8::Extension("v8_inspector/utils",
- "native function print();"
- "native function quit();"
- "native function setlocale();"
- "native function read();"
- "native function load();"
- "native function compileAndRunWithOrigin();"
- "native function setCurrentTimeMSForTest();"
- "native function setMemoryInfoForTest();"
- "native function schedulePauseOnNextStatement();"
- "native function cancelPauseOnNextStatement();"
- "native function reconnect();"
- "native function disconnect();"
- "native function createContextGroup();") {}
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name) {
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- if (name->Equals(context, v8::String::NewFromUtf8(
- isolate, "print", v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate, UtilsExtension::Print);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "quit",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate, UtilsExtension::Quit);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "setlocale",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate, UtilsExtension::SetLocale);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "read",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate, UtilsExtension::Read);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "load",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate, UtilsExtension::Load);
- } else if (name->Equals(context, v8::String::NewFromUtf8(
- isolate, "compileAndRunWithOrigin",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate,
- UtilsExtension::CompileAndRunWithOrigin);
- } else if (name->Equals(context, v8::String::NewFromUtf8(
- isolate, "setCurrentTimeMSForTest",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate,
- UtilsExtension::SetCurrentTimeMSForTest);
- } else if (name->Equals(context, v8::String::NewFromUtf8(
- isolate, "setMemoryInfoForTest",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate,
- UtilsExtension::SetMemoryInfoForTest);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(
- isolate, "schedulePauseOnNextStatement",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(
- isolate, UtilsExtension::SchedulePauseOnNextStatement);
- } else if (name->Equals(context, v8::String::NewFromUtf8(
- isolate, "cancelPauseOnNextStatement",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(
- isolate, UtilsExtension::CancelPauseOnNextStatement);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "reconnect",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate, UtilsExtension::Reconnect);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "disconnect",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate, UtilsExtension::Disconnect);
- } else if (name->Equals(context, v8::String::NewFromUtf8(
- isolate, "createContextGroup",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate,
- UtilsExtension::CreateContextGroup);
- }
- return v8::Local<v8::FunctionTemplate>();
+ CreateContextGroupTask(v8::base::Semaphore* ready_semaphore,
+ int* context_group_id)
+ : ready_semaphore_(ready_semaphore),
+ context_group_id_(context_group_id) {}
+ virtual ~CreateContextGroupTask() = default;
+ bool is_inspector_task() final { return true; }
+
+ private:
+ void Run() override {
+ *context_group_id_ = data()->CreateContextGroup();
+ if (ready_semaphore_) ready_semaphore_->Signal();
}
- static void set_backend_task_runner(TaskRunner* runner) {
- backend_runner_ = runner;
+ v8::base::Semaphore* ready_semaphore_;
+ int* context_group_id_;
+};
+
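+// Connects a session to |context_group_id_| on the backend thread, restoring
+// any serialized |state_|, and reports the new session id back to the caller.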
+class ConnectSessionTask : public TaskRunner::Task {
+ public:
+ ConnectSessionTask(v8::base::Semaphore* ready_semaphore, int context_group_id,
+ const v8::internal::Vector<uint16_t>& state,
+ int* session_id)
+ : ready_semaphore_(ready_semaphore),
+ context_group_id_(context_group_id),
+ state_(state),
+ session_id_(session_id) {}
+ virtual ~ConnectSessionTask() = default;
+ bool is_inspector_task() final { return true; }
+
+ private:
+ void Run() override {
+ v8_inspector::StringView state(state_.start(), state_.length());
+ *session_id_ =
+ data()->inspector()->ConnectSession(context_group_id_, state);
+ if (ready_semaphore_) ready_semaphore_->Signal();
+ }
+
+ v8::base::Semaphore* ready_semaphore_;
+ int context_group_id_;
+ const v8::internal::Vector<uint16_t>& state_;
+ int* session_id_;
+};
+
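+// Disconnects a session on the backend thread and returns its serialized
+// state through |state_| so that a later ConnectSessionTask can restore it.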
+class DisconnectSessionTask : public TaskRunner::Task {
+ public:
+ DisconnectSessionTask(v8::base::Semaphore* ready_semaphore, int session_id,
+ v8::internal::Vector<uint16_t>* state)
+ : ready_semaphore_(ready_semaphore),
+ session_id_(session_id),
+ state_(state) {}
+ virtual ~DisconnectSessionTask() = default;
+ bool is_inspector_task() final { return true; }
+
+ private:
+ void Run() override {
+ std::unique_ptr<v8_inspector::StringBuffer> state =
+ data()->inspector()->DisconnectSession(session_id_);
+ *state_ = ToVector(state->string());
+ if (ready_semaphore_) ready_semaphore_->Signal();
+ }
+
+ v8::base::Semaphore* ready_semaphore_;
+ int session_id_;
+ v8::internal::Vector<uint16_t>* state_;
+};
+
+class SendMessageToBackendTask : public TaskRunner::Task {
+ public:
+ explicit SendMessageToBackendTask(
+ int session_id, const v8::internal::Vector<uint16_t>& message)
+ : session_id_(session_id), message_(message) {}
+ bool is_inspector_task() final { return true; }
+
+ private:
+ void Run() override {
+ v8_inspector::StringView message_view(message_.start(), message_.length());
+ data()->inspector()->SendMessage(session_id_, message_view);
+ }
+
+ int session_id_;
+ v8::internal::Vector<uint16_t> message_;
+};
+
+class SchedulePauseOnNextStatementTask : public TaskRunner::Task {
+ public:
+ SchedulePauseOnNextStatementTask(
+ v8::base::Semaphore* ready_semaphore, int context_group_id,
+ const v8::internal::Vector<uint16_t>& reason,
+ const v8::internal::Vector<uint16_t>& details)
+ : ready_semaphore_(ready_semaphore),
+ context_group_id_(context_group_id),
+ reason_(reason),
+ details_(details) {}
+ virtual ~SchedulePauseOnNextStatementTask() = default;
+ bool is_inspector_task() final { return true; }
+
+ private:
+ void Run() override {
+ v8_inspector::StringView reason(reason_.start(), reason_.length());
+ v8_inspector::StringView details(details_.start(), details_.length());
+ data()->inspector()->SchedulePauseOnNextStatement(context_group_id_, reason,
+ details);
+ if (ready_semaphore_) ready_semaphore_->Signal();
}
- static void set_inspector_client(InspectorClientImpl* client) {
- inspector_client_ = client;
+ v8::base::Semaphore* ready_semaphore_;
+ int context_group_id_;
+ const v8::internal::Vector<uint16_t>& reason_;
+ const v8::internal::Vector<uint16_t>& details_;
+};
+
+class CancelPauseOnNextStatementTask : public TaskRunner::Task {
+ public:
+ CancelPauseOnNextStatementTask(v8::base::Semaphore* ready_semaphore,
+ int context_group_id)
+ : ready_semaphore_(ready_semaphore),
+ context_group_id_(context_group_id) {}
+ virtual ~CancelPauseOnNextStatementTask() = default;
+ bool is_inspector_task() final { return true; }
+
+ private:
+ void Run() override {
+ data()->inspector()->CancelPauseOnNextStatement(context_group_id_);
+ if (ready_semaphore_) ready_semaphore_->Signal();
+ }
+
+ v8::base::Semaphore* ready_semaphore_;
+ int context_group_id_;
+};
+
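+// Runs on the frontend task runner: looks up the dispatcher function
+// registered for |session_id_| and invokes it with the backend's message.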
+class SendMessageToFrontendTask : public TaskRunner::Task {
+ public:
+ SendMessageToFrontendTask(int context_group_id, int session_id,
+ const v8::internal::Vector<uint16_t>& message)
+ : context_group_id_(context_group_id),
+ session_id_(session_id),
+ message_(message) {}
+ virtual ~SendMessageToFrontendTask() {}
+
+ bool is_inspector_task() final { return false; }
+
+ static void Register(int session_id, v8::Isolate* isolate,
+ v8::Local<v8::Function> dispatcher) {
+ dispatchers_[session_id].Reset(isolate, dispatcher);
+ }
+
+ static void Unregister(int session_id) { dispatchers_.erase(session_id); }
+
+ private:
+ void Run() override {
+ v8::MicrotasksScope microtasks_scope(isolate(),
+ v8::MicrotasksScope::kRunMicrotasks);
+ v8::HandleScope handle_scope(isolate());
+ v8::Local<v8::Context> context = data()->GetContext(context_group_id_);
+ v8::Context::Scope context_scope(context);
+
+ if (dispatchers_.find(session_id_) == dispatchers_.end()) return;
+ v8::Local<v8::Function> function = dispatchers_[session_id_].Get(isolate());
+ v8::Local<v8::Value> message =
+ v8::String::NewFromTwoByte(isolate(), message_.start(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(message_.size()))
+ .ToLocalChecked();
+ v8::MaybeLocal<v8::Value> result;
+ result = function->Call(context, context->Global(), 1, &message);
+ }
+
+ static std::map<int, v8::Global<v8::Function>> dispatchers_;
+ int context_group_id_;
+ int session_id_;
+ v8::internal::Vector<uint16_t> message_;
+};
+
+std::map<int, v8::Global<v8::Function>> SendMessageToFrontendTask::dispatchers_;
+
+class UtilsExtension : public IsolateData::SetupGlobalTask {
+ public:
+ ~UtilsExtension() override = default;
+ void Run(v8::Isolate* isolate,
+ v8::Local<v8::ObjectTemplate> global) override {
+ v8::Local<v8::ObjectTemplate> utils = v8::ObjectTemplate::New(isolate);
+ utils->Set(ToV8String(isolate, "print"),
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::Print));
+ utils->Set(ToV8String(isolate, "quit"),
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::Quit));
+ utils->Set(ToV8String(isolate, "setlocale"),
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::Setlocale));
+ utils->Set(ToV8String(isolate, "read"),
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::Read));
+ utils->Set(ToV8String(isolate, "load"),
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::Load));
+ utils->Set(ToV8String(isolate, "compileAndRunWithOrigin"),
+ v8::FunctionTemplate::New(
+ isolate, &UtilsExtension::CompileAndRunWithOrigin));
+ utils->Set(ToV8String(isolate, "setCurrentTimeMSForTest"),
+ v8::FunctionTemplate::New(
+ isolate, &UtilsExtension::SetCurrentTimeMSForTest));
+ utils->Set(ToV8String(isolate, "setMemoryInfoForTest"),
+ v8::FunctionTemplate::New(
+ isolate, &UtilsExtension::SetMemoryInfoForTest));
+ utils->Set(ToV8String(isolate, "schedulePauseOnNextStatement"),
+ v8::FunctionTemplate::New(
+ isolate, &UtilsExtension::SchedulePauseOnNextStatement));
+ utils->Set(ToV8String(isolate, "cancelPauseOnNextStatement"),
+ v8::FunctionTemplate::New(
+ isolate, &UtilsExtension::CancelPauseOnNextStatement));
+ utils->Set(ToV8String(isolate, "setLogConsoleApiMessageCalls"),
+ v8::FunctionTemplate::New(
+ isolate, &UtilsExtension::SetLogConsoleApiMessageCalls));
+ utils->Set(ToV8String(isolate, "createContextGroup"),
+ v8::FunctionTemplate::New(isolate,
+ &UtilsExtension::CreateContextGroup));
+ utils->Set(
+ ToV8String(isolate, "connectSession"),
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::ConnectSession));
+ utils->Set(
+ ToV8String(isolate, "disconnectSession"),
+ v8::FunctionTemplate::New(isolate, &UtilsExtension::DisconnectSession));
+ utils->Set(ToV8String(isolate, "sendMessageToBackend"),
+ v8::FunctionTemplate::New(
+ isolate, &UtilsExtension::SendMessageToBackend));
+ global->Set(ToV8String(isolate, "utils"), utils);
+ }
+
+ static void set_backend_task_runner(TaskRunner* runner) {
+ backend_runner_ = runner;
}
private:
static TaskRunner* backend_runner_;
- static InspectorClientImpl* inspector_client_;
static void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
for (int i = 0; i < args.Length(); i++) {
@@ -201,7 +332,7 @@ class UtilsExtension : public v8::Extension {
static void Quit(const v8::FunctionCallbackInfo<v8::Value>& args) { Exit(); }
- static void SetLocale(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ static void Setlocale(const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 1 || !args[0]->IsString()) {
fprintf(stderr, "Internal error: setlocale get one string argument.");
Exit();
@@ -248,27 +379,31 @@ class UtilsExtension : public v8::Extension {
}
v8::internal::Vector<const char> chars;
v8::Isolate* isolate = args.GetIsolate();
+ v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ int context_group_id = data->GetContextGroupId(context);
if (ReadFile(isolate, args[0], &chars)) {
- ExecuteStringTask task(chars);
- v8::Global<v8::Context> context(isolate, isolate->GetCurrentContext());
- task.Run(isolate, context);
+ ExecuteStringTask(chars, context_group_id).RunOnIsolate(data);
}
}
static void CompileAndRunWithOrigin(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 5 || !args[0]->IsString() || !args[1]->IsString() ||
- !args[2]->IsInt32() || !args[3]->IsInt32() || !args[4]->IsBoolean()) {
+ if (args.Length() != 6 || !args[0]->IsInt32() || !args[1]->IsString() ||
+ !args[2]->IsString() || !args[3]->IsInt32() || !args[4]->IsInt32() ||
+ !args[5]->IsBoolean()) {
fprintf(stderr,
- "Internal error: compileAndRunWithOrigin(source, name, line, "
+ "Internal error: compileAndRunWithOrigin(context_group_id, "
+ "source, name, line, "
"column, is_module).");
Exit();
}
backend_runner_->Append(new ExecuteStringTask(
- ToVector(args[0].As<v8::String>()), args[1].As<v8::String>(),
- args[2].As<v8::Int32>(), args[3].As<v8::Int32>(),
- args[4].As<v8::Boolean>(), nullptr, nullptr));
+ nullptr, args[0].As<v8::Int32>()->Value(), nullptr,
+ ToVector(args[1].As<v8::String>()), args[2].As<v8::String>(),
+ args[3].As<v8::Int32>(), args[4].As<v8::Int32>(),
+ args[5].As<v8::Boolean>()));
}
static void SetCurrentTimeMSForTest(
@@ -277,7 +412,7 @@ class UtilsExtension : public v8::Extension {
fprintf(stderr, "Internal error: setCurrentTimeMSForTest(time).");
Exit();
}
- inspector_client_->setCurrentTimeMSForTest(
+ backend_runner_->data()->inspector()->SetCurrentTimeMSForTest(
args[0].As<v8::Number>()->Value());
}
@@ -287,112 +422,153 @@ class UtilsExtension : public v8::Extension {
fprintf(stderr, "Internal error: setMemoryInfoForTest(value).");
Exit();
}
- inspector_client_->setMemoryInfoForTest(args[0]);
+ backend_runner_->data()->inspector()->SetMemoryInfoForTest(args[0]);
}
static void SchedulePauseOnNextStatement(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 2 || !args[0]->IsString() || !args[1]->IsString()) {
- fprintf(
- stderr,
- "Internal error: schedulePauseOnNextStatement('reason', 'details').");
+ if (args.Length() != 3 || !args[0]->IsInt32() || !args[1]->IsString() ||
+ !args[2]->IsString()) {
+ fprintf(stderr,
+ "Internal error: schedulePauseOnNextStatement(context_group_id, "
+ "'reason', 'details').");
Exit();
}
- v8::internal::Vector<uint16_t> reason = ToVector(args[0].As<v8::String>());
- v8_inspector::StringView reason_view(reason.start(), reason.length());
- v8::internal::Vector<uint16_t> details = ToVector(args[1].As<v8::String>());
- v8_inspector::StringView details_view(details.start(), details.length());
- inspector_client_->session()->schedulePauseOnNextStatement(reason_view,
- details_view);
+ v8::internal::Vector<uint16_t> reason = ToVector(args[1].As<v8::String>());
+ v8::internal::Vector<uint16_t> details = ToVector(args[2].As<v8::String>());
+ v8::base::Semaphore ready_semaphore(0);
+ backend_runner_->Append(new SchedulePauseOnNextStatementTask(
+ &ready_semaphore, args[0].As<v8::Int32>()->Value(), reason, details));
+ ready_semaphore.Wait();
}
static void CancelPauseOnNextStatement(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 0) {
- fprintf(stderr, "Internal error: cancelPauseOnNextStatement().");
+ if (args.Length() != 1 || !args[0]->IsInt32()) {
+ fprintf(stderr,
+ "Internal error: cancelPauseOnNextStatement(context_group_id).");
+ Exit();
+ }
+ v8::base::Semaphore ready_semaphore(0);
+ backend_runner_->Append(new CancelPauseOnNextStatementTask(
+ &ready_semaphore, args[0].As<v8::Int32>()->Value()));
+ ready_semaphore.Wait();
+ }
+
+ static void SetLogConsoleApiMessageCalls(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 1 || !args[0]->IsBoolean()) {
+ fprintf(stderr, "Internal error: setLogConsoleApiMessageCalls(bool).");
Exit();
}
- inspector_client_->session()->cancelPauseOnNextStatement();
+ backend_runner_->data()->inspector()->SetLogConsoleApiMessageCalls(
+ args[0].As<v8::Boolean>()->Value());
}
- static void Reconnect(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ static void CreateContextGroup(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 0) {
- fprintf(stderr, "Internal error: reconnect().");
+ fprintf(stderr, "Internal error: createContextGroup().");
Exit();
}
v8::base::Semaphore ready_semaphore(0);
- inspector_client_->scheduleReconnect(&ready_semaphore);
+ int context_group_id = 0;
+ backend_runner_->Append(
+ new CreateContextGroupTask(&ready_semaphore, &context_group_id));
ready_semaphore.Wait();
+ args.GetReturnValue().Set(
+ v8::Int32::New(args.GetIsolate(), context_group_id));
}
- static void Disconnect(const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 0) {
- fprintf(stderr, "Internal error: disconnect().");
+ static void ConnectSession(const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 3 || !args[0]->IsInt32() || !args[1]->IsString() ||
+ !args[2]->IsFunction()) {
+ fprintf(stderr,
+ "Internal error: connectionSession(context_group_id, state, "
+ "dispatch).");
Exit();
}
+ v8::internal::Vector<uint16_t> state = ToVector(args[1].As<v8::String>());
v8::base::Semaphore ready_semaphore(0);
- inspector_client_->scheduleDisconnect(&ready_semaphore);
+ int session_id = 0;
+ backend_runner_->Append(new ConnectSessionTask(
+ &ready_semaphore, args[0].As<v8::Int32>()->Value(), state,
+ &session_id));
ready_semaphore.Wait();
+ SendMessageToFrontendTask::Register(session_id, args.GetIsolate(),
+ args[2].As<v8::Function>());
+ args.GetReturnValue().Set(v8::Int32::New(args.GetIsolate(), session_id));
}
- static void CreateContextGroup(
+ static void DisconnectSession(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- if (args.Length() != 0) {
- fprintf(stderr, "Internal error: createContextGroup().");
+ if (args.Length() != 1 || !args[0]->IsInt32()) {
+ fprintf(stderr, "Internal error: disconnectionSession(session_id).");
Exit();
}
- const char* backend_extensions[] = {"v8_inspector/setTimeout",
- "v8_inspector/inspector"};
- v8::ExtensionConfiguration backend_configuration(
- arraysize(backend_extensions), backend_extensions);
+ int session_id = args[0].As<v8::Int32>()->Value();
+ SendMessageToFrontendTask::Unregister(session_id);
v8::base::Semaphore ready_semaphore(0);
- int context_group_id = 0;
- inspector_client_->scheduleCreateContextGroup(
- &backend_configuration, &ready_semaphore, &context_group_id);
+ v8::internal::Vector<uint16_t> state;
+ backend_runner_->Append(
+ new DisconnectSessionTask(&ready_semaphore, session_id, &state));
ready_semaphore.Wait();
args.GetReturnValue().Set(
- v8::Int32::New(args.GetIsolate(), context_group_id));
+ v8::String::NewFromTwoByte(args.GetIsolate(), state.start(),
+ v8::NewStringType::kNormal,
+ static_cast<int>(state.size()))
+ .ToLocalChecked());
+ }
+
+ static void SendMessageToBackend(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 2 || !args[0]->IsInt32() || !args[1]->IsString()) {
+ fprintf(stderr,
+ "Internal error: sendMessageToBackend(session_id, message).");
+ Exit();
+ }
+ backend_runner_->Append(new SendMessageToBackendTask(
+ args[0].As<v8::Int32>()->Value(), ToVector(args[1].As<v8::String>())));
}
};
TaskRunner* UtilsExtension::backend_runner_ = nullptr;
-InspectorClientImpl* UtilsExtension::inspector_client_ = nullptr;
class SetTimeoutTask : public AsyncTask {
public:
- SetTimeoutTask(v8::Isolate* isolate, v8::Local<v8::Function> function,
- const char* task_name, v8_inspector::V8Inspector* inspector)
- : AsyncTask(task_name, inspector), function_(isolate, function) {}
+ SetTimeoutTask(IsolateData* data, int context_group_id, const char* task_name,
+ v8::Local<v8::Function> function)
+ : AsyncTask(data, task_name),
+ function_(data->isolate(), function),
+ context_group_id_(context_group_id) {}
virtual ~SetTimeoutTask() {}
bool is_inspector_task() final { return false; }
- void AsyncRun(v8::Isolate* isolate,
- const v8::Global<v8::Context>& global_context) override {
- v8::MicrotasksScope microtasks_scope(isolate,
+ private:
+ void AsyncRun() override {
+ v8::MicrotasksScope microtasks_scope(isolate(),
v8::MicrotasksScope::kRunMicrotasks);
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> context = global_context.Get(isolate);
+ v8::HandleScope handle_scope(isolate());
+ v8::Local<v8::Context> context = data()->GetContext(context_group_id_);
v8::Context::Scope context_scope(context);
- v8::Local<v8::Function> function = function_.Get(isolate);
+ v8::Local<v8::Function> function = function_.Get(isolate());
v8::MaybeLocal<v8::Value> result;
result = function->Call(context, context->Global(), 0, nullptr);
}
- private:
v8::Global<v8::Function> function_;
+ int context_group_id_;
};
-class SetTimeoutExtension : public v8::Extension {
+class SetTimeoutExtension : public IsolateData::SetupGlobalTask {
public:
- SetTimeoutExtension()
- : v8::Extension("v8_inspector/setTimeout",
- "native function setTimeout();") {}
-
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name) {
- return v8::FunctionTemplate::New(isolate, SetTimeoutExtension::SetTimeout);
+ void Run(v8::Isolate* isolate,
+ v8::Local<v8::ObjectTemplate> global) override {
+ global->Set(
+ ToV8String(isolate, "setTimeout"),
+ v8::FunctionTemplate::New(isolate, &SetTimeoutExtension::SetTimeout));
}
private:
@@ -400,26 +576,27 @@ class SetTimeoutExtension : public v8::Extension {
if (args.Length() != 2 || !args[1]->IsNumber() ||
(!args[0]->IsFunction() && !args[0]->IsString()) ||
args[1].As<v8::Number>()->Value() != 0.0) {
- fprintf(stderr,
- "Internal error: only setTimeout(function, 0) is supported.");
+ fprintf(
+ stderr,
+ "Internal error: only setTimeout(function|code, 0) is supported.");
Exit();
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ int context_group_id = data->GetContextGroupId(context);
std::unique_ptr<TaskRunner::Task> task;
- v8_inspector::V8Inspector* inspector =
- InspectorClientImpl::InspectorFromContext(context);
if (args[0]->IsFunction()) {
- task.reset(new SetTimeoutTask(isolate,
- v8::Local<v8::Function>::Cast(args[0]),
- "setTimeout", inspector));
+ task.reset(new SetTimeoutTask(data, context_group_id, "setTimeout",
+ v8::Local<v8::Function>::Cast(args[0])));
} else {
task.reset(new ExecuteStringTask(
+ data, context_group_id, "setTimeout",
ToVector(args[0].As<v8::String>()), v8::String::Empty(isolate),
v8::Integer::New(isolate, 0), v8::Integer::New(isolate, 0),
- v8::Boolean::New(isolate, false), "setTimeout", inspector));
+ v8::Boolean::New(isolate, false)));
}
- TaskRunner::FromContext(context)->Append(task.release());
+ data->task_runner()->Append(task.release());
}
};
@@ -430,96 +607,55 @@ bool StrictAccessCheck(v8::Local<v8::Context> accessing_context,
return accessing_context.IsEmpty();
}
-class InspectorExtension : public v8::Extension {
+class InspectorExtension : public IsolateData::SetupGlobalTask {
public:
- InspectorExtension()
- : v8::Extension("v8_inspector/inspector",
- "native function attachInspector();"
- "native function detachInspector();"
- "native function setMaxAsyncTaskStacks();"
- "native function breakProgram();"
- "native function createObjectWithStrictCheck();"
- "native function callWithScheduledBreak();"
- "native function allowAccessorFormatting();") {}
-
- virtual v8::Local<v8::FunctionTemplate> GetNativeFunctionTemplate(
- v8::Isolate* isolate, v8::Local<v8::String> name) {
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "attachInspector",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate, InspectorExtension::Attach);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "detachInspector",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate, InspectorExtension::Detach);
- } else if (name->Equals(context, v8::String::NewFromUtf8(
- isolate, "setMaxAsyncTaskStacks",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(
- isolate, InspectorExtension::SetMaxAsyncTaskStacks);
- } else if (name->Equals(context,
- v8::String::NewFromUtf8(isolate, "breakProgram",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(isolate,
- InspectorExtension::BreakProgram);
- } else if (name->Equals(context, v8::String::NewFromUtf8(
- isolate, "createObjectWithStrictCheck",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(
- isolate, InspectorExtension::CreateObjectWithStrictCheck);
- } else if (name->Equals(context, v8::String::NewFromUtf8(
- isolate, "callWithScheduledBreak",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(
- isolate, InspectorExtension::CallWithScheduledBreak);
- } else if (name->Equals(context, v8::String::NewFromUtf8(
- isolate, "allowAccessorFormatting",
- v8::NewStringType::kNormal)
- .ToLocalChecked())
- .FromJust()) {
- return v8::FunctionTemplate::New(
- isolate, InspectorExtension::AllowAccessorFormatting);
- }
- return v8::Local<v8::FunctionTemplate>();
+ ~InspectorExtension() override = default;
+ void Run(v8::Isolate* isolate,
+ v8::Local<v8::ObjectTemplate> global) override {
+ v8::Local<v8::ObjectTemplate> inspector = v8::ObjectTemplate::New(isolate);
+ inspector->Set(ToV8String(isolate, "fireContextCreated"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::FireContextCreated));
+ inspector->Set(ToV8String(isolate, "fireContextDestroyed"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::FireContextDestroyed));
+ inspector->Set(ToV8String(isolate, "setMaxAsyncTaskStacks"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::SetMaxAsyncTaskStacks));
+ inspector->Set(
+ ToV8String(isolate, "dumpAsyncTaskStacksStateForTest"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::DumpAsyncTaskStacksStateForTest));
+ inspector->Set(
+ ToV8String(isolate, "breakProgram"),
+ v8::FunctionTemplate::New(isolate, &InspectorExtension::BreakProgram));
+ inspector->Set(
+ ToV8String(isolate, "createObjectWithStrictCheck"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::CreateObjectWithStrictCheck));
+ inspector->Set(ToV8String(isolate, "callWithScheduledBreak"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::CallWithScheduledBreak));
+ inspector->Set(ToV8String(isolate, "allowAccessorFormatting"),
+ v8::FunctionTemplate::New(
+ isolate, &InspectorExtension::AllowAccessorFormatting));
+ global->Set(ToV8String(isolate, "inspector"), inspector);
}
private:
- static void Attach(const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::Isolate* isolate = args.GetIsolate();
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- v8_inspector::V8Inspector* inspector =
- InspectorClientImpl::InspectorFromContext(context);
- if (!inspector) {
- fprintf(stderr, "Inspector client not found - cannot attach!");
- Exit();
- }
- inspector->contextCreated(
- v8_inspector::V8ContextInfo(context, 1, v8_inspector::StringView()));
+ static void FireContextCreated(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ data->inspector()->ContextCreated(context,
+ data->GetContextGroupId(context));
}
- static void Detach(const v8::FunctionCallbackInfo<v8::Value>& args) {
- v8::Isolate* isolate = args.GetIsolate();
- v8::Local<v8::Context> context = isolate->GetCurrentContext();
- v8_inspector::V8Inspector* inspector =
- InspectorClientImpl::InspectorFromContext(context);
- if (!inspector) {
- fprintf(stderr, "Inspector client not found - cannot detach!");
- Exit();
- }
- inspector->contextDestroyed(context);
+ static void FireContextDestroyed(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ data->inspector()->ContextDestroyed(context);
}
static void SetMaxAsyncTaskStacks(
@@ -528,12 +664,23 @@ class InspectorExtension : public v8::Extension {
fprintf(stderr, "Internal error: setMaxAsyncTaskStacks(max).");
Exit();
}
- v8_inspector::V8Inspector* inspector =
- InspectorClientImpl::InspectorFromContext(
- args.GetIsolate()->GetCurrentContext());
- CHECK(inspector);
v8_inspector::SetMaxAsyncTaskStacksForTest(
- inspector, args[0].As<v8::Int32>()->Value());
+ IsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
+ ->inspector()
+ ->inspector(),
+ args[0].As<v8::Int32>()->Value());
+ }
+
+ static void DumpAsyncTaskStacksStateForTest(
+ const v8::FunctionCallbackInfo<v8::Value>& args) {
+ if (args.Length() != 0) {
+ fprintf(stderr, "Internal error: dumpAsyncTaskStacksStateForTest().");
+ Exit();
+ }
+ v8_inspector::DumpAsyncTaskStacksStateForTest(
+ IsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
+ ->inspector()
+ ->inspector());
}
static void BreakProgram(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -541,16 +688,14 @@ class InspectorExtension : public v8::Extension {
fprintf(stderr, "Internal error: breakProgram('reason', 'details').");
Exit();
}
- v8_inspector::V8InspectorSession* session =
- InspectorClientImpl::SessionFromContext(
- args.GetIsolate()->GetCurrentContext());
- CHECK(session);
-
+ v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
v8::internal::Vector<uint16_t> reason = ToVector(args[0].As<v8::String>());
v8_inspector::StringView reason_view(reason.start(), reason.length());
v8::internal::Vector<uint16_t> details = ToVector(args[1].As<v8::String>());
v8_inspector::StringView details_view(details.start(), details.length());
- session->breakProgram(reason_view, details_view);
+ data->inspector()->BreakProgram(data->GetContextGroupId(context),
+ reason_view, details_view);
}
static void CreateObjectWithStrictCheck(
@@ -571,24 +716,23 @@ class InspectorExtension : public v8::Extension {
const v8::FunctionCallbackInfo<v8::Value>& args) {
if (args.Length() != 3 || !args[0]->IsFunction() || !args[1]->IsString() ||
!args[2]->IsString()) {
- fprintf(stderr, "Internal error: breakProgram('reason', 'details').");
+ fprintf(stderr,
+ "Internal error: callWithScheduledBreak('reason', 'details').");
Exit();
}
- v8_inspector::V8InspectorSession* session =
- InspectorClientImpl::SessionFromContext(
- args.GetIsolate()->GetCurrentContext());
- CHECK(session);
-
v8::internal::Vector<uint16_t> reason = ToVector(args[1].As<v8::String>());
v8_inspector::StringView reason_view(reason.start(), reason.length());
v8::internal::Vector<uint16_t> details = ToVector(args[2].As<v8::String>());
v8_inspector::StringView details_view(details.start(), details.length());
- session->schedulePauseOnNextStatement(reason_view, details_view);
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
+ IsolateData* data = IsolateData::FromContext(context);
+ int context_group_id = data->GetContextGroupId(context);
+ data->inspector()->SchedulePauseOnNextStatement(context_group_id,
+ reason_view, details_view);
v8::MaybeLocal<v8::Value> result;
result = args[0].As<v8::Function>()->Call(context, context->Global(), 0,
nullptr);
- session->cancelPauseOnNextStatement();
+ data->inspector()->CancelPauseOnNextStatement(context_group_id);
}
static void AllowAccessorFormatting(
@@ -610,50 +754,22 @@ class InspectorExtension : public v8::Extension {
}
};
-v8::Local<v8::String> ToString(v8::Isolate* isolate,
- const v8_inspector::StringView& string) {
- if (string.is8Bit())
- return v8::String::NewFromOneByte(isolate, string.characters8(),
- v8::NewStringType::kNormal,
- static_cast<int>(string.length()))
- .ToLocalChecked();
- else
- return v8::String::NewFromTwoByte(isolate, string.characters16(),
- v8::NewStringType::kNormal,
- static_cast<int>(string.length()))
- .ToLocalChecked();
-}
-
class FrontendChannelImpl : public InspectorClientImpl::FrontendChannel {
public:
- explicit FrontendChannelImpl(TaskRunner* frontend_task_runner)
- : frontend_task_runner_(frontend_task_runner) {}
+ FrontendChannelImpl(TaskRunner* frontend_task_runner, int context_group_id)
+ : frontend_task_runner_(frontend_task_runner),
+ context_group_id_(context_group_id) {}
virtual ~FrontendChannelImpl() {}
- void SendMessageToFrontend(const v8_inspector::StringView& message) final {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- v8::HandleScope scope(v8::Isolate::GetCurrent());
-
- v8::Local<v8::String> prefix =
- v8::String::NewFromUtf8(isolate, "InspectorTest._dispatchMessage(",
- v8::NewStringType::kInternalized)
- .ToLocalChecked();
- v8::Local<v8::String> message_string = ToString(isolate, message);
- v8::Local<v8::String> suffix =
- v8::String::NewFromUtf8(isolate, ")", v8::NewStringType::kInternalized)
- .ToLocalChecked();
-
- v8::Local<v8::String> result = v8::String::Concat(prefix, message_string);
- result = v8::String::Concat(result, suffix);
-
- frontend_task_runner_->Append(new ExecuteStringTask(
- ToVector(result), v8::String::Empty(isolate),
- v8::Integer::New(isolate, 0), v8::Integer::New(isolate, 0),
- v8::Boolean::New(isolate, false), nullptr, nullptr));
+ void SendMessageToFrontend(int session_id,
+ const v8_inspector::StringView& message) final {
+ frontend_task_runner_->Append(new SendMessageToFrontendTask(
+ context_group_id_, session_id, ToVector(message)));
}
private:
TaskRunner* frontend_task_runner_;
+ int context_group_id_;
};
} // namespace
@@ -666,45 +782,46 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeExternalStartupData(argv[0]);
v8::V8::Initialize();
- SetTimeoutExtension set_timeout_extension;
- v8::RegisterExtension(&set_timeout_extension);
- InspectorExtension inspector_extension;
- v8::RegisterExtension(&inspector_extension);
- UtilsExtension utils_extension;
- v8::RegisterExtension(&utils_extension);
- SendMessageToBackendExtension send_message_to_backend_extension;
- v8::RegisterExtension(&send_message_to_backend_extension);
-
v8::base::Semaphore ready_semaphore(0);
- const char* backend_extensions[] = {"v8_inspector/setTimeout",
- "v8_inspector/inspector"};
- v8::ExtensionConfiguration backend_configuration(
- arraysize(backend_extensions), backend_extensions);
- TaskRunner backend_runner(&backend_configuration, false, &ready_semaphore);
+ v8::StartupData startup_data = {nullptr, 0};
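+  // --embed <script> bakes the script into a snapshot blob, handed to the
+  // backend task runner below as startup data for its isolate.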
+ for (int i = 1; i < argc; ++i) {
+ if (strcmp(argv[i], "--embed") == 0) {
+ argv[i++] = nullptr;
+ printf("Embedding script '%s'\n", argv[i]);
+ startup_data = v8::V8::CreateSnapshotDataBlob(argv[i]);
+ argv[i] = nullptr;
+ }
+ }
+
+ IsolateData::SetupGlobalTasks frontend_extensions;
+ frontend_extensions.emplace_back(new UtilsExtension());
+ TaskRunner frontend_runner(std::move(frontend_extensions), true,
+ &ready_semaphore, nullptr, nullptr);
ready_semaphore.Wait();
- SendMessageToBackendExtension::set_backend_task_runner(&backend_runner);
- UtilsExtension::set_backend_task_runner(&backend_runner);
- const char* frontend_extensions[] = {"v8_inspector/utils",
- "v8_inspector/frontend"};
- v8::ExtensionConfiguration frontend_configuration(
- arraysize(frontend_extensions), frontend_extensions);
- TaskRunner frontend_runner(&frontend_configuration, true, &ready_semaphore);
+ int frontend_context_group_id = 0;
+ frontend_runner.Append(
+ new CreateContextGroupTask(&ready_semaphore, &frontend_context_group_id));
ready_semaphore.Wait();
- FrontendChannelImpl frontend_channel(&frontend_runner);
- InspectorClientImpl inspector_client(&backend_runner, &frontend_channel,
- &ready_semaphore);
+ IsolateData::SetupGlobalTasks backend_extensions;
+ backend_extensions.emplace_back(new SetTimeoutExtension());
+ backend_extensions.emplace_back(new InspectorExtension());
+ FrontendChannelImpl frontend_channel(&frontend_runner,
+ frontend_context_group_id);
+ TaskRunner backend_runner(
+ std::move(backend_extensions), false, &ready_semaphore,
+ startup_data.data ? &startup_data : nullptr, &frontend_channel);
ready_semaphore.Wait();
- UtilsExtension::set_inspector_client(&inspector_client);
+ UtilsExtension::set_backend_task_runner(&backend_runner);
task_runners.push_back(&frontend_runner);
task_runners.push_back(&backend_runner);
for (int i = 1; i < argc; ++i) {
// Ignore unknown flags.
- if (argv[i][0] == '-') continue;
+ if (argv[i] == nullptr || argv[i][0] == '-') continue;
bool exists = false;
v8::internal::Vector<const char> chars =
@@ -714,10 +831,13 @@ int main(int argc, char* argv[]) {
argv[i]);
Exit();
}
- frontend_runner.Append(new ExecuteStringTask(chars));
+ frontend_runner.Append(
+ new ExecuteStringTask(chars, frontend_context_group_id));
}
frontend_runner.Join();
backend_runner.Join();
+
+  delete[] startup_data.data;
return 0;
}
diff --git a/deps/v8/test/inspector/inspector.gyp b/deps/v8/test/inspector/inspector.gyp
index 8c96ae5d75..26f44d4812 100644
--- a/deps/v8/test/inspector/inspector.gyp
+++ b/deps/v8/test/inspector/inspector.gyp
@@ -23,9 +23,19 @@
'inspector-impl.cc',
'inspector-impl.h',
'inspector-test.cc',
+ 'isolate-data.cc',
+ 'isolate-data.h',
'task-runner.cc',
'task-runner.h',
],
+ 'conditions': [
+ ['v8_enable_i18n_support==1', {
+ 'dependencies': [
+ '<(icu_gyp_path):icui18n',
+ '<(icu_gyp_path):icuuc',
+ ],
+ }],
+ ],
},
],
'conditions': [
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index cec99e0779..5eb0a30ec6 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -21,6 +21,12 @@
}], # variant != default
##############################################################################
+['variant == noturbofan', {
+ # Crashes due to missing source position in ToBooleanICStub?
+ 'runtime/command-line-api': [SKIP],
+}], # variant == noturbofan
+
+##############################################################################
['variant == wasm_traps', {
'*': [SKIP],
}], # variant == wasm_traps
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
new file mode 100644
index 0000000000..927bd31ef4
--- /dev/null
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -0,0 +1,95 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/inspector/isolate-data.h"
+
+#include "test/inspector/inspector-impl.h"
+#include "test/inspector/task-runner.h"
+
+namespace {
+
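+// Embedder-data slots used on every context created by CreateContextGroup():
+// slot 2 holds the owning IsolateData*, slot 3 the tagged context group id.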
+const int kIsolateDataIndex = 2;
+const int kContextGroupIdIndex = 3;
+
+v8::internal::Vector<uint16_t> ToVector(v8::Local<v8::String> str) {
+ v8::internal::Vector<uint16_t> buffer =
+ v8::internal::Vector<uint16_t>::New(str->Length());
+ str->Write(buffer.start(), 0, str->Length());
+ return buffer;
+}
+
+} // namespace
+
+IsolateData::IsolateData(TaskRunner* task_runner,
+ IsolateData::SetupGlobalTasks setup_global_tasks,
+ v8::StartupData* startup_data,
+ InspectorClientImpl::FrontendChannel* channel)
+ : task_runner_(task_runner),
+ setup_global_tasks_(std::move(setup_global_tasks)) {
+ v8::Isolate::CreateParams params;
+ params.array_buffer_allocator =
+ v8::ArrayBuffer::Allocator::NewDefaultAllocator();
+ params.snapshot_blob = startup_data;
+ isolate_ = v8::Isolate::New(params);
+ isolate_->SetMicrotasksPolicy(v8::MicrotasksPolicy::kScoped);
+ if (channel)
+ inspector_.reset(new InspectorClientImpl(isolate_, task_runner, channel));
+}
+
+IsolateData* IsolateData::FromContext(v8::Local<v8::Context> context) {
+ return static_cast<IsolateData*>(
+ context->GetAlignedPointerFromEmbedderData(kIsolateDataIndex));
+}
+
+int IsolateData::CreateContextGroup() {
+ v8::HandleScope handle_scope(isolate_);
+ v8::Local<v8::ObjectTemplate> global_template =
+ v8::ObjectTemplate::New(isolate_);
+ for (auto it = setup_global_tasks_.begin(); it != setup_global_tasks_.end();
+ ++it) {
+ (*it)->Run(isolate_, global_template);
+ }
+ v8::Local<v8::Context> context =
+ v8::Context::New(isolate_, nullptr, global_template);
+ context->SetAlignedPointerInEmbedderData(kIsolateDataIndex, this);
+ int context_group_id = ++last_context_group_id_;
+ // Should be 2-byte aligned.
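+  // Storing id * 2 keeps the low bit clear, satisfying the aligned-pointer
+  // requirement; GetContextGroupId() divides by 2 to undo the tag.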
+ context->SetAlignedPointerInEmbedderData(
+ kContextGroupIdIndex, reinterpret_cast<void*>(context_group_id * 2));
+ contexts_[context_group_id].Reset(isolate_, context);
+ if (inspector_) inspector_->ContextCreated(context, context_group_id);
+ return context_group_id;
+}
+
+v8::Local<v8::Context> IsolateData::GetContext(int context_group_id) {
+ return contexts_[context_group_id].Get(isolate_);
+}
+
+int IsolateData::GetContextGroupId(v8::Local<v8::Context> context) {
+ return static_cast<int>(
+ reinterpret_cast<intptr_t>(
+ context->GetAlignedPointerFromEmbedderData(kContextGroupIdIndex)) /
+ 2);
+}
+
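+// Compiles, instantiates and evaluates a module, then caches it under |name|
+// so that ModuleResolveCallback() can resolve import specifiers against it.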
+void IsolateData::RegisterModule(v8::Local<v8::Context> context,
+ v8::internal::Vector<uint16_t> name,
+ v8::ScriptCompiler::Source* source) {
+ v8::Local<v8::Module> module;
+ if (!v8::ScriptCompiler::CompileModule(isolate(), source).ToLocal(&module))
+ return;
+ if (!module->Instantiate(context, &IsolateData::ModuleResolveCallback))
+ return;
+ v8::Local<v8::Value> result;
+ if (!module->Evaluate(context).ToLocal(&result)) return;
+ modules_[name] = v8::Global<v8::Module>(isolate_, module);
+}
+
+v8::MaybeLocal<v8::Module> IsolateData::ModuleResolveCallback(
+ v8::Local<v8::Context> context, v8::Local<v8::String> specifier,
+ v8::Local<v8::Module> referrer) {
+ IsolateData* data = IsolateData::FromContext(context);
+ return data->modules_[ToVector(specifier)].Get(data->isolate_);
+}
diff --git a/deps/v8/test/inspector/isolate-data.h b/deps/v8/test/inspector/isolate-data.h
new file mode 100644
index 0000000000..34f0ae8308
--- /dev/null
+++ b/deps/v8/test/inspector/isolate-data.h
@@ -0,0 +1,67 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_INSPECTOR_PROTOCOL_ISOLATE_DATA_H_
+#define V8_TEST_INSPECTOR_PROTOCOL_ISOLATE_DATA_H_
+
+#include <map>
+
+#include "include/v8-inspector.h"
+#include "include/v8-platform.h"
+#include "include/v8.h"
+#include "src/vector.h"
+#include "test/inspector/inspector-impl.h"
+
+class TaskRunner;
+
+class IsolateData {
+ public:
+ class SetupGlobalTask {
+ public:
+ virtual ~SetupGlobalTask() = default;
+ virtual void Run(v8::Isolate* isolate,
+ v8::Local<v8::ObjectTemplate> global) = 0;
+ };
+ using SetupGlobalTasks = std::vector<std::unique_ptr<SetupGlobalTask>>;
+
+ IsolateData(TaskRunner* task_runner, SetupGlobalTasks setup_global_tasks,
+ v8::StartupData* startup_data,
+ InspectorClientImpl::FrontendChannel* channel);
+ static IsolateData* FromContext(v8::Local<v8::Context> context);
+
+ v8::Isolate* isolate() const { return isolate_; }
+ InspectorClientImpl* inspector() const { return inspector_.get(); }
+ TaskRunner* task_runner() const { return task_runner_; }
+ int CreateContextGroup();
+ v8::Local<v8::Context> GetContext(int context_group_id);
+ int GetContextGroupId(v8::Local<v8::Context> context);
+ void RegisterModule(v8::Local<v8::Context> context,
+ v8::internal::Vector<uint16_t> name,
+ v8::ScriptCompiler::Source* source);
+
+ private:
+ struct VectorCompare {
+ bool operator()(const v8::internal::Vector<uint16_t>& lhs,
+ const v8::internal::Vector<uint16_t>& rhs) const {
+ for (int i = 0; i < lhs.length() && i < rhs.length(); ++i) {
+ if (lhs[i] != rhs[i]) return lhs[i] < rhs[i];
+ }
+      // Equal prefixes: order the shorter vector first so distinct module
+      // names that share a prefix do not collapse into one map key.
+      return lhs.length() < rhs.length();
+ }
+ };
+ static v8::MaybeLocal<v8::Module> ModuleResolveCallback(
+ v8::Local<v8::Context> context, v8::Local<v8::String> specifier,
+ v8::Local<v8::Module> referrer);
+
+ TaskRunner* task_runner_;
+ SetupGlobalTasks setup_global_tasks_;
+ v8::Isolate* isolate_;
+ std::unique_ptr<InspectorClientImpl> inspector_;
+ int last_context_group_id_ = 0;
+ std::map<int, v8::Global<v8::Context>> contexts_;
+ std::map<v8::internal::Vector<uint16_t>, v8::Global<v8::Module>,
+ VectorCompare>
+ modules_;
+};
+#endif // V8_TEST_INSPECTOR_PROTOCOL_ISOLATE_DATA_H_
diff --git a/deps/v8/test/inspector/json-parse-expected.txt b/deps/v8/test/inspector/json-parse-expected.txt
index b11d6e2ee0..80d80ab12c 100644
--- a/deps/v8/test/inspector/json-parse-expected.txt
+++ b/deps/v8/test/inspector/json-parse-expected.txt
@@ -1,3 +1,4 @@
+Tests that the JSON parser on the backend works correctly with Unicode
{
id : 1
result : {
diff --git a/deps/v8/test/inspector/json-parse.js b/deps/v8/test/inspector/json-parse.js
index 2d88fea0f0..92ec84bc5e 100644
--- a/deps/v8/test/inspector/json-parse.js
+++ b/deps/v8/test/inspector/json-parse.js
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-const id = ++InspectorTest._requestId;
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that the JSON parser on the backend works correctly with Unicode');
+
+const id = 100500;
var command = { "method": "Runtime.evaluate", "params": { expression: "\"!!!\"" }, "id": id };
-InspectorTest.sendRawCommand(id, JSON.stringify(command).replace("!!!", "\\u041F\\u0440\\u0438\\u0432\\u0435\\u0442 \\u043C\\u0438\\u0440"), step2);
+session.sendRawCommand(id, JSON.stringify(command).replace("!!!", "\\u041F\\u0440\\u0438\\u0432\\u0435\\u0442 \\u043C\\u0438\\u0440"), step2);
function step2(msg)
{
diff --git a/deps/v8/test/inspector/protocol-test.js b/deps/v8/test/inspector/protocol-test.js
index c91055b670..a99a9f536d 100644
--- a/deps/v8/test/inspector/protocol-test.js
+++ b/deps/v8/test/inspector/protocol-test.js
@@ -3,70 +3,36 @@
// found in the LICENSE file.
InspectorTest = {};
-InspectorTest._dispatchTable = new Map();
-InspectorTest._requestId = 0;
InspectorTest._dumpInspectorProtocolMessages = false;
-InspectorTest._eventHandler = {};
InspectorTest._commandsForLogging = new Set();
+InspectorTest._sessions = new Set();
-Protocol = new Proxy({}, {
- get: function(target, agentName, receiver) {
- return new Proxy({}, {
- get: function(target, methodName, receiver) {
- const eventPattern = /^on(ce)?([A-Z][A-Za-z0-9]+)/;
- var match = eventPattern.exec(methodName);
- if (!match) {
- return (args, contextGroupId) => InspectorTest._sendCommandPromise(`${agentName}.${methodName}`, args || {}, contextGroupId);
- } else {
- var eventName = match[2];
- eventName = eventName.charAt(0).toLowerCase() + eventName.slice(1);
- if (match[1])
- return () => InspectorTest._waitForEventPromise(
- `${agentName}.${eventName}`);
- else
- return (listener) => { InspectorTest._eventHandler[`${agentName}.${eventName}`] = listener };
- }
- }
- });
- }
-});
-
-InspectorTest.logProtocolCommandCalls = (command) => InspectorTest._commandsForLogging.add(command);
-
-var utils = {};
-(function setupUtils() {
- utils.load = load;
- this.load = null;
- utils.read = read;
- this.read = null;
- utils.compileAndRunWithOrigin = compileAndRunWithOrigin;
- this.compileAndRunWithOrigin = null;
- utils.quit = quit;
- this.quit = null;
- utils.print = print;
- this.print = null;
- utils.setlocale = setlocale;
- this.setlocale = null;
- utils.setCurrentTimeMSForTest = setCurrentTimeMSForTest;
- this.setCurrentTimeMSForTest = null;
- utils.setMemoryInfoForTest = setMemoryInfoForTest;
- this.setMemoryInfoForTest = null;
- utils.schedulePauseOnNextStatement = schedulePauseOnNextStatement;
- this.schedulePauseOnNextStatement = null;
- utils.cancelPauseOnNextStatement = cancelPauseOnNextStatement;
- this.cancelPauseOnNextStatement = null;
- utils.reconnect = reconnect;
- this.reconnect = null;
- utils.disconnect = disconnect;
- this.disconnect = null;
- utils.createContextGroup = createContextGroup;
- this.createContextGroup = null;
-})();
-
-InspectorTest.log = utils.print.bind(null);
-
-InspectorTest.logMessage = function(originalMessage)
-{
+InspectorTest.log = utils.print.bind(utils);
+InspectorTest.quitImmediately = utils.quit.bind(utils);
+
+InspectorTest.logProtocolCommandCalls = function(command) {
+ InspectorTest._commandsForLogging.add(command);
+}
+
+InspectorTest.completeTest = function() {
+ var promises = [];
+ for (var session of InspectorTest._sessions)
+ promises.push(session.Protocol.Debugger.disable());
+ Promise.all(promises).then(() => utils.quit());
+}
+
+InspectorTest.waitForPendingTasks = function() {
+ var promises = [];
+ for (var session of InspectorTest._sessions)
+ promises.push(session.Protocol.Runtime.evaluate({ expression: "new Promise(r => setTimeout(r, 0))//# sourceURL=wait-for-pending-tasks.js", awaitPromise: true }));
+ return Promise.all(promises);
+}
+
+InspectorTest.startDumpingProtocolMessages = function() {
+ InspectorTest._dumpInspectorProtocolMessages = true;
+}
+
+InspectorTest.logMessage = function(originalMessage) {
var message = JSON.parse(JSON.stringify(originalMessage));
if (message.id)
message.id = "<messageId>";
@@ -90,12 +56,10 @@ InspectorTest.logMessage = function(originalMessage)
return originalMessage;
}
-InspectorTest.logObject = function(object, title)
-{
+InspectorTest.logObject = function(object, title) {
var lines = [];
- function dumpValue(value, prefix, prefixWithName)
- {
+ function dumpValue(value, prefix, prefixWithName) {
if (typeof value === "object" && value !== null) {
if (value instanceof Array)
dumpItems(value, prefix, prefixWithName);
@@ -106,8 +70,7 @@ InspectorTest.logObject = function(object, title)
}
}
- function dumpProperties(object, prefix, firstLinePrefix)
- {
+ function dumpProperties(object, prefix, firstLinePrefix) {
prefix = prefix || "";
firstLinePrefix = firstLinePrefix || prefix;
lines.push(firstLinePrefix + "{");
@@ -124,8 +87,7 @@ InspectorTest.logObject = function(object, title)
lines.push(prefix + "}");
}
- function dumpItems(object, prefix, firstLinePrefix)
- {
+ function dumpItems(object, prefix, firstLinePrefix) {
prefix = prefix || "";
firstLinePrefix = firstLinePrefix || prefix;
lines.push(firstLinePrefix + "[");
@@ -138,118 +100,222 @@ InspectorTest.logObject = function(object, title)
InspectorTest.log(lines.join("\n"));
}
-InspectorTest.logCallFrames = function(callFrames)
-{
- for (var frame of callFrames) {
- var functionName = frame.functionName || '(anonymous)';
- var url = frame.url ? frame.url : InspectorTest._scriptMap.get(frame.location.scriptId).url;
- var lineNumber = frame.location ? frame.location.lineNumber : frame.lineNumber;
- var columnNumber = frame.location ? frame.location.columnNumber : frame.columnNumber;
- InspectorTest.log(`${functionName} (${url}:${lineNumber}:${columnNumber})`);
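+// Wraps a backend context group created via utils.createContextGroup(); one
+// group can host several sessions, each obtained with connect().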
+InspectorTest.ContextGroup = class {
+ constructor() {
+ this.id = utils.createContextGroup();
}
-}
-InspectorTest.logSourceLocation = function(location)
-{
- var scriptId = location.scriptId;
- if (!InspectorTest._scriptMap || !InspectorTest._scriptMap.has(scriptId)) {
- InspectorTest.log("InspectorTest.setupScriptMap should be called before Protocol.Debugger.enable.");
- InspectorTest.completeTest();
+ schedulePauseOnNextStatement(reason, details) {
+ utils.schedulePauseOnNextStatement(this.id, reason, details);
}
- var script = InspectorTest._scriptMap.get(scriptId);
- if (!script.scriptSource) {
- // TODO(kozyatinskiy): doesn't assume that contextId == contextGroupId.
- return Protocol.Debugger.getScriptSource({ scriptId }, script.executionContextId)
- .then(message => script.scriptSource = message.result.scriptSource)
- .then(dumpSourceWithLocation);
+
+ cancelPauseOnNextStatement() {
+ utils.cancelPauseOnNextStatement(this.id);
}
- return Promise.resolve().then(dumpSourceWithLocation);
-
- function dumpSourceWithLocation() {
- var lines = script.scriptSource.split('\n');
- var line = lines[location.lineNumber];
- line = line.slice(0, location.columnNumber) + '#' + (line.slice(location.columnNumber) || '');
- lines[location.lineNumber] = line;
- lines = lines.filter(line => line.indexOf('//# sourceURL=') === -1);
- InspectorTest.log(lines.slice(Math.max(location.lineNumber - 1, 0), location.lineNumber + 2).join('\n'));
- InspectorTest.log('');
+
+ addScript(string, lineOffset, columnOffset, url) {
+ utils.compileAndRunWithOrigin(this.id, string, url || '', lineOffset || 0, columnOffset || 0, false);
}
-}
-InspectorTest.logSourceLocations = function(locations) {
- if (locations.length == 0) return Promise.resolve();
- return InspectorTest.logSourceLocation(locations[0])
- .then(() => InspectorTest.logSourceLocations(locations.splice(1)));
-}
+ addModule(string, url, lineOffset, columnOffset) {
+ utils.compileAndRunWithOrigin(this.id, string, url, lineOffset || 0, columnOffset || 0, true);
+ }
-InspectorTest.logAsyncStackTrace = function(asyncStackTrace)
-{
- while (asyncStackTrace) {
- if (asyncStackTrace.promiseCreationFrame) {
- var frame = asyncStackTrace.promiseCreationFrame;
- InspectorTest.log(`-- ${asyncStackTrace.description} (${frame.url
- }:${frame.lineNumber}:${frame.columnNumber})--`);
- } else {
- InspectorTest.log(`-- ${asyncStackTrace.description} --`);
+ loadScript(fileName) {
+ this.addScript(utils.read(fileName));
+ }
+
+ connect() {
+ return new InspectorTest.Session(this);
+ }
+
+ setupInjectedScriptEnvironment(debug) {
+ let scriptSource = '';
+ // First define all getters on Object.prototype.
+ let injectedScriptSource = utils.read('src/inspector/injected-script-source.js');
+ let getterRegex = /\.[a-zA-Z0-9]+/g;
+ let match;
+ let getters = new Set();
+ while (match = getterRegex.exec(injectedScriptSource)) {
+ getters.add(match[0].substr(1));
+ }
+ scriptSource += `(function installSettersAndGetters() {
+ let defineProperty = Object.defineProperty;
+ let ObjectPrototype = Object.prototype;\n`;
+ scriptSource += Array.from(getters).map(getter => `
+ defineProperty(ObjectPrototype, '${getter}', {
+ set() { debugger; throw 42; }, get() { debugger; throw 42; },
+ __proto__: null
+ });
+ `).join('\n') + '})();';
+ this.addScript(scriptSource);
+
+ if (debug) {
+      InspectorTest.log('WARNING: setupInjectedScriptEnvironment with debug flag is for debugging only and should not be landed.');
+      InspectorTest.log('WARNING: run test with --expose-inspector-scripts flag to get more details.');
+      InspectorTest.log('WARNING: you can additionally comment out rjsmin in xxd.py to get unminified injected-script-source.js.');
+      var session = InspectorTest._sessions.values().next().value;
+      session.setupScriptMap();
+      session.Protocol.Debugger.enable();
+ session.Protocol.Debugger.onPaused(message => {
+ let callFrames = message.params.callFrames;
+ session.logSourceLocations(callFrames.map(frame => frame.location));
+ })
}
- InspectorTest.logCallFrames(asyncStackTrace.callFrames);
- asyncStackTrace = asyncStackTrace.parent;
}
-}
+};
+
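+// A single inspector session attached to a context group: owns the Protocol
+// proxy built by _setupProtocol() and routes raw messages through
+// utils.sendMessageToBackend().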
+InspectorTest.Session = class {
+ constructor(contextGroup) {
+ this.contextGroup = contextGroup;
+ this._dispatchTable = new Map();
+ this._eventHandlers = new Map();
+ this._requestId = 0;
+ this.Protocol = this._setupProtocol();
+ InspectorTest._sessions.add(this);
+ this.id = utils.connectSession(contextGroup.id, '', this._dispatchMessage.bind(this));
+ }
-InspectorTest.completeTest = () => Protocol.Debugger.disable().then(() => utils.quit());
+ disconnect() {
+ InspectorTest._sessions.delete(this);
+ utils.disconnectSession(this.id);
+ }
-InspectorTest.completeTestAfterPendingTimeouts = function()
-{
- InspectorTest.waitPendingTasks().then(InspectorTest.completeTest);
-}
+ reconnect() {
+ var state = utils.disconnectSession(this.id);
+ this.id = utils.connectSession(this.contextGroup.id, state, this._dispatchMessage.bind(this));
+ }
-InspectorTest.waitPendingTasks = function()
-{
- return Protocol.Runtime.evaluate({ expression: "new Promise(r => setTimeout(r, 0))//# sourceURL=wait-pending-tasks.js", awaitPromise: true });
-}
+ sendRawCommand(requestId, command, handler) {
+ if (InspectorTest._dumpInspectorProtocolMessages)
+ utils.print("frontend: " + command);
+ this._dispatchTable.set(requestId, handler);
+ utils.sendMessageToBackend(this.id, command);
+ }
-InspectorTest.addScript = (string, lineOffset, columnOffset) => utils.compileAndRunWithOrigin(string, "", lineOffset || 0, columnOffset || 0, false);
-InspectorTest.addScriptWithUrl = (string, url) => utils.compileAndRunWithOrigin(string, url, 0, 0, false);
-InspectorTest.addModule = (string, url, lineOffset, columnOffset) => utils.compileAndRunWithOrigin(string, url, lineOffset || 0, columnOffset || 0, true);
+ setupScriptMap() {
+ if (this._scriptMap)
+ return;
+ this._scriptMap = new Map();
+ }
-InspectorTest.startDumpingProtocolMessages = function()
-{
- InspectorTest._dumpInspectorProtocolMessages = true;
-}
+ logCallFrames(callFrames) {
+ for (var frame of callFrames) {
+ var functionName = frame.functionName || '(anonymous)';
+ var url = frame.url ? frame.url : this._scriptMap.get(frame.location.scriptId).url;
+ var lineNumber = frame.location ? frame.location.lineNumber : frame.lineNumber;
+ var columnNumber = frame.location ? frame.location.columnNumber : frame.columnNumber;
+ InspectorTest.log(`${functionName} (${url}:${lineNumber}:${columnNumber})`);
+ }
+ }
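+  // Note: Runtime stack-trace frames carry url/lineNumber/columnNumber
+  // directly, while Debugger call frames nest the numbers under `location`;
+  // the fallbacks above accept either shape.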
-InspectorTest.sendRawCommand = function(requestId, command, handler, contextGroupId)
-{
- if (InspectorTest._dumpInspectorProtocolMessages)
- utils.print("frontend: " + command);
- InspectorTest._dispatchTable.set(requestId, handler);
- sendMessageToBackend(command, contextGroupId || 0);
-}
+ logSourceLocation(location) {
+ var scriptId = location.scriptId;
+ if (!this._scriptMap || !this._scriptMap.has(scriptId)) {
+ InspectorTest.log("setupScriptMap should be called before Protocol.Debugger.enable.");
+ InspectorTest.completeTest();
+ }
+ var script = this._scriptMap.get(scriptId);
+ if (!script.scriptSource) {
+ return this.Protocol.Debugger.getScriptSource({ scriptId })
+ .then(message => script.scriptSource = message.result.scriptSource)
+ .then(dumpSourceWithLocation);
+ }
+ return Promise.resolve().then(dumpSourceWithLocation);
+
+ function dumpSourceWithLocation() {
+ var lines = script.scriptSource.split('\n');
+ var line = lines[location.lineNumber];
+ line = line.slice(0, location.columnNumber) + '#' + (line.slice(location.columnNumber) || '');
+ lines[location.lineNumber] = line;
+ lines = lines.filter(line => line.indexOf('//# sourceURL=') === -1);
+ InspectorTest.log(lines.slice(Math.max(location.lineNumber - 1, 0), location.lineNumber + 2).join('\n'));
+ InspectorTest.log('');
+ }
+ }
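+  // dumpSourceWithLocation prints the paused line with a '#' marker inserted
+  // at the reported column, plus one line of context on each side, e.g.:
+  //   function foo() {
+  //     #debugger;
+  //   }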
-InspectorTest.checkExpectation = function(fail, name, messageObject)
-{
- if (fail === !!messageObject.error) {
- InspectorTest.log("PASS: " + name);
- return true;
+ logSourceLocations(locations) {
+ if (locations.length == 0) return Promise.resolve();
+ return this.logSourceLocation(locations[0]).then(() => this.logSourceLocations(locations.splice(1)));
}
- InspectorTest.log("FAIL: " + name + ": " + JSON.stringify(messageObject));
- InspectorTest.completeTest();
- return false;
-}
-InspectorTest.expectedSuccess = InspectorTest.checkExpectation.bind(null, false);
-InspectorTest.expectedError = InspectorTest.checkExpectation.bind(null, true);
+ logAsyncStackTrace(asyncStackTrace) {
+ while (asyncStackTrace) {
+ if (asyncStackTrace.promiseCreationFrame) {
+ var frame = asyncStackTrace.promiseCreationFrame;
+ InspectorTest.log(`-- ${asyncStackTrace.description} (${frame.url}:${frame.lineNumber}:${frame.columnNumber})--`);
+ } else {
+ InspectorTest.log(`-- ${asyncStackTrace.description} --`);
+ }
+ this.logCallFrames(asyncStackTrace.callFrames);
+ asyncStackTrace = asyncStackTrace.parent;
+ }
+ }
-InspectorTest.setupScriptMap = function() {
- if (InspectorTest._scriptMap)
- return;
- InspectorTest._scriptMap = new Map();
-}
+ _sendCommandPromise(method, params) {
+ if (InspectorTest._commandsForLogging.has(method))
+ utils.print(method + ' called');
+ var requestId = ++this._requestId;
+ var messageObject = { "id": requestId, "method": method, "params": params };
+ return new Promise(fulfill => this.sendRawCommand(requestId, JSON.stringify(messageObject), fulfill));
+ }
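+  // For example, _sendCommandPromise('Runtime.evaluate', {expression: '1+1'})
+  // serializes {"id":<requestId>,"method":"Runtime.evaluate","params":{"expression":"1+1"}}
+  // and resolves with the matching response message.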
+
+ _setupProtocol() {
+ return new Proxy({}, { get: (target, agentName, receiver) => new Proxy({}, {
+ get: (target, methodName, receiver) => {
+ const eventPattern = /^on(ce)?([A-Z][A-Za-z0-9]+)/;
+ var match = eventPattern.exec(methodName);
+ if (!match)
+ return args => this._sendCommandPromise(`${agentName}.${methodName}`, args || {});
+ var eventName = match[2];
+ eventName = eventName.charAt(0).toLowerCase() + eventName.slice(1);
+ if (match[1])
+ return () => this._waitForEventPromise(`${agentName}.${eventName}`);
+ return listener => this._eventHandlers.set(`${agentName}.${eventName}`, listener);
+ }
+ })});
+ }
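+  // The nested proxies above turn property accesses into protocol traffic: a
+  // plain name such as Protocol.Debugger.enable() sends the "Debugger.enable"
+  // command, Protocol.Debugger.onPaused(listener) registers a handler for the
+  // "Debugger.paused" event, and Protocol.Debugger.oncePaused() returns a
+  // promise that resolves on the event's next occurrence.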
+
+ _dispatchMessage(messageString) {
+ var messageObject = JSON.parse(messageString);
+ if (InspectorTest._dumpInspectorProtocolMessages)
+ utils.print("backend: " + JSON.stringify(messageObject));
+ try {
+ var messageId = messageObject["id"];
+ if (typeof messageId === "number") {
+ var handler = this._dispatchTable.get(messageId);
+ if (handler) {
+ handler(messageObject);
+ this._dispatchTable.delete(messageId);
+ }
+ } else {
+ var eventName = messageObject["method"];
+ var eventHandler = this._eventHandlers.get(eventName);
+ if (this._scriptMap && eventName === "Debugger.scriptParsed")
+ this._scriptMap.set(messageObject.params.scriptId, JSON.parse(JSON.stringify(messageObject.params)));
+ if (eventName === "Debugger.scriptParsed" && messageObject.params.url === "wait-for-pending-tasks.js")
+ return;
+ if (eventHandler)
+ eventHandler(messageObject);
+ }
+ } catch (e) {
+ InspectorTest.log("Exception when dispatching message: " + e + "\n" + e.stack + "\n message = " + JSON.stringify(messageObject, null, 2));
+ InspectorTest.completeTest();
+ }
+  }
+
+ _waitForEventPromise(eventName) {
+ return new Promise(fulfill => {
+ this._eventHandlers.set(eventName, result => {
+        this._eventHandlers.delete(eventName);
+ fulfill(result);
+ });
+ });
+ }
+};
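+
+// Illustrative pause/resume flow built on the conventions above (a sketch,
+// not part of the harness):
+//
+//   session.setupScriptMap();            // must run before Debugger.enable
+//   session.Protocol.Debugger.enable();
+//   session.Protocol.Debugger.onPaused(msg => {
+//     session.logSourceLocation(msg.params.callFrames[0].location)
+//         .then(() => session.Protocol.Debugger.resume());
+//   });
+//   session.Protocol.Runtime.evaluate({expression: 'debugger;'});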
-InspectorTest.runTestSuite = function(testSuite)
-{
- function nextTest()
- {
+InspectorTest.runTestSuite = function(testSuite) {
+ function nextTest() {
if (!testSuite.length) {
InspectorTest.completeTest();
return;
@@ -264,97 +330,22 @@ InspectorTest.runTestSuite = function(testSuite)
InspectorTest.runAsyncTestSuite = async function(testSuite) {
for (var test of testSuite) {
InspectorTest.log("\nRunning test: " + test.name);
- await test();
+ try {
+ await test();
+ } catch (e) {
+ utils.print(e.stack);
+ }
}
InspectorTest.completeTest();
}
-InspectorTest._sendCommandPromise = function(method, params, contextGroupId)
-{
- var requestId = ++InspectorTest._requestId;
- var messageObject = { "id": requestId, "method": method, "params": params };
- var fulfillCallback;
- var promise = new Promise(fulfill => fulfillCallback = fulfill);
- if (InspectorTest._commandsForLogging.has(method)) {
- utils.print(method + ' called');
- }
- InspectorTest.sendRawCommand(requestId, JSON.stringify(messageObject), fulfillCallback, contextGroupId);
- return promise;
-}
-
-InspectorTest._waitForEventPromise = function(eventName)
-{
- return new Promise(fulfill => InspectorTest._eventHandler[eventName] = fullfillAndClearListener.bind(null, fulfill));
-
- function fullfillAndClearListener(fulfill, result)
- {
- delete InspectorTest._eventHandler[eventName];
- fulfill(result);
- }
-}
-
-InspectorTest._dispatchMessage = function(messageObject)
-{
- if (InspectorTest._dumpInspectorProtocolMessages)
- utils.print("backend: " + JSON.stringify(messageObject));
+InspectorTest.start = function(description) {
try {
- var messageId = messageObject["id"];
- if (typeof messageId === "number") {
- var handler = InspectorTest._dispatchTable.get(messageId);
- if (handler) {
- handler(messageObject);
- InspectorTest._dispatchTable.delete(messageId);
- }
- } else {
- var eventName = messageObject["method"];
- var eventHandler = InspectorTest._eventHandler[eventName];
- if (InspectorTest._scriptMap && eventName === "Debugger.scriptParsed")
- InspectorTest._scriptMap.set(messageObject.params.scriptId, JSON.parse(JSON.stringify(messageObject.params)));
- if (eventName === "Debugger.scriptParsed" && messageObject.params.url === "wait-pending-tasks.js")
- return;
- if (eventHandler)
- eventHandler(messageObject);
- }
+ InspectorTest.log(description);
+ var contextGroup = new InspectorTest.ContextGroup();
+ var session = contextGroup.connect();
+ return { session: session, contextGroup: contextGroup, Protocol: session.Protocol };
} catch (e) {
- InspectorTest.log("Exception when dispatching message: " + e + "\n" + e.stack + "\n message = " + JSON.stringify(messageObject, null, 2));
- InspectorTest.completeTest();
- }
-}
-
-InspectorTest.loadScript = function(fileName) {
- InspectorTest.addScript(utils.read(fileName));
-}
-
-InspectorTest.setupInjectedScriptEnvironment = function(debug) {
- let scriptSource = '';
- // First define all getters on Object.prototype.
- let injectedScriptSource = utils.read('src/inspector/injected-script-source.js');
- let getterRegex = /\.[a-zA-Z0-9]+/g;
- let match;
- let getters = new Set();
- while (match = getterRegex.exec(injectedScriptSource)) {
- getters.add(match[0].substr(1));
- }
- scriptSource += `(function installSettersAndGetters() {
- let defineProperty = Object.defineProperty;
- let ObjectPrototype = Object.prototype;\n`;
- scriptSource += Array.from(getters).map(getter => `
- defineProperty(ObjectPrototype, '${getter}', {
- set() { debugger; throw 42; }, get() { debugger; throw 42; },
- __proto__: null
- });
- `).join('\n') + '})();';
- InspectorTest.addScript(scriptSource);
-
- if (debug) {
- InspectorTest.log('WARNING: InspectorTest.setupInjectedScriptEnvironment with debug flag for debugging only and should not be landed.');
- InspectorTest.log('WARNING: run test with --expose-inspector-scripts flag to get more details.');
- InspectorTest.log('WARNING: you can additionally comment rjsmin in xxd.py to get unminified injected-script-source.js.');
- InspectorTest.setupScriptMap();
- Protocol.Debugger.enable();
- Protocol.Debugger.onPaused(message => {
- let callFrames = message.params.callFrames;
- InspectorTest.logSourceLocations(callFrames.map(frame => frame.location));
- })
+ utils.print(e.stack);
}
}
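+
+// With this change a typical test preamble becomes (this is the pattern all
+// of the conversions below follow):
+//
+//   let {session, contextGroup, Protocol} =
+//       InspectorTest.start('Description, logged as the first output line.');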
diff --git a/deps/v8/test/inspector/runtime/await-promise.js b/deps/v8/test/inspector/runtime/await-promise.js
index a09eb05a4c..042a1332bf 100644
--- a/deps/v8/test/inspector/runtime/await-promise.js
+++ b/deps/v8/test/inspector/runtime/await-promise.js
@@ -3,9 +3,9 @@
// found in the LICENSE file.
// Flags: --expose_gc
-InspectorTest.log("Tests that Runtime.awaitPromise works.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that Runtime.awaitPromise works.");
-InspectorTest.addScript(
+contextGroup.addScript(
`
var resolveCallback;
var rejectCallback;
diff --git a/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt b/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
index 2d558b85dd..f396b0540e 100644
--- a/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
+++ b/deps/v8/test/inspector/runtime/call-function-on-async-expected.txt
@@ -125,10 +125,6 @@ Running test: testFunctionReturnRejectedPromise
}
exceptionId : <exceptionId>
lineNumber : 0
- stackTrace : {
- callFrames : [
- ]
- }
text : Uncaught (in promise)
}
result : {
@@ -138,4 +134,4 @@ Running test: testFunctionReturnRejectedPromise
}
}
}
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/inspector/runtime/call-function-on-async.js b/deps/v8/test/inspector/runtime/call-function-on-async.js
index e6cf5f9edf..cce28565c1 100644
--- a/deps/v8/test/inspector/runtime/call-function-on-async.js
+++ b/deps/v8/test/inspector/runtime/call-function-on-async.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Tests that Runtime.callFunctionOn works with awaitPromise flag.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that Runtime.callFunctionOn works with awaitPromise flag.");
InspectorTest.runTestSuite([
function testArguments(next)
diff --git a/deps/v8/test/inspector/runtime/clear-of-command-line-api-expected.txt b/deps/v8/test/inspector/runtime/clear-of-command-line-api-expected.txt
index 142989b731..f63b91859a 100644
--- a/deps/v8/test/inspector/runtime/clear-of-command-line-api-expected.txt
+++ b/deps/v8/test/inspector/runtime/clear-of-command-line-api-expected.txt
@@ -174,4 +174,4 @@ redefineGetOwnPropertyDescriptors()
value : 42
}
}
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/inspector/runtime/clear-of-command-line-api.js b/deps/v8/test/inspector/runtime/clear-of-command-line-api.js
index e8e8513204..d67a1c5447 100644
--- a/deps/v8/test/inspector/runtime/clear-of-command-line-api.js
+++ b/deps/v8/test/inspector/runtime/clear-of-command-line-api.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Tests that CommandLineAPI is presented only while evaluation.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that CommandLineAPI is presented only while evaluation.");
-InspectorTest.addScript(
+contextGroup.addScript(
`
var methods = ["dir","dirxml","profile","profileEnd","clear","table","keys","values","debug","undebug","monitor","unmonitor","inspect","copy"];
var window = this;
diff --git a/deps/v8/test/inspector/runtime/client-console-api-message-expected.txt b/deps/v8/test/inspector/runtime/client-console-api-message-expected.txt
new file mode 100644
index 0000000000..699b390a8d
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/client-console-api-message-expected.txt
@@ -0,0 +1,8 @@
+Checks that we pass correct arguments in V8InspectorClient::consoleAPIMessage. Note: lines and columns are 1-based.
+42 (:1:9)
+ at (anonymous function) (:1:9)
+239 (:13:15)
+ at b (:13:15)
+ at a (:15:5)
+ at consoleTrace (:17:3)
+ at (anonymous function) (:1:1)
diff --git a/deps/v8/test/inspector/runtime/client-console-api-message.js b/deps/v8/test/inspector/runtime/client-console-api-message.js
new file mode 100644
index 0000000000..15838fa489
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/client-console-api-message.js
@@ -0,0 +1,28 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that we pass correct arguments in ' +
+ 'V8InspectorClient::consoleAPIMessage. Note: lines and columns are 1-based.');
+
+contextGroup.addScript(`
+function consoleTrace() {
+ function a() {
+ function b() {
+ console.trace(239);
+ }
+ b();
+ }
+ a();
+}
+`, 8, 26);
+
+Protocol.Runtime.enable();
+utils.setLogConsoleApiMessageCalls(true);
+(async function test() {
+ Protocol.Runtime.evaluate({expression: 'console.log(42)'});
+  await Protocol.Runtime.onceConsoleAPICalled();
+ Protocol.Runtime.evaluate({expression: 'consoleTrace()'});
+  await Protocol.Runtime.onceConsoleAPICalled();
+ InspectorTest.completeTest();
+})();
diff --git a/deps/v8/test/inspector/runtime/command-line-api.js b/deps/v8/test/inspector/runtime/command-line-api.js
index 3a6080898b..16abde45e9 100644
--- a/deps/v8/test/inspector/runtime/command-line-api.js
+++ b/deps/v8/test/inspector/runtime/command-line-api.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks command line API.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks command line API.');
InspectorTest.runAsyncTestSuite([
async function testKeys() {
@@ -46,7 +46,7 @@ InspectorTest.runAsyncTestSuite([
},
async function testDebug() {
- InspectorTest.setupScriptMap();
+ session.setupScriptMap();
await Protocol.Debugger.enable();
InspectorTest.logMessage(await Protocol.Runtime.evaluate({expression: 'debug', includeCommandLineAPI: true}));
InspectorTest.logMessage(await Protocol.Runtime.evaluate({expression: 'undebug', includeCommandLineAPI: true}));
@@ -54,7 +54,7 @@ InspectorTest.runAsyncTestSuite([
await Protocol.Runtime.evaluate({expression: 'debug(foo)', includeCommandLineAPI: true});
Protocol.Runtime.evaluate({ expression: 'foo()'});
let message = await Protocol.Debugger.oncePaused();
- InspectorTest.logCallFrames(message.params.callFrames);
+ session.logCallFrames(message.params.callFrames);
InspectorTest.logMessage(message.params.hitBreakpoints);
await Protocol.Debugger.resume();
await Protocol.Runtime.evaluate({expression: 'undebug(foo)', includeCommandLineAPI: true});
@@ -65,7 +65,7 @@ InspectorTest.runAsyncTestSuite([
await Protocol.Runtime.evaluate({expression: 'this.debug(foo)'});
Protocol.Runtime.evaluate({ expression: 'foo()'});
message = await Protocol.Debugger.oncePaused();
- InspectorTest.logCallFrames(message.params.callFrames);
+ session.logCallFrames(message.params.callFrames);
InspectorTest.logMessage(message.params.hitBreakpoints);
await Protocol.Debugger.resume();
await Protocol.Runtime.evaluate({expression: 'this.undebug(foo)'});
diff --git a/deps/v8/test/inspector/runtime/compile-script-expected.txt b/deps/v8/test/inspector/runtime/compile-script-expected.txt
index 3d6d580487..23e6a64dc5 100644
--- a/deps/v8/test/inspector/runtime/compile-script-expected.txt
+++ b/deps/v8/test/inspector/runtime/compile-script-expected.txt
@@ -1,3 +1,4 @@
+Tests Runtime.compileScript
Compiling script: foo1.js
persist: false
compilation result:
@@ -63,4 +64,4 @@ compilation result:
}
}
}
------ \ No newline at end of file
+-----
diff --git a/deps/v8/test/inspector/runtime/compile-script.js b/deps/v8/test/inspector/runtime/compile-script.js
index 4f1c6468e1..6452e07a26 100644
--- a/deps/v8/test/inspector/runtime/compile-script.js
+++ b/deps/v8/test/inspector/runtime/compile-script.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests Runtime.compileScript');
+
var executionContextId;
Protocol.Debugger.enable().then(onDebuggerEnabled);
diff --git a/deps/v8/test/inspector/runtime/console-api-repeated-in-console-expected.txt b/deps/v8/test/inspector/runtime/console-api-repeated-in-console-expected.txt
index 04d2d90265..3ec657bfff 100644
--- a/deps/v8/test/inspector/runtime/console-api-repeated-in-console-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-api-repeated-in-console-expected.txt
@@ -3,4 +3,3 @@ api call: 42
api call: abc
console message: 42
console message: abc
-
diff --git a/deps/v8/test/inspector/runtime/console-api-repeated-in-console.js b/deps/v8/test/inspector/runtime/console-api-repeated-in-console.js
index f7de071654..aafdbdefd9 100644
--- a/deps/v8/test/inspector/runtime/console-api-repeated-in-console.js
+++ b/deps/v8/test/inspector/runtime/console-api-repeated-in-console.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Check that console.log is reported through Console domain as well.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Check that console.log is reported through Console domain as well.");
var expectedMessages = 4;
var messages = [];
diff --git a/deps/v8/test/inspector/runtime/console-assert.js b/deps/v8/test/inspector/runtime/console-assert.js
index 64be5e23d3..355149b2e8 100644
--- a/deps/v8/test/inspector/runtime/console-assert.js
+++ b/deps/v8/test/inspector/runtime/console-assert.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Checks that console.assert works and points to correct call frame.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Checks that console.assert works and points to correct call frame.");
-InspectorTest.addScript(`
+contextGroup.addScript(`
function testFunction() {
Function.prototype.apply = () => console.error('Should never call this');
console.assert(true);
diff --git a/deps/v8/test/inspector/runtime/console-deprecated-methods-expected.txt b/deps/v8/test/inspector/runtime/console-deprecated-methods-expected.txt
index 1b8e4aa2ce..4c1e26518c 100644
--- a/deps/v8/test/inspector/runtime/console-deprecated-methods-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-deprecated-methods-expected.txt
@@ -2,4 +2,3 @@ Tests checks that deprecation messages for console.
'console.timeline' is deprecated. Please use 'console.time' instead.
'console.timelineEnd' is deprecated. Please use 'console.timeEnd' instead.
'console.markTimeline' is deprecated. Please use 'console.timeStamp' instead.
-
diff --git a/deps/v8/test/inspector/runtime/console-deprecated-methods.js b/deps/v8/test/inspector/runtime/console-deprecated-methods.js
index ac13672977..e8ccd2a2fd 100644
--- a/deps/v8/test/inspector/runtime/console-deprecated-methods.js
+++ b/deps/v8/test/inspector/runtime/console-deprecated-methods.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Tests checks that deprecation messages for console.")
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests checks that deprecation messages for console.")
Protocol.Runtime.onConsoleAPICalled(messageAdded);
Protocol.Runtime.enable();
diff --git a/deps/v8/test/inspector/runtime/console-line-and-column-expected.txt b/deps/v8/test/inspector/runtime/console-line-and-column-expected.txt
index 4eab60af0d..4e3ce1e441 100644
--- a/deps/v8/test/inspector/runtime/console-line-and-column-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-line-and-column-expected.txt
@@ -1,3 +1,4 @@
+Tests line and column numbers in console messages
{
method : Runtime.consoleAPICalled
params : {
@@ -49,4 +50,4 @@
timestamp : <timestamp>
type : log
}
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/inspector/runtime/console-line-and-column.js b/deps/v8/test/inspector/runtime/console-line-and-column.js
index fe5c24f27c..e7011ebb19 100644
--- a/deps/v8/test/inspector/runtime/console-line-and-column.js
+++ b/deps/v8/test/inspector/runtime/console-line-and-column.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests line and column numbers in console messages');
+
Protocol.Runtime.enable();
addConsoleMessagePromise("console.log(239)")
diff --git a/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks-expected.txt b/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks-expected.txt
index 5a234ec78c..d1268b9695 100644
--- a/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks-expected.txt
@@ -18,4 +18,3 @@ Check that console.log doesn't run microtasks.
type : string
value : finished
}
-
diff --git a/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks.js b/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks.js
index 8320868469..c24e92876c 100644
--- a/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks.js
+++ b/deps/v8/test/inspector/runtime/console-log-doesnt-run-microtasks.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Check that console.log doesn't run microtasks.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Check that console.log doesn't run microtasks.");
-InspectorTest.addScript(
+contextGroup.addScript(
`
function testFunction()
{
diff --git a/deps/v8/test/inspector/runtime/console-memory.js b/deps/v8/test/inspector/runtime/console-memory.js
index e756dec794..45f86c035c 100644
--- a/deps/v8/test/inspector/runtime/console-memory.js
+++ b/deps/v8/test/inspector/runtime/console-memory.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks console.memory');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks console.memory');
InspectorTest.runAsyncTestSuite([
async function testWithoutMemory() {
diff --git a/deps/v8/test/inspector/runtime/console-messages-limits.js b/deps/v8/test/inspector/runtime/console-messages-limits.js
index a6a8ccefae..4a8159072e 100644
--- a/deps/v8/test/inspector/runtime/console-messages-limits.js
+++ b/deps/v8/test/inspector/runtime/console-messages-limits.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks that console message storage doesn\'t exceed limits');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that console message storage doesn\'t exceed limits');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function generateEmptyMessages(n) {
for (var i = 0; i < n; ++i) {
console.log('');
diff --git a/deps/v8/test/inspector/runtime/console-methods.js b/deps/v8/test/inspector/runtime/console-methods.js
index bd24776fbe..38ab5bd83f 100644
--- a/deps/v8/test/inspector/runtime/console-methods.js
+++ b/deps/v8/test/inspector/runtime/console-methods.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks console methods');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks console methods');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function testFunction() {
console.debug('debug');
console.error('error');
diff --git a/deps/v8/test/inspector/runtime/console-spec-expected.txt b/deps/v8/test/inspector/runtime/console-spec-expected.txt
new file mode 100644
index 0000000000..48cbc70fe2
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-spec-expected.txt
@@ -0,0 +1,30 @@
+Tests console object and its prototype
+
+Running test: consoleExistsOnGlobal
+true
+
+Running test: consoleHasRightPropertyDescriptor
+{
+ configurable : true
+ enumerable : false
+ value : <value>
+ writable : true
+}
+
+Running test: ConsoleNotExistsOnGlobal
+false
+
+Running test: prototypeChainMustBeCorrect
+true
+
+Running test: consoleToString
+[object Object]
+
+Running test: consoleMethodPropertyDescriptor
+{
+ configurable : true
+ enumerable : true
+ value : {
+ }
+ writable : true
+}
diff --git a/deps/v8/test/inspector/runtime/console-spec.js b/deps/v8/test/inspector/runtime/console-spec.js
new file mode 100644
index 0000000000..f37898fbab
--- /dev/null
+++ b/deps/v8/test/inspector/runtime/console-spec.js
@@ -0,0 +1,60 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests console object and its prototype');
+
+contextGroup.addScript(`
+var self = this;
+function checkPrototype() {
+ const prototype1 = Object.getPrototypeOf(console);
+ const prototype2 = Object.getPrototypeOf(prototype1);
+ if (Object.getOwnPropertyNames(prototype1).length !== 0)
+ return "false: The [[Prototype]] must have no properties";
+ if (prototype2 !== Object.prototype)
+ return "false: The [[Prototype]]'s [[Prototype]] must be %ObjectPrototype%";
+ return "true";
+}
+`);
+
+InspectorTest.runAsyncTestSuite([
+ async function consoleExistsOnGlobal() {
+ let message = await Protocol.Runtime.evaluate({
+ expression: 'self.hasOwnProperty(\'console\')', returnByValue: true});
+ InspectorTest.log(message.result.result.value);
+ },
+
+ async function consoleHasRightPropertyDescriptor() {
+ let message = await Protocol.Runtime.evaluate({
+ expression: 'Object.getOwnPropertyDescriptor(self, \'console\')',
+ returnByValue: true});
+ let result = message.result.result.value;
+ result.value = '<value>';
+ InspectorTest.logObject(result);
+ },
+
+ async function ConsoleNotExistsOnGlobal() {
+ let message = await Protocol.Runtime.evaluate({
+ expression: '\'Console\' in self', returnByValue: true})
+ InspectorTest.log(message.result.result.value);
+ },
+
+ async function prototypeChainMustBeCorrect() {
+ let message = await Protocol.Runtime.evaluate({
+ expression: "checkPrototype()", returnByValue: true });
+ InspectorTest.log(message.result.result.value);
+ },
+
+ async function consoleToString() {
+ let message = await Protocol.Runtime.evaluate({
+ expression: 'console.toString()', returnByValue: true})
+ InspectorTest.log(message.result.result.value);
+ },
+
+ async function consoleMethodPropertyDescriptor() {
+ let message = await Protocol.Runtime.evaluate({
+ expression: 'Object.getOwnPropertyDescriptor(console, \'log\')',
+ returnByValue: true});
+ InspectorTest.logObject(message.result.result.value);
+ }
+]);
diff --git a/deps/v8/test/inspector/runtime/console-time-end-format.js b/deps/v8/test/inspector/runtime/console-time-end-format.js
index c87f672e2c..7e4ff788b4 100644
--- a/deps/v8/test/inspector/runtime/console-time-end-format.js
+++ b/deps/v8/test/inspector/runtime/console-time-end-format.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks format of console.timeEnd output');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks format of console.timeEnd output');
Protocol.Runtime.enable();
Protocol.Runtime.onConsoleAPICalled(message => {
diff --git a/deps/v8/test/inspector/runtime/console-timestamp-expected.txt b/deps/v8/test/inspector/runtime/console-timestamp-expected.txt
index 5e4d7b5ada..5710530d1f 100644
--- a/deps/v8/test/inspector/runtime/console-timestamp-expected.txt
+++ b/deps/v8/test/inspector/runtime/console-timestamp-expected.txt
@@ -1,3 +1,4 @@
+Tests timestamps in console
Message has timestamp: true
Message timestamp doesn't differ too much from current time (one minute interval): true
Message 1 has non-decreasing timestamp: true
@@ -6,4 +7,3 @@ Message timestamp doesn't differ too much from current time (one minute interval
Message 2 has non-decreasing timestamp: true
Message has timestamp: true
Message timestamp doesn't differ too much from current time (one minute interval): true
-
diff --git a/deps/v8/test/inspector/runtime/console-timestamp.js b/deps/v8/test/inspector/runtime/console-timestamp.js
index 0dceaed23f..46ee76f925 100644
--- a/deps/v8/test/inspector/runtime/console-timestamp.js
+++ b/deps/v8/test/inspector/runtime/console-timestamp.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests timestamps in console');
+
var messages = [];
function messageAdded(data)
diff --git a/deps/v8/test/inspector/runtime/create-context.js b/deps/v8/test/inspector/runtime/create-context.js
index 4f86e6ec34..105bb59255 100644
--- a/deps/v8/test/inspector/runtime/create-context.js
+++ b/deps/v8/test/inspector/runtime/create-context.js
@@ -4,39 +4,48 @@
InspectorTest.log('Checks createContext().');
-InspectorTest.setupScriptMap();
-Protocol.Runtime.onExecutionContextCreated(InspectorTest.logMessage);
-Protocol.Debugger.onPaused((message) => {
- InspectorTest.logSourceLocation(message.params.callFrames[0].location);
- Protocol.Debugger.stepOut();
-});
var executionContextIds = new Set();
-Protocol.Debugger.onScriptParsed(message => executionContextIds.add(message.params.executionContextId));
-var contextGroupId;
-Protocol.Runtime.enable()
- .then(() => contextGroupId = utils.createContextGroup())
- .then(() => Protocol.Runtime.enable({}, contextGroupId))
- .then(() => Protocol.Debugger.enable())
- .then(() => Protocol.Debugger.enable({}, contextGroupId))
+var contextGroup1 = new InspectorTest.ContextGroup();
+var session1 = contextGroup1.connect();
+setup(session1);
+var contextGroup2 = new InspectorTest.ContextGroup();
+var session2 = contextGroup2.connect();
+setup(session2);
+
+session1.Protocol.Runtime.enable()
+ .then(() => session2.Protocol.Runtime.enable({}))
+ .then(() => session1.Protocol.Debugger.enable())
+ .then(() => session2.Protocol.Debugger.enable({}))
.then(InspectorTest.logMessage)
.then(() => {
- Protocol.Runtime.evaluate({ expression: 'debugger;' })
- Protocol.Runtime.evaluate({ expression: 'setTimeout(x => x * 2, 0)' }, contextGroupId);
- Protocol.Runtime.evaluate({ expression: 'setTimeout(x => x * 3, 0)' });
+ session1.Protocol.Runtime.evaluate({ expression: 'debugger;' });
+ session2.Protocol.Runtime.evaluate({expression: 'setTimeout(x => x * 2, 0)'});
+ session1.Protocol.Runtime.evaluate({ expression: 'setTimeout(x => x * 3, 0)' });
})
- .then(() => InspectorTest.waitPendingTasks())
+ .then(() => InspectorTest.waitForPendingTasks())
.then(() => {
InspectorTest.log(`Reported script's execution id: ${executionContextIds.size}`);
executionContextIds.clear();
})
- .then(() => utils.reconnect())
+ .then(() => session1.reconnect())
+ .then(() => session2.reconnect())
.then(() => {
- Protocol.Runtime.evaluate({ expression: 'debugger;' })
- Protocol.Runtime.evaluate({ expression: 'setTimeout(x => x * 2, 0)' }, contextGroupId);
- Protocol.Runtime.evaluate({ expression: 'setTimeout(x => x * 3, 0)' });
+ session1.Protocol.Runtime.evaluate({ expression: 'debugger;' })
+ session2.Protocol.Runtime.evaluate({ expression: 'setTimeout(x => x * 2, 0)' });
+ session1.Protocol.Runtime.evaluate({ expression: 'setTimeout(x => x * 3, 0)' });
})
- .then(() => InspectorTest.waitPendingTasks())
- .then(() => Protocol.Debugger.disable({}, contextGroupId))
- .then(() => Protocol.Debugger.disable({}))
+ .then(() => InspectorTest.waitForPendingTasks())
+ .then(() => session2.Protocol.Debugger.disable({}))
+ .then(() => session1.Protocol.Debugger.disable({}))
.then(() => InspectorTest.log(`Reported script's execution id: ${executionContextIds.size}`))
.then(InspectorTest.completeTest);
+
+function setup(session) {
+ session.Protocol.Runtime.onExecutionContextCreated(InspectorTest.logMessage);
+ session.setupScriptMap();
+ session.Protocol.Debugger.onPaused((message) => {
+ session.logSourceLocation(message.params.callFrames[0].location);
+ session.Protocol.Debugger.stepOut();
+ });
+ session.Protocol.Debugger.onScriptParsed(message => executionContextIds.add(message.params.executionContextId));
+}
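+
+// Each context group now gets its own session, so the two Runtime/Debugger
+// agents above operate independently; setup() wires identical listeners into
+// both sessions.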
diff --git a/deps/v8/test/inspector/runtime/es6-module-expected.txt b/deps/v8/test/inspector/runtime/es6-module-expected.txt
index 2c812b12bc..cbe63fe718 100644
--- a/deps/v8/test/inspector/runtime/es6-module-expected.txt
+++ b/deps/v8/test/inspector/runtime/es6-module-expected.txt
@@ -232,10 +232,6 @@ console.log(239)
executionContextId : <executionContextId>
lineNumber : 0
scriptId : <scriptId>
- stackTrace : {
- callFrames : [
- ]
- }
text : Uncaught SyntaxError: Unexpected token }
url : module4
}
diff --git a/deps/v8/test/inspector/runtime/es6-module.js b/deps/v8/test/inspector/runtime/es6-module.js
index 76ee91b83f..6d9e43486b 100644
--- a/deps/v8/test/inspector/runtime/es6-module.js
+++ b/deps/v8/test/inspector/runtime/es6-module.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks basic ES6 modules support.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks basic ES6 modules support.');
var module1 = `
export function foo() {
@@ -30,7 +30,7 @@ debugger;
var module4 = '}';
-InspectorTest.setupScriptMap();
+session.setupScriptMap();
// We get scriptParsed events for modules ..
Protocol.Debugger.onScriptParsed(InspectorTest.logMessage);
// .. scriptFailed to parse for modules with syntax error ..
@@ -38,7 +38,7 @@ Protocol.Debugger.onScriptFailedToParse(InspectorTest.logMessage);
// .. API messages from modules contain correct stack trace ..
Protocol.Runtime.onConsoleAPICalled(message => {
InspectorTest.log(`console.log(${message.params.args[0].value})`);
- InspectorTest.logCallFrames(message.params.stackTrace.callFrames);
+ session.logCallFrames(message.params.stackTrace.callFrames);
InspectorTest.log('');
});
// .. we could break inside module and scope contains correct list of variables ..
@@ -53,9 +53,9 @@ Protocol.Runtime.onExceptionThrown(InspectorTest.logMessage);
Protocol.Runtime.enable();
Protocol.Debugger.enable()
- .then(() => InspectorTest.addModule(module1, "module1"))
- .then(() => InspectorTest.addModule(module2, "module2"))
- .then(() => InspectorTest.addModule(module3, "module3"))
- .then(() => InspectorTest.addModule(module4, "module4"))
- .then(() => InspectorTest.waitPendingTasks())
+ .then(() => contextGroup.addModule(module1, "module1"))
+ .then(() => contextGroup.addModule(module2, "module2"))
+ .then(() => contextGroup.addModule(module3, "module3"))
+ .then(() => contextGroup.addModule(module4, "module4"))
+ .then(() => InspectorTest.waitForPendingTasks())
.then(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/runtime/evaluate-async-expected.txt b/deps/v8/test/inspector/runtime/evaluate-async-expected.txt
index c03dd7a409..c521648f7b 100644
--- a/deps/v8/test/inspector/runtime/evaluate-async-expected.txt
+++ b/deps/v8/test/inspector/runtime/evaluate-async-expected.txt
@@ -25,10 +25,6 @@ Running test: testRejectedPromise
}
exceptionId : <exceptionId>
lineNumber : 0
- stackTrace : {
- callFrames : [
- ]
- }
text : Uncaught (in promise)
}
result : {
diff --git a/deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error.js b/deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error.js
index 676b843a97..43b71ada32 100644
--- a/deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error.js
+++ b/deps/v8/test/inspector/runtime/evaluate-async-with-wrap-error.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Test that Runtime.evaluate correctly process errors during wrapping \
-async result.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Test that " +
+ "Runtime.evaluate correctly process errors during wrapping async result.");
var evaluateArguments = {
expression: "Promise.resolve(Symbol(123))",
diff --git a/deps/v8/test/inspector/runtime/evaluate-async.js b/deps/v8/test/inspector/runtime/evaluate-async.js
index c7ccc17f62..db02d9c150 100644
--- a/deps/v8/test/inspector/runtime/evaluate-async.js
+++ b/deps/v8/test/inspector/runtime/evaluate-async.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Tests that Runtime.evaluate works with awaitPromise flag.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that Runtime.evaluate works with awaitPromise flag.");
-InspectorTest.addScript(`
+contextGroup.addScript(`
function createPromiseAndScheduleResolve()
{
var resolveCallback;
diff --git a/deps/v8/test/inspector/runtime/evaluate-empty-stack.js b/deps/v8/test/inspector/runtime/evaluate-empty-stack.js
index 0147401112..c09dfd18b8 100644
--- a/deps/v8/test/inspector/runtime/evaluate-empty-stack.js
+++ b/deps/v8/test/inspector/runtime/evaluate-empty-stack.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Tests that Runtime.evaluate works with an empty stack");
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that Runtime.evaluate works with an empty stack");
-InspectorTest.addScript("var text = [48116210, 34460128, 1406661984071834]");
+contextGroup.addScript("var text = [48116210, 34460128, 1406661984071834]");
var message = { expression: "text.map(x => x.toString(36)).join(' ')" };
diff --git a/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero-expected.txt b/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero-expected.txt
index 9521a06c06..5e26aa9b46 100644
--- a/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero-expected.txt
+++ b/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero-expected.txt
@@ -6,4 +6,3 @@ Tests that DevTools doesn't crash on Runtime.evaluate with contextId equals 0.
}
id : <messageId>
}
-
diff --git a/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero.js b/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero.js
index c55284ae55..272c820115 100644
--- a/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero.js
+++ b/deps/v8/test/inspector/runtime/evaluate-with-context-id-equal-zero.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Tests that DevTools doesn't crash on Runtime.evaluate with contextId equals 0.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that DevTools doesn't crash on Runtime.evaluate with contextId equals 0.");
Protocol.Runtime.evaluate({ "contextId": 0, "expression": "" })
.then(message => InspectorTest.logMessage(message))
diff --git a/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js b/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js
index 1387413344..8ea0ea4faf 100644
--- a/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js
+++ b/deps/v8/test/inspector/runtime/evaluate-with-generate-preview.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Tests that Runtime.evaluate will generate correct previews.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that Runtime.evaluate will generate correct previews.");
-InspectorTest.addScript(
+contextGroup.addScript(
`
var f1 = function(){};
@@ -62,10 +62,10 @@ Object.defineProperty(parentObj, 'propNotNamedProto', {
set: function() {}
});
var objInheritsGetterProperty = {__proto__: parentObj};
-allowAccessorFormatting(objInheritsGetterProperty);
+inspector.allowAccessorFormatting(objInheritsGetterProperty);
`);
-InspectorTest.setupInjectedScriptEnvironment();
+contextGroup.setupInjectedScriptEnvironment();
InspectorTest.runTestSuite([
function testObjectPropertiesPreview(next)
diff --git a/deps/v8/test/inspector/runtime/exception-thrown-expected.txt b/deps/v8/test/inspector/runtime/exception-thrown-expected.txt
index 228c348298..fb4cf70ae0 100644
--- a/deps/v8/test/inspector/runtime/exception-thrown-expected.txt
+++ b/deps/v8/test/inspector/runtime/exception-thrown-expected.txt
@@ -77,10 +77,6 @@ Check that exceptionThrown is supported by test runner.
executionContextId : <executionContextId>
lineNumber : 0
scriptId : <scriptId>
- stackTrace : {
- callFrames : [
- ]
- }
text : Uncaught SyntaxError: Unexpected token }
}
timestamp : <timestamp>
diff --git a/deps/v8/test/inspector/runtime/exception-thrown.js b/deps/v8/test/inspector/runtime/exception-thrown.js
index da437fc158..91f48bc0a8 100644
--- a/deps/v8/test/inspector/runtime/exception-thrown.js
+++ b/deps/v8/test/inspector/runtime/exception-thrown.js
@@ -2,11 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Check that exceptionThrown is supported by test runner.")
+let {session, contextGroup, Protocol} = InspectorTest.start("Check that exceptionThrown is supported by test runner.")
Protocol.Runtime.enable();
Protocol.Runtime.onExceptionThrown(message => InspectorTest.logMessage(message));
Protocol.Runtime.evaluate({ expression: "setTimeout(() => { \n throw new Error() }, 0)" });
Protocol.Runtime.evaluate({ expression: "setTimeout(\" }\", 0)" });
Protocol.Runtime.evaluate({ expression: "setTimeout(() => { \n throw 239; }, 0)" });
-InspectorTest.completeTestAfterPendingTimeouts();
+InspectorTest.waitForPendingTasks().then(InspectorTest.completeTest);
diff --git a/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt b/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
index b36c811771..339595608a 100644
--- a/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-on-proxy-expected.txt
@@ -8,4 +8,4 @@ Check that while Runtime.getProperties call on proxy object no user defined trap
value : 0
}
}
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/inspector/runtime/get-properties-on-proxy.js b/deps/v8/test/inspector/runtime/get-properties-on-proxy.js
index 71bbdc1878..b2981df290 100644
--- a/deps/v8/test/inspector/runtime/get-properties-on-proxy.js
+++ b/deps/v8/test/inspector/runtime/get-properties-on-proxy.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Check that while Runtime.getProperties call on proxy object no user defined trap will be executed.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Check that while Runtime.getProperties call on proxy object no user defined trap will be executed.");
-InspectorTest.addScript(`
+contextGroup.addScript(`
var self = this;
function testFunction()
{
diff --git a/deps/v8/test/inspector/runtime/get-properties-preview-expected.txt b/deps/v8/test/inspector/runtime/get-properties-preview-expected.txt
index fd1f31a4c2..a3f4eb40f9 100644
--- a/deps/v8/test/inspector/runtime/get-properties-preview-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-preview-expected.txt
@@ -1,3 +1,4 @@
+Tests generated previews in Runtime.getProperties
p1 : Object
p2 : Object
p1 : {
@@ -29,4 +30,3 @@ p2 : {
}
]
}
-
diff --git a/deps/v8/test/inspector/runtime/get-properties-preview.js b/deps/v8/test/inspector/runtime/get-properties-preview.js
index 62d853a48d..92c6e56426 100644
--- a/deps/v8/test/inspector/runtime/get-properties-preview.js
+++ b/deps/v8/test/inspector/runtime/get-properties-preview.js
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.setupInjectedScriptEnvironment();
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests generated previews in Runtime.getProperties');
+
+contextGroup.setupInjectedScriptEnvironment();
Protocol.Runtime.evaluate({ "expression": "({p1: {a:1}, p2: {b:'foo', bb:'bar'}})" }).then(callbackEvaluate);
diff --git a/deps/v8/test/inspector/runtime/get-properties.js b/deps/v8/test/inspector/runtime/get-properties.js
index 2c1222b581..56a4c4be0b 100644
--- a/deps/v8/test/inspector/runtime/get-properties.js
+++ b/deps/v8/test/inspector/runtime/get-properties.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks Runtime.getProperties method');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks Runtime.getProperties method');
InspectorTest.runAsyncTestSuite([
async function testObject5() {
diff --git a/deps/v8/test/inspector/runtime/internal-properties-entries.js b/deps/v8/test/inspector/runtime/internal-properties-entries.js
index 4897bd139d..9555ae5558 100644
--- a/deps/v8/test/inspector/runtime/internal-properties-entries.js
+++ b/deps/v8/test/inspector/runtime/internal-properties-entries.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks internal [[Entries]] in Runtime.getProperties output');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks internal [[Entries]] in Runtime.getProperties output');
Protocol.Runtime.enable();
diff --git a/deps/v8/test/inspector/runtime/internal-properties.js b/deps/v8/test/inspector/runtime/internal-properties.js
index 2ee4c459fa..1e42f917ea 100644
--- a/deps/v8/test/inspector/runtime/internal-properties.js
+++ b/deps/v8/test/inspector/runtime/internal-properties.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks internal properties in Runtime.getProperties output');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks internal properties in Runtime.getProperties output');
-InspectorTest.addScript(`
+contextGroup.addScript(`
function* foo() {
yield 1;
}
diff --git a/deps/v8/test/inspector/runtime/length-or-size-description.js b/deps/v8/test/inspector/runtime/length-or-size-description.js
index ec3ed6d263..0006bfb61d 100644
--- a/deps/v8/test/inspector/runtime/length-or-size-description.js
+++ b/deps/v8/test/inspector/runtime/length-or-size-description.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Test that descriptions for arrays, maps, and sets include the correct length or size.")
+let {session, contextGroup, Protocol} = InspectorTest.start("Test that descriptions for arrays, maps, and sets include the correct length or size.")
-InspectorTest.setupInjectedScriptEnvironment();
+contextGroup.setupInjectedScriptEnvironment();
Promise.all([
testExpression("new Set()"),
diff --git a/deps/v8/test/inspector/runtime/property-on-console-proto-expected.txt b/deps/v8/test/inspector/runtime/property-on-console-proto-expected.txt
index 6e75294e82..f64914a2d0 100644
--- a/deps/v8/test/inspector/runtime/property-on-console-proto-expected.txt
+++ b/deps/v8/test/inspector/runtime/property-on-console-proto-expected.txt
@@ -9,4 +9,3 @@ Tests that property defined on console.__proto__ doesn't observable on other Obj
}
}
}
-
diff --git a/deps/v8/test/inspector/runtime/property-on-console-proto.js b/deps/v8/test/inspector/runtime/property-on-console-proto.js
index 29a206d210..c7ee6c2d14 100644
--- a/deps/v8/test/inspector/runtime/property-on-console-proto.js
+++ b/deps/v8/test/inspector/runtime/property-on-console-proto.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Tests that property defined on console.__proto__ doesn't observable on other Objects.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that property defined on console.__proto__ doesn't observable on other Objects.");
-InspectorTest.addScript(`
+contextGroup.addScript(`
function testFunction()
{
var amountOfProperties = 0;
diff --git a/deps/v8/test/inspector/runtime/protocol-works-with-different-locale-expected.txt b/deps/v8/test/inspector/runtime/protocol-works-with-different-locale-expected.txt
index ee70e94c2a..da9eccc694 100644
--- a/deps/v8/test/inspector/runtime/protocol-works-with-different-locale-expected.txt
+++ b/deps/v8/test/inspector/runtime/protocol-works-with-different-locale-expected.txt
@@ -1,3 +1,4 @@
+Tests that protocol works with different locales
Running test: consoleLogWithDefaultLocale
{
diff --git a/deps/v8/test/inspector/runtime/protocol-works-with-different-locale.js b/deps/v8/test/inspector/runtime/protocol-works-with-different-locale.js
index 950bfdbf6b..833b927c16 100644
--- a/deps/v8/test/inspector/runtime/protocol-works-with-different-locale.js
+++ b/deps/v8/test/inspector/runtime/protocol-works-with-different-locale.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+let {session, contextGroup, Protocol} = InspectorTest.start('Tests that protocol works with different locales');
+
Protocol.Runtime.enable();
Protocol.Runtime.onConsoleAPICalled(InspectorTest.logMessage);
diff --git a/deps/v8/test/inspector/runtime/run-script-async-expected.txt b/deps/v8/test/inspector/runtime/run-script-async-expected.txt
index c6a53caee6..fc1ce0eb97 100644
--- a/deps/v8/test/inspector/runtime/run-script-async-expected.txt
+++ b/deps/v8/test/inspector/runtime/run-script-async-expected.txt
@@ -175,10 +175,6 @@ Running test: testAwaitRejectedPromise
}
exceptionId : <exceptionId>
lineNumber : 0
- stackTrace : {
- callFrames : [
- ]
- }
text : Uncaught (in promise)
}
result : {
@@ -188,4 +184,4 @@ Running test: testAwaitRejectedPromise
}
}
}
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/inspector/runtime/run-script-async.js b/deps/v8/test/inspector/runtime/run-script-async.js
index a99d1efc3e..484ad37ef9 100644
--- a/deps/v8/test/inspector/runtime/run-script-async.js
+++ b/deps/v8/test/inspector/runtime/run-script-async.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Tests that Runtime.compileScript and Runtime.runScript work with awaitPromise flag.");
+let {session, contextGroup, Protocol} = InspectorTest.start("Tests that Runtime.compileScript and Runtime.runScript work with awaitPromise flag.");
InspectorTest.runTestSuite([
function testRunAndCompileWithoutAgentEnable(next)
diff --git a/deps/v8/test/inspector/runtime/runtime-evaluate-with-dirty-context.js b/deps/v8/test/inspector/runtime/runtime-evaluate-with-dirty-context.js
index 32c2e325cc..5f42d49abd 100644
--- a/deps/v8/test/inspector/runtime/runtime-evaluate-with-dirty-context.js
+++ b/deps/v8/test/inspector/runtime/runtime-evaluate-with-dirty-context.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log('Checks that Runtime.evaluate works with dirty context.');
-InspectorTest.setupInjectedScriptEnvironment();
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that Runtime.evaluate works with dirty context.');
+contextGroup.setupInjectedScriptEnvironment();
Protocol.Runtime.enable();
Protocol.Runtime.onConsoleAPICalled(InspectorTest.logMessage);
Protocol.Runtime.evaluate({expression: 'console.log(42)'})
diff --git a/deps/v8/test/inspector/runtime/runtime-restore.js b/deps/v8/test/inspector/runtime/runtime-restore.js
index 5c2fea5768..09e44677e5 100644
--- a/deps/v8/test/inspector/runtime/runtime-restore.js
+++ b/deps/v8/test/inspector/runtime/runtime-restore.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.v8
-InspectorTest.log('Checks that Runtime agent correctly restore its state.');
+let {session, contextGroup, Protocol} = InspectorTest.start('Checks that Runtime agent correctly restore its state.');
-InspectorTest.addScript(`
+contextGroup.addScript(`
var formatter = {
header: function(x)
{
@@ -56,9 +56,9 @@ InspectorTest.runTestSuite([
function testSetCustomObjectFormatterEnabled(next) {
Protocol.Runtime.onConsoleAPICalled(InspectorTest.logMessage);
- // cleanup console message storage
- reconnect();
- Protocol.Runtime.enable()
+ Protocol.Runtime.discardConsoleEntries()
+ .then(reconnect)
+ .then(() => Protocol.Runtime.enable())
.then(() => Protocol.Runtime.setCustomObjectFormatterEnabled({ enabled: true }))
.then(reconnect)
.then(() => Protocol.Runtime.evaluate({ expression: 'console.log({ name: 42 })'}))
@@ -73,5 +73,5 @@ InspectorTest.runTestSuite([
function reconnect() {
InspectorTest.logMessage('will reconnect..');
- utils.reconnect();
+ session.reconnect();
}
diff --git a/deps/v8/test/inspector/runtime/set-or-map-entries.js b/deps/v8/test/inspector/runtime/set-or-map-entries.js
index 300b842154..4206c87de3 100644
--- a/deps/v8/test/inspector/runtime/set-or-map-entries.js
+++ b/deps/v8/test/inspector/runtime/set-or-map-entries.js
@@ -2,9 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-InspectorTest.log("Test that Runtime.getProperties doesn't truncate set and map entries in internalProperties.")
+let {session, contextGroup, Protocol} = InspectorTest.start("Test that Runtime.getProperties doesn't truncate set and map entries in internalProperties.")
-InspectorTest.addScript(`
+contextGroup.addScript(`
function createSet(size) {
var s = new Set();
var a = {};
@@ -22,7 +22,7 @@ InspectorTest.addScript(`
}
`);
-InspectorTest.setupInjectedScriptEnvironment();
+contextGroup.setupInjectedScriptEnvironment();
Protocol.Debugger.enable();
Protocol.Runtime.enable();
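
The inspector test hunks above all apply the same migration: `InspectorTest.start()` now returns the handles that used to be implicit globals, and context-scoped helpers (`addScript`, `setupInjectedScriptEnvironment`, `reconnect`) move onto the returned `contextGroup` and `session`. A minimal sketch of the migrated test shape, with an invented test body for illustration:

// Illustrative sketch of the new harness shape; the test body is not from this patch.
let {session, contextGroup, Protocol} =
    InspectorTest.start('Checks an illustrative expression.');
contextGroup.addScript(`function f() { return 42; }`);  // was InspectorTest.addScript(...)
Protocol.Runtime.enable();
Protocol.Runtime.evaluate({expression: 'f()'})
    .then(InspectorTest.logMessage)
    .then(InspectorTest.completeTest);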
diff --git a/deps/v8/test/inspector/task-runner.cc b/deps/v8/test/inspector/task-runner.cc
index 577ab306c7..79ccc8a8ec 100644
--- a/deps/v8/test/inspector/task-runner.cc
+++ b/deps/v8/test/inspector/task-runner.cc
@@ -12,15 +12,20 @@
namespace {
-const int kTaskRunnerIndex = 2;
-const int kContextGroupIdIndex = 3;
-
void ReportUncaughtException(v8::Isolate* isolate,
const v8::TryCatch& try_catch) {
CHECK(try_catch.HasCaught());
v8::HandleScope handle_scope(isolate);
std::string message = *v8::String::Utf8Value(try_catch.Message()->Get());
- fprintf(stderr, "Unhandle exception: %s\n", message.data());
+ int line = try_catch.Message()
+ ->GetLineNumber(isolate->GetCurrentContext())
+ .FromJust();
+ std::string source_line =
+ *v8::String::Utf8Value(try_catch.Message()
+ ->GetSourceLine(isolate->GetCurrentContext())
+ .ToLocalChecked());
+ fprintf(stderr, "Unhandle exception: %s @%s[%d]\n", message.data(),
+ source_line.data(), line);
}
v8::internal::Vector<uint16_t> ToVector(v8::Local<v8::String> str) {
@@ -32,14 +37,18 @@ v8::internal::Vector<uint16_t> ToVector(v8::Local<v8::String> str) {
} // namespace
-TaskRunner::TaskRunner(v8::ExtensionConfiguration* extensions,
+TaskRunner::TaskRunner(IsolateData::SetupGlobalTasks setup_global_tasks,
bool catch_exceptions,
- v8::base::Semaphore* ready_semaphore)
+ v8::base::Semaphore* ready_semaphore,
+ v8::StartupData* startup_data,
+ InspectorClientImpl::FrontendChannel* channel)
: Thread(Options("Task Runner")),
- extensions_(extensions),
+ setup_global_tasks_(std::move(setup_global_tasks)),
+ startup_data_(startup_data),
+ channel_(channel),
catch_exceptions_(catch_exceptions),
ready_semaphore_(ready_semaphore),
- isolate_(nullptr),
+ data_(nullptr),
process_queue_semaphore_(0),
nested_loop_count_(0) {
Start();
@@ -47,45 +56,10 @@ TaskRunner::TaskRunner(v8::ExtensionConfiguration* extensions,
TaskRunner::~TaskRunner() { Join(); }
-void TaskRunner::InitializeIsolate() {
- v8::Isolate::CreateParams params;
- params.array_buffer_allocator =
- v8::ArrayBuffer::Allocator::NewDefaultAllocator();
- isolate_ = v8::Isolate::New(params);
- isolate_->SetMicrotasksPolicy(v8::MicrotasksPolicy::kScoped);
- v8::Isolate::Scope isolate_scope(isolate_);
- v8::HandleScope handle_scope(isolate_);
- NewContextGroup();
- if (ready_semaphore_) ready_semaphore_->Signal();
-}
-
-v8::Local<v8::Context> TaskRunner::NewContextGroup() {
- v8::Local<v8::ObjectTemplate> global_template =
- v8::ObjectTemplate::New(isolate_);
- v8::Local<v8::Context> context =
- v8::Context::New(isolate_, extensions_, global_template);
- context->SetAlignedPointerInEmbedderData(kTaskRunnerIndex, this);
- intptr_t context_group_id = ++last_context_group_id_;
- // Should be 2-byte aligned.
- context->SetAlignedPointerInEmbedderData(
- kContextGroupIdIndex, reinterpret_cast<void*>(context_group_id * 2));
- contexts_[context_group_id].Reset(isolate_, context);
- return context;
-}
-
-v8::Local<v8::Context> TaskRunner::GetContext(int context_group_id) {
- return contexts_[context_group_id].Get(isolate_);
-}
-
-int TaskRunner::GetContextGroupId(v8::Local<v8::Context> context) {
- return static_cast<int>(
- reinterpret_cast<intptr_t>(
- context->GetAlignedPointerFromEmbedderData(kContextGroupIdIndex)) /
- 2);
-}
-
void TaskRunner::Run() {
- InitializeIsolate();
+ data_.reset(new IsolateData(this, std::move(setup_global_tasks_),
+ startup_data_, channel_));
+ if (ready_semaphore_) ready_semaphore_->Signal();
RunMessageLoop(false);
}
@@ -94,19 +68,19 @@ void TaskRunner::RunMessageLoop(bool only_protocol) {
while (nested_loop_count_ == loop_number && !is_terminated_.Value()) {
TaskRunner::Task* task = GetNext(only_protocol);
if (!task) return;
- v8::Isolate::Scope isolate_scope(isolate_);
+ v8::Isolate::Scope isolate_scope(isolate());
if (catch_exceptions_) {
- v8::TryCatch try_catch(isolate_);
- task->Run(isolate_, contexts_.begin()->second);
+ v8::TryCatch try_catch(isolate());
+ task->RunOnIsolate(data_.get());
delete task;
if (try_catch.HasCaught()) {
- ReportUncaughtException(isolate_, try_catch);
+ ReportUncaughtException(isolate(), try_catch);
fflush(stdout);
fflush(stderr);
_exit(0);
}
} else {
- task->Run(isolate_, contexts_.begin()->second);
+ task->RunOnIsolate(data_.get());
delete task;
}
}
@@ -127,19 +101,6 @@ void TaskRunner::Terminate() {
process_queue_semaphore_.Signal();
}
-void TaskRunner::RegisterModule(v8::internal::Vector<uint16_t> name,
- v8::Local<v8::Module> module) {
- modules_[name] = v8::Global<v8::Module>(isolate_, module);
-}
-
-v8::MaybeLocal<v8::Module> TaskRunner::ModuleResolveCallback(
- v8::Local<v8::Context> context, v8::Local<v8::String> specifier,
- v8::Local<v8::Module> referrer) {
- std::string str = *v8::String::Utf8Value(specifier);
- TaskRunner* runner = TaskRunner::FromContext(context);
- return runner->modules_[ToVector(specifier)].Get(runner->isolate_);
-}
-
TaskRunner::Task* TaskRunner::GetNext(bool only_protocol) {
for (;;) {
if (is_terminated_.Value()) return nullptr;
@@ -159,60 +120,55 @@ TaskRunner::Task* TaskRunner::GetNext(bool only_protocol) {
return nullptr;
}
-TaskRunner* TaskRunner::FromContext(v8::Local<v8::Context> context) {
- return static_cast<TaskRunner*>(
- context->GetAlignedPointerFromEmbedderData(kTaskRunnerIndex));
+AsyncTask::AsyncTask(IsolateData* data, const char* task_name)
+ : instrumenting_(data && task_name) {
+ if (!instrumenting_) return;
+ data->inspector()->inspector()->asyncTaskScheduled(
+ v8_inspector::StringView(reinterpret_cast<const uint8_t*>(task_name),
+ strlen(task_name)),
+ this, false);
}
-AsyncTask::AsyncTask(const char* task_name,
- v8_inspector::V8Inspector* inspector)
- : inspector_(task_name ? inspector : nullptr) {
- if (inspector_) {
- inspector_->asyncTaskScheduled(
- v8_inspector::StringView(reinterpret_cast<const uint8_t*>(task_name),
- strlen(task_name)),
- this, false);
- }
-}
-
-void AsyncTask::Run(v8::Isolate* isolate,
- const v8::Global<v8::Context>& context) {
- if (inspector_) inspector_->asyncTaskStarted(this);
- AsyncRun(isolate, context);
- if (inspector_) inspector_->asyncTaskFinished(this);
+void AsyncTask::Run() {
+ if (instrumenting_) data()->inspector()->inspector()->asyncTaskStarted(this);
+ AsyncRun();
+ if (instrumenting_) data()->inspector()->inspector()->asyncTaskFinished(this);
}
ExecuteStringTask::ExecuteStringTask(
+ IsolateData* data, int context_group_id, const char* task_name,
const v8::internal::Vector<uint16_t>& expression,
v8::Local<v8::String> name, v8::Local<v8::Integer> line_offset,
- v8::Local<v8::Integer> column_offset, v8::Local<v8::Boolean> is_module,
- const char* task_name, v8_inspector::V8Inspector* inspector)
- : AsyncTask(task_name, inspector),
+ v8::Local<v8::Integer> column_offset, v8::Local<v8::Boolean> is_module)
+ : AsyncTask(data, task_name),
expression_(expression),
name_(ToVector(name)),
line_offset_(line_offset.As<v8::Int32>()->Value()),
column_offset_(column_offset.As<v8::Int32>()->Value()),
- is_module_(is_module->Value()) {}
+ is_module_(is_module->Value()),
+ context_group_id_(context_group_id) {}
ExecuteStringTask::ExecuteStringTask(
- const v8::internal::Vector<const char>& expression)
- : AsyncTask(nullptr, nullptr), expression_utf8_(expression) {}
+ const v8::internal::Vector<const char>& expression, int context_group_id)
+ : AsyncTask(nullptr, nullptr),
+ expression_utf8_(expression),
+ context_group_id_(context_group_id) {}
-void ExecuteStringTask::AsyncRun(v8::Isolate* isolate,
- const v8::Global<v8::Context>& context) {
- v8::MicrotasksScope microtasks_scope(isolate,
+void ExecuteStringTask::AsyncRun() {
+ v8::MicrotasksScope microtasks_scope(isolate(),
v8::MicrotasksScope::kRunMicrotasks);
- v8::HandleScope handle_scope(isolate);
- v8::Local<v8::Context> local_context = context.Get(isolate);
- v8::Context::Scope context_scope(local_context);
+ v8::HandleScope handle_scope(isolate());
+ v8::Local<v8::Context> context = data()->GetContext(context_group_id_);
+ v8::Context::Scope context_scope(context);
v8::Local<v8::String> name =
- v8::String::NewFromTwoByte(isolate, name_.start(),
+ v8::String::NewFromTwoByte(isolate(), name_.start(),
v8::NewStringType::kNormal, name_.length())
.ToLocalChecked();
- v8::Local<v8::Integer> line_offset = v8::Integer::New(isolate, line_offset_);
+ v8::Local<v8::Integer> line_offset =
+ v8::Integer::New(isolate(), line_offset_);
v8::Local<v8::Integer> column_offset =
- v8::Integer::New(isolate, column_offset_);
+ v8::Integer::New(isolate(), column_offset_);
v8::ScriptOrigin origin(
name, line_offset, column_offset,
@@ -221,15 +177,15 @@ void ExecuteStringTask::AsyncRun(v8::Isolate* isolate,
/* source_map_url */ v8::Local<v8::Value>(),
/* resource_is_opaque */ v8::Local<v8::Boolean>(),
/* is_wasm */ v8::Local<v8::Boolean>(),
- v8::Boolean::New(isolate, is_module_));
+ v8::Boolean::New(isolate(), is_module_));
v8::Local<v8::String> source;
if (expression_.length()) {
- source = v8::String::NewFromTwoByte(isolate, expression_.start(),
+ source = v8::String::NewFromTwoByte(isolate(), expression_.start(),
v8::NewStringType::kNormal,
expression_.length())
.ToLocalChecked();
} else {
- source = v8::String::NewFromUtf8(isolate, expression_utf8_.start(),
+ source = v8::String::NewFromUtf8(isolate(), expression_utf8_.start(),
v8::NewStringType::kNormal,
expression_utf8_.length())
.ToLocalChecked();
@@ -238,22 +194,11 @@ void ExecuteStringTask::AsyncRun(v8::Isolate* isolate,
v8::ScriptCompiler::Source scriptSource(source, origin);
if (!is_module_) {
v8::Local<v8::Script> script;
- if (!v8::ScriptCompiler::Compile(local_context, &scriptSource)
- .ToLocal(&script))
+ if (!v8::ScriptCompiler::Compile(context, &scriptSource).ToLocal(&script))
return;
v8::MaybeLocal<v8::Value> result;
- result = script->Run(local_context);
+ result = script->Run(context);
} else {
- v8::Local<v8::Module> module;
- if (!v8::ScriptCompiler::CompileModule(isolate, &scriptSource)
- .ToLocal(&module)) {
- return;
- }
- if (!module->Instantiate(local_context, &TaskRunner::ModuleResolveCallback))
- return;
- v8::Local<v8::Value> result;
- if (!module->Evaluate(local_context).ToLocal(&result)) return;
- TaskRunner* runner = TaskRunner::FromContext(local_context);
- runner->RegisterModule(name_, module);
+ data()->RegisterModule(context, name_, &scriptSource);
}
}
diff --git a/deps/v8/test/inspector/task-runner.h b/deps/v8/test/inspector/task-runner.h
index 9bb135efb9..c99bc6c95a 100644
--- a/deps/v8/test/inspector/task-runner.h
+++ b/deps/v8/test/inspector/task-runner.h
@@ -15,16 +15,7 @@
#include "src/base/platform/platform.h"
#include "src/locked-queue-inl.h"
#include "src/vector.h"
-
-struct VectorCompare {
- bool operator()(const v8::internal::Vector<uint16_t>& lhs,
- const v8::internal::Vector<uint16_t>& rhs) const {
- for (int i = 0; i < lhs.length() && i < rhs.length(); ++i) {
- if (lhs[i] != rhs[i]) return lhs[i] < rhs[i];
- }
- return false;
- }
-};
+#include "test/inspector/isolate-data.h"
class TaskRunner : public v8::base::Thread {
public:
@@ -32,13 +23,27 @@ class TaskRunner : public v8::base::Thread {
public:
virtual ~Task() {}
virtual bool is_inspector_task() = 0;
- virtual void Run(v8::Isolate* isolate,
- const v8::Global<v8::Context>& context) = 0;
+ void RunOnIsolate(IsolateData* data) {
+ data_ = data;
+ Run();
+ data_ = nullptr;
+ }
+
+ protected:
+ virtual void Run() = 0;
+ v8::Isolate* isolate() const { return data_->isolate(); }
+ IsolateData* data() const { return data_; }
+
+ private:
+ IsolateData* data_ = nullptr;
};
- TaskRunner(v8::ExtensionConfiguration* extensions, bool catch_exceptions,
- v8::base::Semaphore* ready_semaphore);
+ TaskRunner(IsolateData::SetupGlobalTasks setup_global_tasks,
+ bool catch_exceptions, v8::base::Semaphore* ready_semaphore,
+ v8::StartupData* startup_data,
+ InspectorClientImpl::FrontendChannel* channel);
virtual ~TaskRunner();
+ IsolateData* data() const { return data_.get(); }
// Thread implementation.
void Run() override;
@@ -50,31 +55,18 @@ class TaskRunner : public v8::base::Thread {
// TaskRunner takes ownership.
void Append(Task* task);
- static TaskRunner* FromContext(v8::Local<v8::Context>);
-
- v8::Local<v8::Context> NewContextGroup();
- v8::Local<v8::Context> GetContext(int context_group_id);
- static int GetContextGroupId(v8::Local<v8::Context> context);
-
void Terminate();
- void RegisterModule(v8::internal::Vector<uint16_t> name,
- v8::Local<v8::Module> module);
- static v8::MaybeLocal<v8::Module> ModuleResolveCallback(
- v8::Local<v8::Context> context, v8::Local<v8::String> specifier,
- v8::Local<v8::Module> referrer);
-
private:
- void InitializeIsolate();
Task* GetNext(bool only_protocol);
+ v8::Isolate* isolate() const { return data_->isolate(); }
- v8::ExtensionConfiguration* extensions_;
+ IsolateData::SetupGlobalTasks setup_global_tasks_;
+ v8::StartupData* startup_data_;
+ InspectorClientImpl::FrontendChannel* channel_;
bool catch_exceptions_;
v8::base::Semaphore* ready_semaphore_;
-
- v8::Isolate* isolate_;
- intptr_t last_context_group_id_ = 0;
- std::map<intptr_t, v8::Global<v8::Context>> contexts_;
+ std::unique_ptr<IsolateData> data_;
// deferred_queue_ combined with queue_ (in this order) have all tasks in the
// correct order. Sometimes we skip non-protocol tasks by moving them from
@@ -83,10 +75,6 @@ class TaskRunner : public v8::base::Thread {
v8::internal::LockedQueue<Task*> deffered_queue_;
v8::base::Semaphore process_queue_semaphore_;
- std::map<v8::internal::Vector<uint16_t>, v8::Global<v8::Module>,
- VectorCompare>
- modules_;
-
int nested_loop_count_;
v8::base::AtomicNumber<int> is_terminated_;
@@ -96,40 +84,39 @@ class TaskRunner : public v8::base::Thread {
class AsyncTask : public TaskRunner::Task {
public:
- AsyncTask(const char* task_name, v8_inspector::V8Inspector* inspector);
+ AsyncTask(IsolateData* data, const char* task_name);
virtual ~AsyncTask() = default;
- void Run(v8::Isolate* isolate,
- const v8::Global<v8::Context>& context) override;
- virtual void AsyncRun(v8::Isolate* isolate,
- const v8::Global<v8::Context>& context) = 0;
-
protected:
- v8_inspector::V8Inspector* inspector_;
+ virtual void AsyncRun() = 0;
+ void Run() override;
+
+ bool instrumenting_;
};
class ExecuteStringTask : public AsyncTask {
public:
- ExecuteStringTask(const v8::internal::Vector<uint16_t>& expression,
+ ExecuteStringTask(IsolateData* data, int context_group_id,
+ const char* task_name,
+ const v8::internal::Vector<uint16_t>& expression,
v8::Local<v8::String> name,
v8::Local<v8::Integer> line_offset,
v8::Local<v8::Integer> column_offset,
- v8::Local<v8::Boolean> is_module, const char* task_name,
- v8_inspector::V8Inspector* inspector);
- explicit ExecuteStringTask(
- const v8::internal::Vector<const char>& expression);
+ v8::Local<v8::Boolean> is_module);
+ ExecuteStringTask(const v8::internal::Vector<const char>& expression,
+ int context_group_id);
bool is_inspector_task() override { return false; }
- void AsyncRun(v8::Isolate* isolate,
- const v8::Global<v8::Context>& context) override;
-
private:
+ void AsyncRun() override;
+
v8::internal::Vector<uint16_t> expression_;
v8::internal::Vector<const char> expression_utf8_;
v8::internal::Vector<uint16_t> name_;
int32_t line_offset_ = 0;
int32_t column_offset_ = 0;
bool is_module_ = false;
+ int context_group_id_;
DISALLOW_COPY_AND_ASSIGN(ExecuteStringTask);
};
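
Taken together, the task-runner changes above replace per-runner isolate bookkeeping (`contexts_`, `modules_`, embedder-data lookups) with a single `IsolateData` owned by the runner: tasks no longer receive an isolate and context as `Run()` arguments but fetch them through `data()` inside `RunOnIsolate()`, which is what allows one runner to serve multiple context groups.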
diff --git a/deps/v8/test/inspector/testcfg.py b/deps/v8/test/inspector/testcfg.py
index 8dea3c96e8..9c943d9848 100644
--- a/deps/v8/test/inspector/testcfg.py
+++ b/deps/v8/test/inspector/testcfg.py
@@ -5,6 +5,7 @@
import itertools
import os
import re
+import shlex
from testrunner.local import testsuite
from testrunner.local import utils
@@ -43,7 +44,7 @@ class InspectorProtocolTestSuite(testsuite.TestSuite):
flags = [] + context.mode_flags
flags_match = re.findall(FLAGS_PATTERN, source)
for match in flags_match:
- flags += match.strip().split()
+ flags += shlex.split(match.strip())
testname = testcase.path.split(os.path.sep)[-1]
testfilename = os.path.join(self.root, testcase.path + self.suffix())
protocoltestfilename = os.path.join(self.root, PROTOCOL_TEST_JS)
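
Switching from `str.split()` to `shlex.split(str)` lets quoted flag values survive parsing: a `// Flags:` comment containing `--foo="a b"` now yields the single argument `--foo=a b` instead of two fragments.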
diff --git a/deps/v8/test/intl/date-format/date-format-to-parts.js b/deps/v8/test/intl/date-format/date-format-to-parts.js
index cd954acc79..fd04dc5bd0 100644
--- a/deps/v8/test/intl/date-format/date-format-to-parts.js
+++ b/deps/v8/test/intl/date-format/date-format-to-parts.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --datetime-format-to-parts
-
var d = new Date(2016, 11, 15, 14, 10, 34);
var df = Intl.DateTimeFormat("ja",
{hour: 'numeric', minute: 'numeric', second: 'numeric', year: 'numeric',
diff --git a/deps/v8/test/intl/general/case-mapping.js b/deps/v8/test/intl/general/case-mapping.js
index feb0ff54f6..0028c4150f 100644
--- a/deps/v8/test/intl/general/case-mapping.js
+++ b/deps/v8/test/intl/general/case-mapping.js
@@ -128,6 +128,8 @@ assertEquals("abci\u0307", "aBcI\u0307".toLowerCase());
assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("fil"));
assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("zh-Hant-TW"));
assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("i-klingon"));
+assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("i-enochian"));
+assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("x-foobar"));
// Up to 8 chars are allowed for the primary language tag in BCP 47.
assertEquals("abci\u0307", "aBcI\u0307".toLocaleLowerCase("longlang"));
diff --git a/deps/v8/test/js-perf-test/BytecodeHandlers/compare.js b/deps/v8/test/js-perf-test/BytecodeHandlers/compare.js
new file mode 100644
index 0000000000..ea12ff4b21
--- /dev/null
+++ b/deps/v8/test/js-perf-test/BytecodeHandlers/compare.js
@@ -0,0 +1,157 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function addBenchmark(name, test) {
+ new BenchmarkSuite(name, [1000],
+ [
+ new Benchmark(name, false, false, 0, test)
+ ]);
+}
+
+addBenchmark('Smi-StrictEquals-True', SmiStrictEqualsTrue);
+addBenchmark('Smi-StrictEquals-False', SmiStrictEqualsFalse);
+addBenchmark('Number-StrictEquals-True', NumberStrictEqualsTrue);
+addBenchmark('Number-StrictEquals-False', NumberStrictEqualsFalse);
+addBenchmark('String-StrictEquals-True', StringStrictEqualsTrue);
+addBenchmark('String-StrictEquals-False', StringStrictEqualsFalse);
+addBenchmark('SmiString-StrictEquals', MixedStrictEquals);
+addBenchmark('Smi-Equals-True', SmiEqualsTrue);
+addBenchmark('Smi-Equals-False', SmiEqualsFalse);
+addBenchmark('Number-Equals-True', NumberEqualsTrue);
+addBenchmark('Number-Equals-False', NumberEqualsFalse);
+addBenchmark('String-Equals-True', StringEqualsTrue);
+addBenchmark('String-Equals-False', StringEqualsFalse);
+addBenchmark('SmiString-Equals', MixedEquals);
+addBenchmark('ObjectNull-Equals', ObjectEqualsNull);
+addBenchmark('Smi-RelationalCompare', SmiRelationalCompare);
+addBenchmark('Number-RelationalCompare', NumberRelationalCompare);
+addBenchmark('String-RelationalCompare', StringRelationalCompare);
+addBenchmark('SmiString-RelationalCompare', MixedRelationalCompare);
+
+var null_object;
+
+function strictEquals(a, b) {
+ for (var i = 0; i < 1000; ++i) {
+ a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b;
+ a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b;
+ a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b;
+ a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b;
+ a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b;
+ a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b;
+ a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b;
+ a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b;
+ a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b;
+ a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b; a === b;
+ }
+}
+
+function equals(a, b) {
+ for (var i = 0; i < 1000; ++i) {
+ a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b;
+ a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b;
+ a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b;
+ a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b;
+ a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b;
+ a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b;
+ a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b;
+ a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b;
+ a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b;
+ a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b; a == b;
+ }
+}
+
+// Relational comparison handlers are similar, so use one benchmark to measure
+// all of them.
+function relationalCompare(a, b) {
+ for (var i = 0; i < 1000; ++i) {
+ a > b; a > b; a > b; a > b; a > b; a > b; a > b; a > b; a > b; a > b;
+ a > b; a > b; a > b; a > b; a > b; a > b; a > b; a > b; a > b; a > b;
+ a > b; a > b; a > b; a > b; a > b; a > b; a > b; a > b; a > b; a > b;
+ a >= b; a >= b; a >= b; a >= b; a >= b; a >= b; a >= b; a >= b; a >= b; a >= b;
+ a >= b; a >= b; a >= b; a >= b; a >= b; a >= b; a >= b; a >= b; a >= b; a >= b;
+ a >= b; a >= b; a >= b; a >= b; a >= b; a >= b; a >= b; a >= b; a >= b; a >= b;
+ a < b; a < b; a < b; a < b; a < b; a < b; a < b; a < b; a < b; a < b;
+ a < b; a < b; a < b; a < b; a < b; a < b; a < b; a < b; a < b; a < b;
+ a < b; a < b; a < b; a < b; a < b; a < b; a < b; a < b; a < b; a < b;
+ a <= b; a <= b; a <= b; a <= b; a <= b; a <= b; a <= b; a <= b; a <= b; a <= b;
+ a <= b; a <= b; a <= b; a <= b; a <= b; a <= b; a <= b; a <= b; a <= b; a <= b;
+ a <= b; a <= b; a <= b; a <= b; a <= b; a <= b; a <= b; a <= b; a <= b; a <= b;
+ }
+}
+
+function SmiStrictEqualsFalse() {
+ strictEquals(10, 20);
+}
+
+function SmiStrictEqualsTrue() {
+ strictEquals(10, 10);
+}
+
+function NumberStrictEqualsFalse() {
+ strictEquals(0.3333, 0.3334);
+}
+
+function NumberStrictEqualsTrue() {
+ strictEquals(0.3333, 0.3333);
+}
+
+function StringStrictEqualsFalse() {
+ strictEquals("abc", "def");
+}
+
+function StringStrictEqualsTrue() {
+ strictEquals("abc", "abc");
+}
+
+function MixedStrictEquals() {
+ strictEquals(10, "10");
+}
+
+function SmiEqualsFalse() {
+ equals(10, 20);
+}
+
+function SmiEqualsTrue() {
+ equals(10, 10);
+}
+
+function NumberEqualsFalse() {
+ equals(0.3333, 0.3334);
+}
+
+function NumberEqualsTrue() {
+ equals(0.3333, 0.3333);
+}
+
+function StringEqualsFalse() {
+ equals("abc", "def");
+}
+
+function StringEqualsTrue() {
+ equals("abc", "abc");
+}
+
+function MixedEquals() {
+ equals(10, "10");
+}
+
+function ObjectEqualsNull(null_object) {
+ equals(null_object, null);
+}
+
+function SmiRelationalCompare() {
+ relationalCompare(10, 20);
+}
+
+function NumberRelationalCompare() {
+ relationalCompare(0.3333, 0.3334);
+}
+
+function StringRelationalCompare() {
+ relationalCompare("abc", "def");
+}
+
+function MixedRelationalCompare() {
+ relationalCompare(10, "10");
+}
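
A note on the shape of `strictEquals`, `equals`, and `relationalCompare` above: each body repeats the comparison a hundred times per loop iteration so that the bytecode handler under test dominates loop and counter overhead, which matters because the suite runs with optimization disabled (`--no-opt` in the JSTests.json entry below).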
diff --git a/deps/v8/test/js-perf-test/BytecodeHandlers/run.js b/deps/v8/test/js-perf-test/BytecodeHandlers/run.js
new file mode 100644
index 0000000000..9333e9313e
--- /dev/null
+++ b/deps/v8/test/js-perf-test/BytecodeHandlers/run.js
@@ -0,0 +1,29 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+load('../base.js');
+
+load('compare.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-BytecodeHandler(Score): ' + result);
+}
+
+function PrintStep(name) {}
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError,
+ NotifyStep: PrintStep });
diff --git a/deps/v8/test/js-perf-test/ForLoops/for_loop.js b/deps/v8/test/js-perf-test/ForLoops/for_loop.js
new file mode 100644
index 0000000000..a7324b0c10
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ForLoops/for_loop.js
@@ -0,0 +1,42 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite('Let-Standard', [1000], [
+ new Benchmark('Let-Standard', false, false, 0, LetLoop),
+]);
+
+new BenchmarkSuite('Var-Standard', [1000], [
+ new Benchmark('Var-Standard', false, false, 0, VarLoop),
+]);
+
+var x = [-1, 1, 4];
+var y = [-11, -1, 1, 2, 3, 4, 5, 6, 20, 44, 87, 99, 100];
+
+function LetLoop() {
+ "use strict";
+ const ret = [];
+ for (let i = 0; i < x.length; i++) {
+ for (let z = 0; z < y.length; z++) {
+ if (x[i] == y[z]) {
+ ret.push(x[i]);
+ break;
+ }
+ }
+ }
+ return ret;
+}
+
+function VarLoop() {
+ "use strict";
+ const ret = [];
+ for (var i = 0; i < x.length; i++) {
+ for (var z = 0; z < y.length; z++) {
+ if (x[i] == y[z]) {
+ ret.push(x[i]);
+ break;
+ }
+ }
+ }
+ return ret;
+}
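
The Let/Var pair differs only in the loop-header declarations. The semantic difference being isolated is that `let` creates a fresh binding per iteration while `var` hoists a single function-scoped binding, which is observable when the variable is captured (illustrative snippet, not part of the benchmark):

// `let` gives each iteration its own `i`; `var` shares one `i`.
let fns = [];
for (let i = 0; i < 3; i++) fns.push(() => i);
print(fns.map(f => f()));  // 0,1,2 here; with `var i`, 3,3,3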
diff --git a/deps/v8/test/js-perf-test/ForLoops/run.js b/deps/v8/test/js-perf-test/ForLoops/run.js
new file mode 100644
index 0000000000..3894d64ae9
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ForLoops/run.js
@@ -0,0 +1,25 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('../base.js');
+load('for_loop.js');
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-ForLoop(Score): ' + result);
+}
+
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
index 2911ea36ca..1b80626b28 100644
--- a/deps/v8/test/js-perf-test/JSTests.json
+++ b/deps/v8/test/js-perf-test/JSTests.json
@@ -34,30 +34,6 @@
"results_regexp": "^Generators\\-Generators\\(Score\\): (.+)$"
},
{
- "name": "GeneratorsIgnition",
- "path": ["Generators"],
- "main": "run.js",
- "resources": ["generators.js"],
- "flags": [
- "--harmony-function-sent",
- "--ignition"
- ],
- "results_regexp": "^Generators\\-Generators\\(Score\\): (.+)$"
- },
- {
- "name": "GeneratorsIgnitionTurbofan",
- "path": ["Generators"],
- "main": "run.js",
- "resources": ["generators.js"],
- "flags": [
- "--harmony-function-sent",
- "--ignition",
- "--turbo",
- "--turbo-from-bytecode"
- ],
- "results_regexp": "^Generators\\-Generators\\(Score\\): (.+)$"
- },
- {
"name": "RestParameters",
"path": ["RestParameters"],
"main": "run.js",
@@ -65,7 +41,9 @@
"units": "score",
"results_regexp": "^%s\\-RestParameters\\(Score\\): (.+)$",
"tests": [
- {"name": "Basic1"}
+ {"name": "Basic1"},
+ {"name": "ReturnArgsBabel"},
+ {"name": "ReturnArgsNative"}
]
},
{
@@ -382,6 +360,64 @@
{"name": "FastMap"},
{"name": "ObjectMap"}
]
+ },
+ {
+ "name": "ForLoops",
+ "path": ["ForLoops"],
+ "main": "run.js",
+ "resources": [
+ "for_loop.js"
+ ],
+ "results_regexp": "^%s\\-ForLoop\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Let-Standard"},
+ {"name": "Var-Standard"}
+ ]
+ },
+ {
+ "name": "Modules",
+ "path": ["Modules"],
+ "main": "run.js",
+ "resources": ["basic-export.js", "basic-import.js", "basic-namespace.js", "value.js"],
+ "flags": [
+ "--allow-natives-syntax",
+ "--harmony-dynamic-import"
+ ],
+ "results_regexp": "^%s\\-Modules\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "BasicExport"},
+ {"name": "BasicImport"},
+ {"name": "BasicNamespace"}
+ ]
+ },
+ {
+ "name": "BytecodeHandlers",
+ "path": ["BytecodeHandlers"],
+ "main": "run.js",
+ "resources": [ "compare.js" ],
+ "flags": [ "--no-opt" ],
+ "results_regexp": "^%s\\-BytecodeHandler\\(Score\\): (.+)$",
+ "tests": [
+ {"name": "Smi-StrictEquals-True"},
+ {"name": "Smi-StrictEquals-False"},
+ {"name": "Number-StrictEquals-True"},
+ {"name": "Number-StrictEquals-False"},
+ {"name": "String-StrictEquals-True"},
+ {"name": "String-StrictEquals-False"},
+ {"name": "SmiString-StrictEquals"},
+ {"name": "Smi-Equals-True"},
+ {"name": "Smi-Equals-False"},
+ {"name": "Number-Equals-True"},
+ {"name": "Number-Equals-False"},
+ {"name": "String-Equals-True"},
+ {"name": "String-Equals-False"},
+ {"name": "ObjectNull-Equals"},
+ {"name": "SmiString-Equals"},
+ {"name": "Smi-RelationalCompare"},
+ {"name": "Number-RelationalCompare"},
+ {"name": "String-RelationalCompare"},
+ {"name": "SmiString-RelationalCompare"}
+ ]
}
]
}
diff --git a/deps/v8/test/js-perf-test/Modules/basic-export.js b/deps/v8/test/js-perf-test/Modules/basic-export.js
new file mode 100644
index 0000000000..3b889747af
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Modules/basic-export.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export let value = 0;
+for (let i = 0; i < iterations; ++i) ++value;
+if (value != iterations) throw value;
diff --git a/deps/v8/test/js-perf-test/Modules/basic-import.js b/deps/v8/test/js-perf-test/Modules/basic-import.js
new file mode 100644
index 0000000000..be7be0f34c
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Modules/basic-import.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {value, set} from "value.js";
+for (let i = 0; i < iterations; ++i) set(value + 1);
+if (value != iterations) throw value;
+set(0);
diff --git a/deps/v8/test/js-perf-test/Modules/basic-namespace.js b/deps/v8/test/js-perf-test/Modules/basic-namespace.js
new file mode 100644
index 0000000000..4fcdcb5fd9
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Modules/basic-namespace.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as m from "value.js";
+for (let i = 0; i < iterations; ++i) m.set(m.value + 1);
+if (m.value != iterations) throw m.value;
+m.set(0);
diff --git a/deps/v8/test/js-perf-test/Modules/run.js b/deps/v8/test/js-perf-test/Modules/run.js
new file mode 100644
index 0000000000..81174d2aa7
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Modules/run.js
@@ -0,0 +1,63 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+load('../base.js');
+
+
+new BenchmarkSuite('BasicExport', [100], [
+ new Benchmark('BasicExport', false, false, 0, BasicExport)
+]);
+
+new BenchmarkSuite('BasicImport', [100], [
+ new Benchmark('BasicImport', false, false, 0, BasicImport)
+]);
+
+new BenchmarkSuite('BasicNamespace', [100], [
+ new Benchmark('BasicNamespace', false, false, 0, BasicNamespace)
+]);
+
+
+const iterations = 3000000;
+
+
+function BasicExport() {
+ let success = false;
+ import("basic-export.js").then(_ => success = true);
+ %RunMicrotasks();
+ if (!success) throw new Error(666);
+}
+
+function BasicImport() {
+ let success = false;
+ import("basic-import.js").then(_ => success = true);
+ %RunMicrotasks();
+ if (!success) throw new Error(666);
+}
+
+function BasicNamespace() {
+ let success = false;
+ import("basic-namespace.js").then(_ => success = true);
+ %RunMicrotasks();
+ if (!success) throw new Error(666);
+}
+
+
+var success = true;
+
+function PrintResult(name, result) {
+ print(name + '-Modules(Score): ' + result);
+}
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
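
The `%RunMicrotasks()` call in each benchmark is what makes the synchronous `success` check valid: `import()` resolves its promise on the microtask queue, which would not otherwise drain before the `if (!success) throw` line. This is also why the suite entry in JSTests.json passes `--allow-natives-syntax`.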
diff --git a/deps/v8/test/js-perf-test/Modules/value.js b/deps/v8/test/js-perf-test/Modules/value.js
new file mode 100644
index 0000000000..8c6a88e7d1
--- /dev/null
+++ b/deps/v8/test/js-perf-test/Modules/value.js
@@ -0,0 +1,6 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export let value = 0;
+export function set(x) { value = x };
diff --git a/deps/v8/test/js-perf-test/PropertyQueries/property-queries.js b/deps/v8/test/js-perf-test/PropertyQueries/property-queries.js
index f763d262d4..06146c752a 100644
--- a/deps/v8/test/js-perf-test/PropertyQueries/property-queries.js
+++ b/deps/v8/test/js-perf-test/PropertyQueries/property-queries.js
@@ -71,7 +71,10 @@ function IntArray(size) {
// Switch object's properties and elements to dictionary mode.
function MakeDictionaryMode(obj) {
obj.foo = 0;
+ obj.bar = 0;
+ // Delete the second-to-last property first to force normalization.
delete obj.foo;
+ delete obj.bar;
obj[1e9] = 0;
return obj;
}
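
The extra `bar` property makes normalization reliable: deleting the most recently added property can be undone by rolling back a map transition, so deleting only `foo` may leave the object in fast mode. A quick check, assuming a d8 shell with `--allow-natives-syntax` (not part of the patch):

// Deleting a property other than the last-added one forces dictionary mode.
var o = {};
o.foo = 0;
o.bar = 0;
delete o.foo;  // `foo` is not the last-added property => normalize
delete o.bar;
print(%HasFastProperties(o));  // expected: false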
diff --git a/deps/v8/test/js-perf-test/RestParameters/rest.js b/deps/v8/test/js-perf-test/RestParameters/rest.js
index cf52f5f45f..1e5f67a362 100644
--- a/deps/v8/test/js-perf-test/RestParameters/rest.js
+++ b/deps/v8/test/js-perf-test/RestParameters/rest.js
@@ -7,6 +7,18 @@ new BenchmarkSuite('Basic1', [1000], [
BasicRest1, BasicRest1Setup, BasicRest1TearDown)
]);
+new BenchmarkSuite('ReturnArgsBabel', [10000], [
+ new Benchmark('ReturnArgsBabel', false, false, 0,
+ ReturnArgsBabel, ReturnArgsBabelSetup,
+ ReturnArgsBabelTearDown)
+]);
+
+new BenchmarkSuite('ReturnArgsNative', [10000], [
+ new Benchmark('ReturnArgsNative', false, false, 0,
+ ReturnArgsNative, ReturnArgsNativeSetup,
+ ReturnArgsNativeTearDown)
+]);
+
// ----------------------------------------------------------------------------
var result;
@@ -28,3 +40,57 @@ function BasicRest1() {
function BasicRest1TearDown() {
return result == 550;
}
+
+// ----------------------------------------------------------------------------
+
+var length = 50;
+var numbers = Array.apply(null, {length}).map(Number.call, Number);
+var strings = numbers.map(String.call, String);
+
+function ReturnArgsBabelFunction(unused) {
+ "use strict";
+ for (var _len = arguments.length, args = Array(_len > 1 ? _len - 1 : 0),
+ _key = 1;
+ _key < _len; _key++) {
+ args[_key - 1] = arguments[_key];
+ }
+ return args;
+}
+
+function ReturnArgsBabelSetup() {
+ // Warm up with FAST_HOLEY_ELEMENTS
+ result = ReturnArgsBabelFunction(...strings);
+ // Warm up with FAST_HOLEY_SMI_ELEMENTS
+ result = ReturnArgsBabelFunction(...numbers);
+}
+
+function ReturnArgsBabel() {
+ result = ReturnArgsBabelFunction(...strings);
+ result = ReturnArgsBabelFunction(...numbers);
+}
+
+function ReturnArgsBabelTearDown() {
+ return result.indexOf(0) === 0;
+}
+
+// ----------------------------------------------------------------------------
+
+function ReturnArgsNativeFunction(unused, ...args) {
+ return args;
+}
+
+function ReturnArgsNativeSetup() {
+ // Warm up with FAST_HOLEY_ELEMENTS
+ result = ReturnArgsNativeFunction(...strings);
+ // Warm up with FAST_HOLEY_SMI_ELEMENTS
+ result = ReturnArgsNativeFunction(...numbers);
+}
+
+function ReturnArgsNative() {
+ result = ReturnArgsNativeFunction(...strings);
+ result = ReturnArgsNativeFunction(...numbers);
+}
+
+function ReturnArgsNativeTearDown() {
+ return result.indexOf(0) === 0;
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed.json b/deps/v8/test/js-perf-test/SixSpeed.json
index 0a0dcc024a..15a2792c6a 100644
--- a/deps/v8/test/js-perf-test/SixSpeed.json
+++ b/deps/v8/test/js-perf-test/SixSpeed.json
@@ -28,6 +28,31 @@
]
},
{
+ "name": "Classes",
+ "path": ["SixSpeed"],
+ "results_regexp": "^%s\\(Score\\): (.+)$",
+ "tests": [
+ {
+ "name": "ES5",
+ "main": "run.js",
+ "resources": ["classes/es5.js"],
+ "test_flags": ["classes/es5"]
+ },
+ {
+ "name": "Babel",
+ "main": "run.js",
+ "resources": ["classes/babel.js"],
+ "test_flags": ["classes/babel"]
+ },
+ {
+ "name": "ES6",
+ "main": "run.js",
+ "resources": ["classes/es6.js"],
+ "test_flags": ["classes/es6"]
+ }
+ ]
+ },
+ {
"name": "Computed property names in object literals",
"path": ["SixSpeed"],
"results_regexp": "^%s\\(Score\\): (.+)$",
@@ -92,6 +117,31 @@
]
},
{
+ "name": "Super",
+ "path": ["SixSpeed"],
+ "results_regexp": "^%s\\(Score\\): (.+)$",
+ "tests": [
+ {
+ "name": "ES5",
+ "main": "run.js",
+ "resources": ["super/es5.js"],
+ "test_flags": ["super/es5"]
+ },
+ {
+ "name": "Babel",
+ "main": "run.js",
+ "resources": ["super/babel.js"],
+ "test_flags": ["super/babel"]
+ },
+ {
+ "name": "ES6",
+ "main": "run.js",
+ "resources": ["super/es6.js"],
+ "test_flags": ["super/es6"]
+ }
+ ]
+ },
+ {
"name": "SuperSpread",
"path": ["SixSpeed"],
"flags": ["--future"],
@@ -142,6 +192,82 @@
"test_flags": ["spread_literal/es6"]
}
]
+ },
+ {
+ "name": "Map-Set has",
+ "path": ["SixSpeed"],
+ "results_regexp": "^%s\\(Score\\): (.+)$",
+ "tests": [
+ {
+ "name": "ES5",
+ "main": "run.js",
+ "resources": ["map_set_lookup/es5.js"],
+ "test_flags": ["map_set_lookup/es5"]
+ },
+ {
+ "name": "ES6",
+ "main": "run.js",
+ "resources": ["map_set_lookup/es6.js"],
+ "test_flags": ["map_set_lookup/es6"]
+ }
+ ]
+ },
+ {
+ "name": "Map-Set add-set-has",
+ "path": ["SixSpeed"],
+ "results_regexp": "^%s\\(Score\\): (.+)$",
+ "tests": [
+ {
+ "name": "ES5",
+ "main": "run.js",
+ "resources": ["map_set_add/es5.js"],
+ "test_flags": ["map_set_add/es5"]
+ },
+ {
+ "name": "ES6",
+ "main": "run.js",
+ "resources": ["map_set_add/es6.js"],
+ "test_flags": ["map_set_add/es6"]
+ }
+ ]
+ },
+ {
+ "name": "Map-Set add-set-has object",
+ "path": ["SixSpeed"],
+ "results_regexp": "^%s\\(Score\\): (.+)$",
+ "tests": [
+ {
+ "name": "ES5",
+ "main": "run.js",
+ "resources": ["map_set_object/es5.js"],
+ "test_flags": ["map_set_object/es5"]
+ },
+ {
+ "name": "ES6",
+ "main": "run.js",
+ "resources": ["map_set_object/es6.js"],
+ "test_flags": ["map_set_object/es6"]
+ }
+ ]
+ },
+ {
+ "name": "Map get string",
+ "path": ["SixSpeed"],
+ "results_regexp": "^%s\\(Score\\): (.+)$",
+ "tests": [
+ {
+ "name": "ES5",
+ "main": "run.js",
+ "resources": ["map_set_add/es5.js"],
+ "test_flags": ["map_set_add/es5"]
+ },
+ {
+ "name": "ES6",
+ "main": "run.js",
+ "resources": ["map_set_add/es6.js"],
+ "test_flags": ["map_set_add/es6"]
+ }
+ ]
}
]
}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/classes/babel.js b/deps/v8/test/js-perf-test/SixSpeed/classes/babel.js
new file mode 100644
index 0000000000..e73ec1ae0c
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/classes/babel.js
@@ -0,0 +1,51 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+'use strict';
+
+new BenchmarkSuite('Babel', [1000], [
+ new Benchmark('Babel', false, false, 0, Babel),
+]);
+
+var _createClass = function() {
+ function defineProperties(target, props) {
+ for (var i = 0; i < props.length; i++) {
+ var descriptor = props[i];
+ descriptor.enumerable = descriptor.enumerable || false;
+ descriptor.configurable = true;
+ if ('value' in descriptor) descriptor.writable = true;
+ Object.defineProperty(target, descriptor.key, descriptor);
+ }
+ }
+ return function(Constructor, protoProps, staticProps) {
+ if (protoProps) defineProperties(Constructor.prototype, protoProps);
+ if (staticProps) defineProperties(Constructor, staticProps);
+ return Constructor;
+ };
+}();
+
+function _classCallCheck(instance, Constructor) {
+ if (!(instance instanceof Constructor)) {
+ throw new TypeError('Cannot call a class as a function');
+ }
+}
+
+var C = function() {
+ function C() {
+ _classCallCheck(this, C);
+
+ this.foo = 'bar';
+ }
+
+ _createClass(C, [{key: 'bar', value: function bar() {}}]);
+
+ return C;
+}();
+
+function Babel() {
+ return new C();
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/classes/es5.js b/deps/v8/test/js-perf-test/SixSpeed/classes/es5.js
new file mode 100644
index 0000000000..410aa33b8d
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/classes/es5.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+new BenchmarkSuite('ES5', [1000], [
+ new Benchmark('ES5', false, false, 0, ES5),
+]);
+
+function C() {
+ this.foo = 'bar';
+}
+
+C.prototype.bar = function() {};
+
+function ES5() {
+ return new C();
+};
diff --git a/deps/v8/test/js-perf-test/SixSpeed/classes/es6.js b/deps/v8/test/js-perf-test/SixSpeed/classes/es6.js
new file mode 100644
index 0000000000..d7ccc87e22
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/classes/es6.js
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+new BenchmarkSuite('ES6', [1000], [
+ new Benchmark('ES6', false, false, 0, ES6),
+]);
+
+class C {
+ constructor() {
+ this.foo = 'bar';
+ }
+ bar() {}
+}
+
+function ES6() {
+ return new C();
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/map_set_add/es5.js b/deps/v8/test/js-perf-test/SixSpeed/map_set_add/es5.js
new file mode 100644
index 0000000000..69f2907565
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/map_set_add/es5.js
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+new BenchmarkSuite("ES5", [1000], [new Benchmark("ES5", false, false, 0, ES5)]);
+
+function ES5() {
+ var map = {}, set = [];
+
+ for (var i = 0; i < 250; i++) {
+ map[i] = i;
+ set.push(i);
+ }
+
+ map.foo = "bar";
+ set.push("bar");
+ return "foo" in map && set.indexOf("bar") >= 0;
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/map_set_add/es6.js b/deps/v8/test/js-perf-test/SixSpeed/map_set_add/es6.js
new file mode 100644
index 0000000000..8ea533c5f9
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/map_set_add/es6.js
@@ -0,0 +1,22 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+new BenchmarkSuite("ES6", [1000], [new Benchmark("ES6", false, false, 0, ES6)]);
+
+function ES6() {
+ var map = new Map(), set = new Set();
+
+ for (var i = 0; i < 250; i++) {
+ map.set(i, i);
+ set.add(i);
+ }
+
+ map.set("foo", "bar");
+ set.add("bar");
+
+ return map.has("foo") && set.has("bar");
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/map_set_lookup/es5.js b/deps/v8/test/js-perf-test/SixSpeed/map_set_lookup/es5.js
new file mode 100644
index 0000000000..ddc4ebad89
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/map_set_lookup/es5.js
@@ -0,0 +1,32 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+new BenchmarkSuite(
+ "ES5",
+ [1000],
+ [new Benchmark("ES5", false, false, 0, ES5, Setup)]
+);
+
+var keys, values, set, key;
+
+function Setup() {
+ (keys = []), (values = []), (set = []), (key = {});
+
+ for (var i = 0; i < 500; i++) {
+ keys.push(i);
+ values.push(i);
+ set.push(i);
+ }
+
+ keys.push(key);
+ values.push("bar");
+ set.push(key);
+}
+
+function ES5() {
+ return set.indexOf(key) >= 0 && keys.indexOf(key) >= 0;
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/map_set_lookup/es6.js b/deps/v8/test/js-perf-test/SixSpeed/map_set_lookup/es6.js
new file mode 100644
index 0000000000..c12b4abf59
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/map_set_lookup/es6.js
@@ -0,0 +1,30 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+new BenchmarkSuite(
+ "ES6",
+ [1000],
+ [new Benchmark("ES6", false, false, 0, ES6, Setup)]
+);
+
+var map, set, key;
+
+function Setup() {
+ (map = new Map()), (set = new Set()), (key = {});
+
+ for (var i = 0; i < 500; i++) {
+ map.set(i, i);
+ set.add(i);
+ }
+
+ map.set(key, "bar");
+ set.add(key);
+}
+
+function ES6() {
+ return map.has(key) && set.has(key);
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/map_set_object/es5.js b/deps/v8/test/js-perf-test/SixSpeed/map_set_object/es5.js
new file mode 100644
index 0000000000..9eba1fb2fc
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/map_set_object/es5.js
@@ -0,0 +1,24 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+new BenchmarkSuite("ES5", [1000], [new Benchmark("ES5", false, false, 0, ES5)]);
+
+function ES5() {
+ var keys = [], values = [], set = [], key = {};
+
+ for (var i = 0; i < 500; i++) {
+ keys.push(i);
+ values.push(i);
+ set.push(i);
+ }
+
+ keys.push(key);
+ values.push("bar");
+ set.push(key);
+
+ return set.indexOf(key) >= 0 && keys.indexOf(key) >= 0;
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/map_set_object/es6.js b/deps/v8/test/js-perf-test/SixSpeed/map_set_object/es6.js
new file mode 100644
index 0000000000..ed24e16e3f
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/map_set_object/es6.js
@@ -0,0 +1,22 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+new BenchmarkSuite("ES6", [1000], [new Benchmark("ES6", false, false, 0, ES6)]);
+
+function ES6() {
+ var map = new Map(), set = new Set(), key = {};
+
+ for (var i = 0; i < 500; i++) {
+ map.set(i, i);
+ set.add(i);
+ }
+
+ map.set(key, "bar");
+ set.add(key);
+
+ return map.has(key) && set.has(key);
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/map_string/es5.js b/deps/v8/test/js-perf-test/SixSpeed/map_string/es5.js
new file mode 100644
index 0000000000..98411082ef
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/map_string/es5.js
@@ -0,0 +1,26 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+new BenchmarkSuite(
+ "ES5",
+ [1000],
+ [new Benchmark("ES5", false, false, 0, ES5, Setup)]
+);
+
+var map;
+
+function Setup() {
+ map = {};
+
+ for (var i = 0; i < 500; i++) {
+ map[i] = i;
+ }
+}
+
+function ES5() {
+ return map["499"] === 499;
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/map_string/es6.js b/deps/v8/test/js-perf-test/SixSpeed/map_string/es6.js
new file mode 100644
index 0000000000..5fa9c269f7
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/map_string/es6.js
@@ -0,0 +1,26 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+new BenchmarkSuite(
+ "ES6",
+ [1000],
+ [new Benchmark("ES6", false, false, 0, ES6, Setup)]
+);
+
+var map;
+
+function Setup() {
+ map = new Map();
+
+ for (var i = 0; i < 500; i++) {
+ map.set(i + "", i);
+ }
+}
+
+function ES6() {
+ return map.get("499") === 499;
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/super/babel.js b/deps/v8/test/js-perf-test/SixSpeed/super/babel.js
new file mode 100644
index 0000000000..02ca7203aa
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/super/babel.js
@@ -0,0 +1,135 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+'use strict';
+
+new BenchmarkSuite('Babel', [1000], [
+ new Benchmark('Babel', false, false, 0, Babel),
+]);
+
+var _get = function get(object, property, receiver) {
+ if (object === null) object = Function.prototype;
+ var desc = Object.getOwnPropertyDescriptor(object, property);
+ if (desc === undefined) {
+ var parent = Object.getPrototypeOf(object);
+ if (parent === null) {
+ return undefined;
+ } else {
+ return get(parent, property, receiver);
+ }
+ } else if ('value' in desc) {
+ return desc.value;
+ } else {
+ var getter = desc.get;
+ if (getter === undefined) {
+ return undefined;
+ }
+ return getter.call(receiver);
+ }
+};
+
+var _createClass = function() {
+ function defineProperties(target, props) {
+ for (var i = 0; i < props.length; i++) {
+ var descriptor = props[i];
+ descriptor.enumerable = descriptor.enumerable || false;
+ descriptor.configurable = true;
+ if ('value' in descriptor) descriptor.writable = true;
+ Object.defineProperty(target, descriptor.key, descriptor);
+ }
+ }
+ return function(Constructor, protoProps, staticProps) {
+ if (protoProps) defineProperties(Constructor.prototype, protoProps);
+ if (staticProps) defineProperties(Constructor, staticProps);
+ return Constructor;
+ };
+}();
+
+function _possibleConstructorReturn(self, call) {
+ if (!self) {
+ throw new ReferenceError(
+ 'this hasn\'t been initialised - super() hasn\'t been called');
+ }
+ return call && (typeof call === 'object' || typeof call === 'function') ?
+ call :
+ self;
+}
+
+function _inherits(subClass, superClass) {
+ if (typeof superClass !== 'function' && superClass !== null) {
+ throw new TypeError(
+ 'Super expression must either be null or a function, not ' +
+ typeof superClass);
+ }
+ subClass.prototype = Object.create(superClass && superClass.prototype, {
+ constructor: {
+ value: subClass,
+ enumerable: false,
+ writable: true,
+ configurable: true
+ }
+ });
+ if (superClass)
+ Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) :
+ subClass.__proto__ = superClass;
+}
+
+function _classCallCheck(instance, Constructor) {
+ if (!(instance instanceof Constructor)) {
+ throw new TypeError('Cannot call a class as a function');
+ }
+}
+
+var C = function() {
+ function C() {
+ _classCallCheck(this, C);
+
+ this.foo = 'bar';
+ }
+
+ _createClass(C, [{
+ key: 'bar',
+ value: function bar() {
+ return 41;
+ }
+ }]);
+
+ return C;
+}();
+
+var D = function(_C) {
+ _inherits(D, _C);
+
+ function D() {
+ _classCallCheck(this, D);
+
+ var _this = _possibleConstructorReturn(
+ this, (D.__proto__ || Object.getPrototypeOf(D)).call(this));
+
+ _this.baz = 'bat';
+ return _this;
+ }
+
+ _createClass(D, [{
+ key: 'bar',
+ value: function bar() {
+ return _get(
+ D.prototype.__proto__ ||
+ Object.getPrototypeOf(D.prototype),
+ 'bar', this)
+ .call(this) +
+ 1;
+ }
+ }]);
+
+ return D;
+}(C);
+
+function Babel() {
+ var d = new D();
+ return d.bar();
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/super/es5.js b/deps/v8/test/js-perf-test/SixSpeed/super/es5.js
new file mode 100644
index 0000000000..5986fd19cc
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/super/es5.js
@@ -0,0 +1,34 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+new BenchmarkSuite('ES5', [1000], [
+ new Benchmark('ES5', false, false, 0, ES5),
+]);
+
+function C() {
+ this.foo = 'bar';
+}
+
+C.prototype.bar = function() {
+ return 41;
+};
+
+function D() {
+ C.call(this);
+ this.baz = 'bat';
+}
+
+D.prototype = Object.create(C.prototype);
+
+D.prototype.bar = function() {
+ return C.prototype.bar.call(this) + 1;
+};
+
+function ES5() {
+ var d = new D();
+ return d.bar();
+}
diff --git a/deps/v8/test/js-perf-test/SixSpeed/super/es6.js b/deps/v8/test/js-perf-test/SixSpeed/super/es6.js
new file mode 100644
index 0000000000..4855a4d684
--- /dev/null
+++ b/deps/v8/test/js-perf-test/SixSpeed/super/es6.js
@@ -0,0 +1,34 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This benchmark is based on the six-speed benchmark build output.
+// Copyright 2014 Kevin Decker <https://github.com/kpdecker/six-speed/>
+
+new BenchmarkSuite('ES6', [1000], [
+ new Benchmark('ES6', false, false, 0, ES6),
+]);
+
+class C {
+ constructor() {
+ this.foo = 'bar';
+ }
+ bar() {
+ return 41;
+ }
+}
+
+class D extends C {
+ constructor() {
+ super();
+ this.baz = 'bat';
+ }
+ bar() {
+ return super.bar() + 1;
+ }
+}
+
+function ES6() {
+ var d = new D();
+ return d.bar();
+}
diff --git a/deps/v8/test/message/arrow-invalid-rest-2.out b/deps/v8/test/message/arrow-invalid-rest-2.out
index ad6bcb034d..aef0fb0041 100644
--- a/deps/v8/test/message/arrow-invalid-rest-2.out
+++ b/deps/v8/test/message/arrow-invalid-rest-2.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Unexpected token =
+*%(basename)s:7: SyntaxError: Rest parameter may not have a default initializer
var f = (a, ...x = 10) => x;
- ^
-SyntaxError: Unexpected token =
+ ^
+SyntaxError: Rest parameter may not have a default initializer
diff --git a/deps/v8/test/message/arrow-invalid-rest.out b/deps/v8/test/message/arrow-invalid-rest.out
index 99a8557f1e..520c67393f 100644
--- a/deps/v8/test/message/arrow-invalid-rest.out
+++ b/deps/v8/test/message/arrow-invalid-rest.out
@@ -1,4 +1,4 @@
-*%(basename)s:7: SyntaxError: Unexpected token =
+*%(basename)s:7: SyntaxError: Rest parameter may not have a default initializer
var f = (...x = 10) => x;
- ^
-SyntaxError: Unexpected token =
+ ^
+SyntaxError: Rest parameter may not have a default initializer
diff --git a/deps/v8/test/message/asm-assignment-undeclared.js b/deps/v8/test/message/asm-assignment-undeclared.js
index 61454c77e2..f7f530b5a2 100644
--- a/deps/v8/test/message/asm-assignment-undeclared.js
+++ b/deps/v8/test/message/asm-assignment-undeclared.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/asm-function-mismatch-def.js b/deps/v8/test/message/asm-function-mismatch-def.js
index bb570ba8db..84b46af397 100644
--- a/deps/v8/test/message/asm-function-mismatch-def.js
+++ b/deps/v8/test/message/asm-function-mismatch-def.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
// Violates asm.js because use of {g} in {f} has return type different from {g}.
diff --git a/deps/v8/test/message/asm-function-mismatch-use.js b/deps/v8/test/message/asm-function-mismatch-use.js
index 9aab940d7d..0f0935af88 100644
--- a/deps/v8/test/message/asm-function-mismatch-use.js
+++ b/deps/v8/test/message/asm-function-mismatch-use.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
// Violates asm.js because {g} has return type different from use of {g} in {f}.
diff --git a/deps/v8/test/message/asm-function-redefined.js b/deps/v8/test/message/asm-function-redefined.js
index 5281b057ed..77f6aac4d5 100644
--- a/deps/v8/test/message/asm-function-redefined.js
+++ b/deps/v8/test/message/asm-function-redefined.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
// Violates asm.js because symbol {f} is defined as module function twice.
diff --git a/deps/v8/test/message/asm-function-undefined.js b/deps/v8/test/message/asm-function-undefined.js
index 052ca7163b..ce39409963 100644
--- a/deps/v8/test/message/asm-function-undefined.js
+++ b/deps/v8/test/message/asm-function-undefined.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/asm-function-variable-collision.js b/deps/v8/test/message/asm-function-variable-collision.js
index 44c1c835fe..fbea44b1ad 100644
--- a/deps/v8/test/message/asm-function-variable-collision.js
+++ b/deps/v8/test/message/asm-function-variable-collision.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
// Violates asm.js because symbol {f} is used as module variable and function.
diff --git a/deps/v8/test/message/asm-import-wrong-annotation.js b/deps/v8/test/message/asm-import-wrong-annotation.js
new file mode 100644
index 0000000000..0b57c1a986
--- /dev/null
+++ b/deps/v8/test/message/asm-import-wrong-annotation.js
@@ -0,0 +1,11 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+
+function Module(stdlib, foreign, heap) {
+ "use asm"
+ var x = foreign.x | 1;
+}
+Module(this, { x:0 });
diff --git a/deps/v8/test/message/asm-import-wrong-annotation.out b/deps/v8/test/message/asm-import-wrong-annotation.out
new file mode 100644
index 0000000000..dec52ddb49
--- /dev/null
+++ b/deps/v8/test/message/asm-import-wrong-annotation.out
@@ -0,0 +1,5 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:9: Invalid asm.js: Expected |0 type annotation for foreign integer import
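As the expected message says, asm.js accepts only the literal |0 annotation on foreign integer imports (and a leading + for double imports), so the "| 1" in the test is rejected. A minimal valid counterpart, with illustrative import names i and d:

    function ValidModule(stdlib, foreign, heap) {
      "use asm";
      var i = foreign.i | 0;  // integer import: the annotation must be exactly |0
      var d = +foreign.d;     // double import: unary +
      function f() { return i | 0; }
      return { f: f };
    }
    ValidModule(this, { i: 1, d: 2.5 }).f();  // 1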
diff --git a/deps/v8/test/message/asm-import-wrong-object.js b/deps/v8/test/message/asm-import-wrong-object.js
new file mode 100644
index 0000000000..d077e04d91
--- /dev/null
+++ b/deps/v8/test/message/asm-import-wrong-object.js
@@ -0,0 +1,11 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+
+function Module(stdlib, foreign, heap) {
+ "use asm"
+ var x = +stdlib.x;
+}
+Module(this, { x:0 });
diff --git a/deps/v8/test/message/asm-import-wrong-object.out b/deps/v8/test/message/asm-import-wrong-object.out
new file mode 100644
index 0000000000..f72d0863f9
--- /dev/null
+++ b/deps/v8/test/message/asm-import-wrong-object.out
@@ -0,0 +1,5 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:9: Invalid asm.js: Unexpected token
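The "Unexpected token" here arises because arbitrary value imports must be read off the foreign object; stdlib only supplies the standard members (Math.*, the typed-array constructors, Infinity, NaN). A sketch of the two valid forms, with an illustrative import x:

    function ValidModule(stdlib, foreign, heap) {
      "use asm";
      var pi = stdlib.Math.PI;  // stdlib: standard built-ins only
      var x = +foreign.x;       // arbitrary values come from foreign
      function f() { return +(pi + x); }
      return { f: f };
    }
    ValidModule(this, { x: 0.5 }).f();  // 3.641592653589793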
diff --git a/deps/v8/test/message/asm-linking-bogus-heap.js b/deps/v8/test/message/asm-linking-bogus-heap.js
new file mode 100644
index 0000000000..a520dfb282
--- /dev/null
+++ b/deps/v8/test/message/asm-linking-bogus-heap.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+
+function Module(stdlib, foreign, heap) {
+ "use asm"
+ var a = new stdlib.Int8Array(heap);
+ function f() {
+ return a[0] | 0;
+ }
+ return { f:f };
+}
+Module(this, {}, new ArrayBuffer(1)).f();
diff --git a/deps/v8/test/message/asm-linking-bogus-heap.out b/deps/v8/test/message/asm-linking-bogus-heap.out
new file mode 100644
index 0000000000..5a324c1fea
--- /dev/null
+++ b/deps/v8/test/message/asm-linking-bogus-heap.out
@@ -0,0 +1,5 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:7: Linking failure in asm.js: Unexpected heap size
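Per the asm.js spec, a heap buffer's byteLength must be a power of two between 2^12 (4 KiB) and 2^24 (16 MiB), or a multiple of 2^24, so new ArrayBuffer(1) fails at link time. The same module should link with a conforming size, for example:

    function HeapModule(stdlib, foreign, heap) {
      "use asm";
      var a = new stdlib.Int8Array(heap);
      function f() { return a[0] | 0; }
      return { f: f };
    }
    HeapModule(this, {}, new ArrayBuffer(0x10000)).f();  // 0; 64 KiB is 2^16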
diff --git a/deps/v8/test/message/asm-linking-bogus-stdlib.js b/deps/v8/test/message/asm-linking-bogus-stdlib.js
new file mode 100644
index 0000000000..0a64422440
--- /dev/null
+++ b/deps/v8/test/message/asm-linking-bogus-stdlib.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+
+function Module(stdlib, foreign, heap) {
+ "use asm"
+ var pi = stdlib.Math.PI;
+ function f() {
+ return +pi;
+ }
+ return { f:f };
+}
+Module({ Math: { PI:23 }}).f();
diff --git a/deps/v8/test/message/asm-linking-bogus-stdlib.out b/deps/v8/test/message/asm-linking-bogus-stdlib.out
new file mode 100644
index 0000000000..5f3021ece8
--- /dev/null
+++ b/deps/v8/test/message/asm-linking-bogus-stdlib.out
@@ -0,0 +1,5 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:7: Linking failure in asm.js: Unexpected stdlib member
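Linking also verifies that each referenced stdlib member is the genuine built-in, so the substitute { Math: { PI: 23 } } above is rejected and the module falls back to plain JavaScript execution. With the real global object the same shape links:

    function PiModule(stdlib) {
      "use asm";
      var pi = stdlib.Math.PI;
      function f() { return +pi; }
      return { f: f };
    }
    PiModule(this).f();  // 3.141592653589793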
diff --git a/deps/v8/test/message/asm-linking-missing-heap.js b/deps/v8/test/message/asm-linking-missing-heap.js
new file mode 100644
index 0000000000..a33b59ad19
--- /dev/null
+++ b/deps/v8/test/message/asm-linking-missing-heap.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
+
+function Module(stdlib, foreign, heap) {
+ "use asm"
+ var a = new stdlib.Int8Array(heap);
+ function f() {
+ return a[0] | 0;
+ }
+ return { f:f };
+}
+Module(this).f();
diff --git a/deps/v8/test/message/asm-linking-missing-heap.out b/deps/v8/test/message/asm-linking-missing-heap.out
new file mode 100644
index 0000000000..7bded84dec
--- /dev/null
+++ b/deps/v8/test/message/asm-linking-missing-heap.out
@@ -0,0 +1,5 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+*%(basename)s:7: Linking failure in asm.js: Requires heap buffer
diff --git a/deps/v8/test/message/asm-missing-parameter-annotation.js b/deps/v8/test/message/asm-missing-parameter-annotation.js
index a9ab782b47..64a57a0c03 100644
--- a/deps/v8/test/message/asm-missing-parameter-annotation.js
+++ b/deps/v8/test/message/asm-missing-parameter-annotation.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/asm-missing-return-annotation.js b/deps/v8/test/message/asm-missing-return-annotation.js
index 48bfdd1366..0f57df8c27 100644
--- a/deps/v8/test/message/asm-missing-return-annotation.js
+++ b/deps/v8/test/message/asm-missing-return-annotation.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/asm-table-mismatch-def.js b/deps/v8/test/message/asm-table-mismatch-def.js
index 179f1925b3..47e692cc38 100644
--- a/deps/v8/test/message/asm-table-mismatch-def.js
+++ b/deps/v8/test/message/asm-table-mismatch-def.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
// Violates asm.js because {funTable} definition doesn't match the use in {f}.
diff --git a/deps/v8/test/message/asm-table-mismatch-use.js b/deps/v8/test/message/asm-table-mismatch-use.js
index 85889f1466..7615ee4456 100644
--- a/deps/v8/test/message/asm-table-mismatch-use.js
+++ b/deps/v8/test/message/asm-table-mismatch-use.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
// Violates asm.js because {funTable} use in {f} doesn't match its use in {g}.
diff --git a/deps/v8/test/message/asm-table-redefined.js b/deps/v8/test/message/asm-table-redefined.js
index 73b22860ee..bac6d67e5a 100644
--- a/deps/v8/test/message/asm-table-redefined.js
+++ b/deps/v8/test/message/asm-table-redefined.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/asm-table-undefined.js b/deps/v8/test/message/asm-table-undefined.js
index bef600a3a0..8092f56657 100644
--- a/deps/v8/test/message/asm-table-undefined.js
+++ b/deps/v8/test/message/asm-table-undefined.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/asm-table-variable-collision.js b/deps/v8/test/message/asm-table-variable-collision.js
index 7162b3a55b..da8e6195b8 100644
--- a/deps/v8/test/message/asm-table-variable-collision.js
+++ b/deps/v8/test/message/asm-table-variable-collision.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages --fast-validate-asm
+// Flags: --validate-asm --no-stress-opt --no-stress-validate-asm --no-suppress-asm-messages
function Module() {
"use asm"
diff --git a/deps/v8/test/message/async-arrow-invalid-rest-2.js b/deps/v8/test/message/async-arrow-invalid-rest-2.js
new file mode 100644
index 0000000000..ff5245f457
--- /dev/null
+++ b/deps/v8/test/message/async-arrow-invalid-rest-2.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+//
+
+var f = async (a, ...x = 10) => x;
+f(1, 2, 3, 4, 5);
diff --git a/deps/v8/test/message/async-arrow-invalid-rest-2.out b/deps/v8/test/message/async-arrow-invalid-rest-2.out
new file mode 100644
index 0000000000..ff90771fbb
--- /dev/null
+++ b/deps/v8/test/message/async-arrow-invalid-rest-2.out
@@ -0,0 +1,4 @@
+*%(basename)s:7: SyntaxError: Rest parameter may not have a default initializer
+var f = async (a, ...x = 10) => x;
+ ^^
+SyntaxError: Rest parameter may not have a default initializer
diff --git a/deps/v8/test/message/async-arrow-invalid-rest.js b/deps/v8/test/message/async-arrow-invalid-rest.js
new file mode 100644
index 0000000000..c77a7eb4b4
--- /dev/null
+++ b/deps/v8/test/message/async-arrow-invalid-rest.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+//
+
+var f = async (...x = 10) => x;
+f(1, 2, 3, 4, 5);
diff --git a/deps/v8/test/message/async-arrow-invalid-rest.out b/deps/v8/test/message/async-arrow-invalid-rest.out
new file mode 100644
index 0000000000..31fd1ab0e1
--- /dev/null
+++ b/deps/v8/test/message/async-arrow-invalid-rest.out
@@ -0,0 +1,4 @@
+*%(basename)s:7: SyntaxError: Rest parameter may not have a default initializer
+var f = async (...x = 10) => x;
+ ^^
+SyntaxError: Rest parameter may not have a default initializer
diff --git a/deps/v8/test/message/async-arrow-param-after-rest.js b/deps/v8/test/message/async-arrow-param-after-rest.js
new file mode 100644
index 0000000000..a050749a13
--- /dev/null
+++ b/deps/v8/test/message/async-arrow-param-after-rest.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+//
+
+async (...x, y) => 10
diff --git a/deps/v8/test/message/async-arrow-param-after-rest.out b/deps/v8/test/message/async-arrow-param-after-rest.out
new file mode 100644
index 0000000000..51d8c879b2
--- /dev/null
+++ b/deps/v8/test/message/async-arrow-param-after-rest.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: Rest parameter must be last formal parameter
+async (...x, y) => 10
+ ^
+SyntaxError: Rest parameter must be last formal parameter
+
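Async arrows follow the same rule as ordinary functions: the rest parameter must be the last formal parameter. For example:

    var ok = async (y, ...x) => x;  // rest in last position: valid
    ok(1, 2, 3);                    // a Promise resolving to [2, 3]
    // async (...x, y) => 10        // SyntaxError, as expected above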
diff --git a/deps/v8/test/message/class-spread-property.js b/deps/v8/test/message/class-spread-property.js
new file mode 100644
index 0000000000..5887df538b
--- /dev/null
+++ b/deps/v8/test/message/class-spread-property.js
@@ -0,0 +1,5 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class C { ...[] }
diff --git a/deps/v8/test/message/class-spread-property.out b/deps/v8/test/message/class-spread-property.out
new file mode 100644
index 0000000000..df15e50262
--- /dev/null
+++ b/deps/v8/test/message/class-spread-property.out
@@ -0,0 +1,4 @@
+*%(basename)s:5: SyntaxError: Unexpected token ...
+class C { ...[] }
+ ^^^
+SyntaxError: Unexpected token ...
\ No newline at end of file
diff --git a/deps/v8/test/message/console.js b/deps/v8/test/message/console.js
new file mode 100644
index 0000000000..f49ce4c608
--- /dev/null
+++ b/deps/v8/test/message/console.js
@@ -0,0 +1,25 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-stress-opt
+
+console.time();
+console.timeEnd();
+
+console.time("abcd");
+console.timeEnd({ toString: () => "ab" + "cd" });
+
+console.time("a");
+console.timeEnd("b");
+
+console.time("a", "b");
+console.timeEnd("a", "b");
+
+console.log("log", "more");
+console.warn("warn", { toString: () => 2 });
+console.error("error");
+console.debug("debug");
+console.info("info");
+
+console.info({ toString: () => {throw new Error("exception");} })
diff --git a/deps/v8/test/message/console.out b/deps/v8/test/message/console.out
new file mode 100644
index 0000000000..7813ccd025
--- /dev/null
+++ b/deps/v8/test/message/console.out
@@ -0,0 +1,15 @@
+default: {NUMBER}
+abcd: {NUMBER}
+b: 0.000000
+a: {NUMBER}
+log more
+warn 2
+debug
+info
+*%(basename)s:25: Error: exception
+console.info({ toString: () => {throw new Error("exception");} })
+ ^
+Error: exception
+ at Object.toString (*%(basename)s:25:39)
+ at console.info (<anonymous>)
+ at *%(basename)s:25:9
diff --git a/deps/v8/test/message/function-param-after-rest.js b/deps/v8/test/message/function-param-after-rest.js
new file mode 100644
index 0000000000..3fe79a8f27
--- /dev/null
+++ b/deps/v8/test/message/function-param-after-rest.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+//
+
+function f(...x, y) { }
diff --git a/deps/v8/test/message/function-param-after-rest.out b/deps/v8/test/message/function-param-after-rest.out
new file mode 100644
index 0000000000..58633fddca
--- /dev/null
+++ b/deps/v8/test/message/function-param-after-rest.out
@@ -0,0 +1,5 @@
+*%(basename)s:7: SyntaxError: Rest parameter must be last formal parameter
+function f(...x, y) { }
+ ^
+SyntaxError: Rest parameter must be last formal parameter
+
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index 620328e90d..0576458af2 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -107,6 +107,7 @@ class MessageTestSuite(testsuite.TestSuite):
expected_lines, actual_lines, fillvalue=''):
pattern = re.escape(expected.rstrip() % env)
pattern = pattern.replace("\\*", ".*")
+ pattern = pattern.replace("\\{NUMBER\\}", r"\d(?:\.\d*)?")
pattern = "^%s$" % pattern
if not re.match(pattern, actual):
return True
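The new {NUMBER} placeholder lets expectation files such as console.out match nondeterministic timer output: it expands to a single digit with an optional decimal part. The equivalent pattern, written here as a JavaScript regular expression:

    var pattern = /^default: \d(?:\.\d*)?$/;
    pattern.test("default: 0.523");  // true
    pattern.test("default: ms");     // false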
diff --git a/deps/v8/test/mjsunit/allocation-site-info.js b/deps/v8/test/mjsunit/allocation-site-info.js
index ba4ae17cf0..c7184af6f2 100644
--- a/deps/v8/test/mjsunit/allocation-site-info.js
+++ b/deps/v8/test/mjsunit/allocation-site-info.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc
-// Flags: --crankshaft --no-always-opt
+// Flags: --opt --no-always-opt
var elements_kind = {
fast_smi_only : 'fast smi only elements',
@@ -136,7 +136,7 @@ assertKind(elements_kind.fast, obj);
obj = fastliteralcase(get_standard_literal(), 3);
assertKind(elements_kind.fast, obj);
-// Make sure this works in crankshafted code too.
+// Make sure this works in optimized code too.
%OptimizeFunctionOnNextCall(get_standard_literal);
get_standard_literal();
obj = get_standard_literal();
@@ -347,7 +347,7 @@ instanceof_check(realmBArray);
assertOptimized(instanceof_check);
// Try to optimize again, but first clear all type feedback, and allow it
-// to be monomorphic on first call. Only after crankshafting do we introduce
+// to be monomorphic on first call. Only after optimizing do we introduce
// realmBArray. This should deopt the method.
%DeoptimizeFunction(instanceof_check);
%ClearFunctionFeedback(instanceof_check);
@@ -360,6 +360,12 @@ assertOptimized(instanceof_check);
instanceof_check(realmBArray);
assertUnoptimized(instanceof_check);
+// Perform a gc because without it the test below can experience an
+// allocation failure at an inconvenient point. Allocation mementos get
+// cleared on gc, and they can't deliver elements kind feedback when that
+// happens.
+gc();
+
// Case: make sure nested arrays benefit from allocation site feedback as
// well.
(function() {
diff --git a/deps/v8/test/mjsunit/arguments.js b/deps/v8/test/mjsunit/arguments.js
index 97ec7cca6d..8c6186e1b9 100644
--- a/deps/v8/test/mjsunit/arguments.js
+++ b/deps/v8/test/mjsunit/arguments.js
@@ -248,26 +248,107 @@ assertEquals(117, arg_set(0xFFFFFFFF));
return arguments
};
var args = f(1, 2);
+ %HeapObjectVerify(args);
assertEquals(1, args[0]);
assertEquals(2, args[1]);
assertEquals(key, args[key]);
assertEquals(2, args.length);
delete args[0];
+ %HeapObjectVerify(args);
assertEquals(undefined, args[0]);
assertEquals(2, args[1]);
assertEquals(key, args[key]);
assertEquals(2, args.length);
delete args[1];
+ %HeapObjectVerify(args);
assertEquals(undefined, args[0]);
assertEquals(undefined, args[1]);
assertEquals(key, args[key]);
assertEquals(2, args.length);
delete args[key];
+ %HeapObjectVerify(args);
assertEquals(undefined, args[0]);
assertEquals(undefined, args[1]);
assertEquals(undefined, args[key]);
assertEquals(2, args.length);
})();
+
+(function testDeleteSlowSloppyArguments2() {
+ function f(a) {
+ return arguments
+ };
+ var args = f(1, 2);
+ %HeapObjectVerify(args);
+ assertEquals(1, args[0]);
+ assertEquals(2, args[1]);
+ assertEquals(2, args.length);
+
+ delete args[1];
+ %HeapObjectVerify(args);
+ assertEquals(1, args[0]);
+ assertEquals(undefined, args[1]);
+ assertEquals(undefined, args[2]);
+ assertEquals(2, args.length);
+
+ delete args[0];
+ %HeapObjectVerify(args);
+ assertEquals(undefined, args[0]);
+ assertEquals(undefined, args[1]);
+ assertEquals(undefined, args[2]);
+ assertEquals(2, args.length);
+})();
+
+(function testSloppyArgumentProperties() {
+ function f(a, b) { return arguments }
+ let args = f(1, 2, 3, 4);
+ %HeapObjectVerify(args);
+ assertEquals(4, args.length);
+ args.foo = "foo";
+ %HeapObjectVerify(args);
+ assertEquals("foo", args.foo);
+ assertEquals(4, args.length);
+
+ delete args.foo;
+ %HeapObjectVerify(args);
+ assertEquals(undefined, args.foo);
+ assertEquals(4, args.length);
+})();
+
+
+(function testSloppyArgumentsLengthMapChange() {
+ function f(a) { return arguments };
+ let args1 = f(1);
+ let args2 = f(1,2);
+ assertTrue(%HaveSameMap(args1, args2));
+ args2.length = 12;
+ assertTrue(%HaveSameMap(args1, args2));
+ args2.length = "aa"
+ assertTrue(%HaveSameMap(args1, args2));
+
+ let args3 = f(1);
+ let args4 = f(1,2);
+ // Creating holes causes map transitions.
+ assertTrue(%HaveSameMap(args1, args3));
+ assertTrue(%HaveSameMap(args1, args4));
+ delete args3[0];
+ assertFalse(%HaveSameMap(args1, args3));
+ delete args4[1];
+ assertFalse(%HaveSameMap(args1, args4));
+})();
+
+(function testSloppyArgumentsLengthTypeMapChange() {
+ function f(a) { return arguments };
+ let args1 = f(1);
+ let args2 = f(1,2);
+ assertTrue(%HaveSameMap(args1, args2));
+ // Changing the length type doesn't cause a map transition.
+ args2.length = 12;
+ assertTrue(%HaveSameMap(args1, args2));
+ args2.length = 12.0;
+ assertTrue(%HaveSameMap(args1, args2));
+ args2.length = "aa"
+ assertTrue(%HaveSameMap(args1, args2));
+})();
diff --git a/deps/v8/test/mjsunit/array-constructor-feedback.js b/deps/v8/test/mjsunit/array-constructor-feedback.js
index 45ea0d53e1..d5715f6d2d 100644
--- a/deps/v8/test/mjsunit/array-constructor-feedback.js
+++ b/deps/v8/test/mjsunit/array-constructor-feedback.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc
-// Flags: --no-always-opt --crankshaft
+// Flags: --no-always-opt --opt
// Test element kind of objects.
@@ -119,10 +119,13 @@ function assertKind(expected, obj, name_opt) {
return new Array(one, two, three);
}
- barn(1, 2, 3);
- barn(1, 2, 3);
+ a = barn(1, 2, 3);
+ a[1] = "a string";
+ a = barn(1, 2, 3);
+ assertKind(elements_kind.fast, a);
%OptimizeFunctionOnNextCall(barn);
- barn(1, 2, 3);
+ a = barn(1, 2, 3);
+ assertKind(elements_kind.fast, a);
assertOptimized(barn);
a = barn(1, "oops", 3);
assertOptimized(barn);
diff --git a/deps/v8/test/mjsunit/array-feedback.js b/deps/v8/test/mjsunit/array-feedback.js
index c585920df1..01856c57d0 100644
--- a/deps/v8/test/mjsunit/array-feedback.js
+++ b/deps/v8/test/mjsunit/array-feedback.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc
-// Flags: --crankshaft --no-always-opt
+// Flags: --opt --no-always-opt
var elements_kind = {
fast_smi_only : 'fast smi only elements',
diff --git a/deps/v8/test/mjsunit/array-literal-feedback.js b/deps/v8/test/mjsunit/array-literal-feedback.js
index d27f089c22..f3e39ddf4e 100644
--- a/deps/v8/test/mjsunit/array-literal-feedback.js
+++ b/deps/v8/test/mjsunit/array-literal-feedback.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc
-// Flags: --crankshaft --no-always-opt
+// Flags: --opt --no-always-opt
var elements_kind = {
fast_smi_only : 'fast smi only elements',
diff --git a/deps/v8/test/mjsunit/array-literal-transitions.js b/deps/v8/test/mjsunit/array-literal-transitions.js
index ce46cb7a7c..2db6fd35c5 100644
--- a/deps/v8/test/mjsunit/array-literal-transitions.js
+++ b/deps/v8/test/mjsunit/array-literal-transitions.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc --ignition-osr --no-always-opt
-// Flags: --crankshaft
+// Flags: --opt
// IC and Crankshaft support for smi-only elements in dynamic array literals.
function get(foo) { return foo; } // Used to generate dynamic values.
diff --git a/deps/v8/test/mjsunit/array-push5.js b/deps/v8/test/mjsunit/array-push5.js
index 4d75dc8b7c..9961ff98c3 100644
--- a/deps/v8/test/mjsunit/array-push5.js
+++ b/deps/v8/test/mjsunit/array-push5.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
var v = 0;
diff --git a/deps/v8/test/mjsunit/array-shift4.js b/deps/v8/test/mjsunit/array-shift4.js
index aa0c546233..5d28fd306a 100644
--- a/deps/v8/test/mjsunit/array-shift4.js
+++ b/deps/v8/test/mjsunit/array-shift4.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
// Inlining shift with holey smi arrays shouldn't deopt just because it
// encounters the hole on the copy step.
diff --git a/deps/v8/test/mjsunit/array-slice.js b/deps/v8/test/mjsunit/array-slice.js
index b017dd506a..1fe1418459 100644
--- a/deps/v8/test/mjsunit/array-slice.js
+++ b/deps/v8/test/mjsunit/array-slice.js
@@ -25,7 +25,10 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
+
// Check that slicing array of holes keeps it as array of holes
+
(function() {
var array = new Array(10);
for (var i = 0; i < 7; i++) {
@@ -222,7 +225,10 @@
// Check slicing on arguments object.
(function() {
function func(expected, a0, a1, a2) {
- assertEquals(expected, Array.prototype.slice.call(arguments, 1));
+ let result = Array.prototype.slice.call(arguments, 1);
+ %HeapObjectVerify(result);
+ %HeapObjectVerify(arguments);
+ assertEquals(expected, result);
}
func([]);
@@ -240,7 +246,10 @@
assertEquals(undefined, y);
y = 239;
assertEquals(1, arguments.length); // arguments length is the same.
- assertEquals([x], Array.prototype.slice.call(arguments, 0));
+ let result = Array.prototype.slice.call(arguments, 0);
+ %HeapObjectVerify(result);
+ %HeapObjectVerify(arguments);
+ assertEquals([x], result);
}
func('a');
@@ -251,7 +260,10 @@
function func(x, y) {
assertEquals(1, arguments.length);
arguments.length = 7;
- assertEquals([x,,,,,,,], Array.prototype.slice.call(arguments, 0));
+ let result = Array.prototype.slice.call(arguments, 0);
+ assertEquals([x,,,,,,,], result);
+ %HeapObjectVerify(result);
+ %HeapObjectVerify(arguments);
}
func('a');
@@ -263,7 +275,10 @@
function func(x, y) {
assertEquals(1, arguments.length);
arguments.length = 'foobar';
- assertEquals([], Array.prototype.slice.call(arguments, 0));
+ let result = Array.prototype.slice.call(arguments, 0);
+ assertEquals([], result);
+ %HeapObjectVerify(result);
+ %HeapObjectVerify(arguments);
}
func('a');
@@ -275,7 +290,10 @@
function func(x, y) {
assertEquals(1, arguments.length);
arguments[3] = 239;
- assertEquals([x], Array.prototype.slice.call(arguments, 0));
+ let result = Array.prototype.slice.call(arguments, 0);
+ assertEquals([x], result);
+ %HeapObjectVerify(result);
+ %HeapObjectVerify(arguments);
}
func('a');
@@ -286,7 +304,10 @@
function func(x, y, z) {
assertEquals(3, arguments.length);
delete arguments[1];
- assertEquals([x,,z], Array.prototype.slice.call(arguments, 0));
+ let result = Array.prototype.slice.call(arguments, 0);
+ assertEquals([x,,z], result);
+ %HeapObjectVerify(result);
+ %HeapObjectVerify(arguments);
}
func('a', 'b', 'c');
@@ -300,6 +321,8 @@
var result = Array.prototype.slice.call(arguments);
delete arguments.__proto__[1];
assertEquals([1,5,3], result);
+ %HeapObjectVerify(result);
+ %HeapObjectVerify(arguments);
}
f(1,2,3);
})();
diff --git a/deps/v8/test/mjsunit/array-store-and-grow.js b/deps/v8/test/mjsunit/array-store-and-grow.js
index 4de7a376e3..ee831ad061 100644
--- a/deps/v8/test/mjsunit/array-store-and-grow.js
+++ b/deps/v8/test/mjsunit/array-store-and-grow.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
// Verifies that the KeyedStoreIC correctly handles out-of-bounds stores
// to an array that grow it by a single element. Test functions are
diff --git a/deps/v8/test/mjsunit/asm/asm-memory.js b/deps/v8/test/mjsunit/asm/asm-memory.js
new file mode 100644
index 0000000000..6f9b2fe639
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/asm-memory.js
@@ -0,0 +1,55 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function TestUnalignedMemory() {
+ // Test that a buffer whose length is not a multiple of the element size of a
+ // heap view throws the proper {RangeError} during instantiation.
+ function Module(stdlib, foreign, heap) {
+ "use asm";
+ var a = new stdlib.Int32Array(heap);
+ function f() {}
+ return { f:f };
+ }
+ assertThrows(() => Module(this, {}, new ArrayBuffer(2)), RangeError);
+ assertThrows(() => Module(this, {}, new ArrayBuffer(10)), RangeError);
+ assertDoesNotThrow(() => Module(this, {}, new ArrayBuffer(4)));
+ assertDoesNotThrow(() => Module(this, {}, new ArrayBuffer(16)));
+ assertFalse(%IsAsmWasmCode(Module));
+})();
+
+(function TestMissingMemory() {
+ // Test that a buffer is required for instantiation of modules containing any
+ // heap views. JavaScript needs to create individual buffers for each view.
+ function Module(stdlib, foreign, heap) {
+ "use asm";
+ var a = new stdlib.Int16Array(heap);
+ var b = new stdlib.Int32Array(heap);
+ function f() {
+ a[0] = 0x1234;
+ return b[0] | 0;
+ }
+ return { f:f };
+ }
+ var m = Module(this, {}, undefined);
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(0, m.f());
+})();
+
+(function TestNonBufferMemory() {
+ // Test that a buffer has to be an instance of {ArrayBuffer} in order to be
+ // valid. JavaScript will also accept any other array-like object.
+ function Module(stdlib, foreign, heap) {
+ "use asm";
+ var a = new stdlib.Int32Array(heap);
+ function f() {
+ return a[0] | 0;
+ }
+ return { f:f };
+ }
+ var m = Module(this, {}, [ 23, 42 ]);
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(23, m.f());
+})();
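The RangeError in TestUnalignedMemory is ordinary typed-array behavior: a view over an entire buffer requires the byte length to be a multiple of the element size. For instance:

    new Int32Array(new ArrayBuffer(4));    // ok: 4 is a multiple of 4
    // new Int32Array(new ArrayBuffer(2))  // RangeError: 2 is not a multiple of 4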
diff --git a/deps/v8/test/mjsunit/asm/asm-stdlib.js b/deps/v8/test/mjsunit/asm/asm-stdlib.js
new file mode 100644
index 0000000000..65d0b76ff7
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/asm-stdlib.js
@@ -0,0 +1,46 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function FailProxyAsStdlib() {
+ // Test that passing a proxy as "stdlib" will cause module instantiation to
+ // fail while still only triggering one observable property load.
+ function Module(stdlib, foreign, heap) {
+ "use asm";
+ var a = stdlib.Math.PI;
+ function f() { return a }
+ return { f:f };
+ }
+ var trap_was_called = 0;
+ var proxy = new Proxy(this, { get:function(target, property, receiver) {
+ trap_was_called++;
+ if (property == "Math") return { PI:23 };
+ return Reflect.get(target, property, receiver);
+ }});
+ var m = Module(proxy);
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(1, trap_was_called);
+ assertEquals(23, m.f());
+})();
+
+(function FailGetterInStdlib() {
+ // Test that accessors as part of "stdlib" will cause module instantiation to
+ // fail while still only triggering one observable property load.
+ function Module(stdlib, foreign, heap) {
+ "use asm";
+ var a = new stdlib.Int8Array(heap);
+ function f() { return a[0] | 0 }
+ return { f:f };
+ }
+ var trap_was_called = 0;
+ var observer = { get Int8Array() {
+ trap_was_called++;
+ return function() { return [ 23 ] };
+ }};
+ var m = Module(observer);
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(1, trap_was_called);
+ assertEquals(23, m.f());
+})();
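Both tests count property loads to pin down an observable detail: even when validation fails and execution falls back to JavaScript, each stdlib member is read exactly once. The counting proxy used above is a general pattern:

    var loads = 0;
    var counting = new Proxy(this, {
      get: function(target, property, receiver) {
        loads++;  // record every observable property read
        return Reflect.get(target, property, receiver);
      }
    });
    counting.Math;  // one observable load
    loads;          // 1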
diff --git a/deps/v8/test/mjsunit/asm/asm-validation.js b/deps/v8/test/mjsunit/asm/asm-validation.js
index 0925d103ea..ed5b748aad 100644
--- a/deps/v8/test/mjsunit/asm/asm-validation.js
+++ b/deps/v8/test/mjsunit/asm/asm-validation.js
@@ -474,8 +474,26 @@ function assertValidAsm(func) {
assertFalse(o instanceof WebAssembly.Instance);
assertTrue(o instanceof Object);
assertTrue(o.__proto__ === Object.prototype);
+ var p = Object.getOwnPropertyDescriptor(o, "x")
+ assertTrue(p.writable);
+ assertTrue(p.enumerable);
+ assertTrue(p.configurable);
+ assertTrue(typeof o.x === 'function');
o.x = 5;
assertTrue(typeof o.x === 'number');
assertTrue(o.__single_function__ === undefined);
assertTrue(o.__foreign_init__ === undefined);
})();
+
+(function TestAsmExportOrderPreserved() {
+ function Module() {
+ "use asm";
+ function f() {}
+ function g() {}
+ return { a:f, b:g, x:f, c:g, d:f };
+ }
+ var m = Module();
+ assertValidAsm(Module);
+ var props = Object.getOwnPropertyNames(m);
+ assertEquals(["a","b","x","c","d"], props);
+})();
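TestAsmExportOrderPreserved leans on standard property-order semantics: Object.getOwnPropertyNames lists string-keyed own properties in insertion order, so the export object must mirror the declaration order of the returned literal. In plain JavaScript:

    var exported = { a: 1, b: 2, x: 3, c: 4, d: 5 };
    Object.getOwnPropertyNames(exported);  // ["a", "b", "x", "c", "d"]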
diff --git a/deps/v8/test/mjsunit/asm/call-annotation.js b/deps/v8/test/mjsunit/asm/call-annotation.js
new file mode 100644
index 0000000000..e2260441e0
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/call-annotation.js
@@ -0,0 +1,179 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --validate-asm
+
+// This file contains test cases that are particularly interesting for a single
+// pass asm.js parsing and validation implementation in regards to the return
+// type annotation via the "|0" operation.
+
+var g_was_called = 0;
+function g() {
+ g_was_called++;
+ return "23.4";
+}
+
+(function SuccessExternCoercion() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var g = imports.g;
+ function f(a) {
+ a = a | 0;
+ a = a + (g() | 0) | 0;
+ return a | 0;
+ }
+ return { f:f };
+ }
+ g_was_called = 0;
+ var m = Module(this, { g:g });
+ assertTrue(%IsAsmWasmCode(Module));
+ assertEquals(24, m.f(1));
+ assertEquals(1, g_was_called);
+})();
+
+(function FailPrecedenceLeftStronger() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var g = imports.g;
+ function f(a) {
+ a = a | 0;
+ a = a + g() | 0;
+ return a | 0;
+ }
+ return { f:f };
+ }
+ g_was_called = 0;
+ var m = Module(this, { g:g });
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(123, m.f(1));
+ assertEquals(1, g_was_called);
+})();
+
+(function FailPrecedenceRightStronger() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var g = imports.g;
+ function f(a) {
+ a = a | 0;
+ a = (g() | 0 + a) | 0;
+ return a | 0;
+ }
+ return { f:f };
+ }
+ g_was_called = 0;
+ var m = Module(this, { g:g });
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(127, m.f(127));
+ assertEquals(1, g_was_called);
+})();
+
+(function FailParenthesizedAnnotation() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var g = imports.g;
+ function f(a) {
+ a = a | 0;
+ a = (g()) | 0;
+ return a | 0;
+ }
+ return { f:f };
+ }
+ g_was_called = 0;
+ var m = Module(this, { g:g });
+ // TODO(6127): Only properly rejected by "new" parser.
+ // assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(23, m.f(1));
+ assertEquals(1, g_was_called);
+})();
+
+(function FailNonZeroAnnotation() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var g = imports.g;
+ function f(a) {
+ a = a | 0;
+ a = g() | 127;
+ return a | 0;
+ }
+ return { f:f };
+ }
+ g_was_called = 0;
+ var m = Module(this, { g:g });
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(127, m.f(1));
+ assertEquals(1, g_was_called);
+})();
+
+(function FailNestedAnnotation1() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var g = imports.g;
+ function f(a) {
+ a = a | 0;
+ a = g() | g() | 0;
+ return a | 0;
+ }
+ return { f:f };
+ }
+ g_was_called = 0;
+ var m = Module(this, { g:g });
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(23, m.f(1));
+ assertEquals(2, g_was_called);
+})();
+
+(function FailNestedAnnotation2() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var g = imports.g;
+ function f(a) {
+ a = a | 0;
+ a = g() | 0 | g() | 0;
+ return a | 0;
+ }
+ return { f:f };
+ }
+ g_was_called = 0;
+ var m = Module(this, { g:g });
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(23, m.f(1));
+ assertEquals(2, g_was_called);
+})();
+
+(function SuccessMixedWithDoubleAnnotation() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var g = imports.g;
+ function f(a) {
+ a = +a;
+ a = a + +(g() | 0);
+ return +a;
+ }
+ return { f:f };
+ }
+ g_was_called = 0;
+ var m = Module(this, { g:g });
+ assertTrue(%IsAsmWasmCode(Module));
+ assertEquals(23.5, m.f(0.5));
+ assertEquals(1, g_was_called);
+})();
+
+(function SuccessMixedWithFloatAnnotation() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var g = imports.g;
+ var fround = stdlib.Math.fround;
+ function f(a) {
+ a = fround(a);
+ a = fround(a + fround(g() | 0));
+ return fround(a);
+ }
+ return { f:f };
+ }
+ g_was_called = 0;
+ var m = Module(this, { g:g });
+ assertTrue(%IsAsmWasmCode(Module));
+ assertEquals(23.5, m.f(0.5));
+ assertEquals(1, g_was_called);
+})();
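Several of the failure cases above hinge on precedence: + binds tighter than |, so unless the annotation wraps the call directly, the call itself ends up unannotated. In plain JavaScript terms:

    var a = 1;
    function g() { return 22; }
    // a + g() | 0 parses as (a + g()) | 0, leaving g() without its own |0:
    (a + g() | 0) === ((a + g()) | 0);  // true
    a + (g() | 0);  // here the call is annotated, matching SuccessExternCoercion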
diff --git a/deps/v8/test/mjsunit/asm/call-stdlib.js b/deps/v8/test/mjsunit/asm/call-stdlib.js
new file mode 100644
index 0000000000..5d883f324a
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/call-stdlib.js
@@ -0,0 +1,85 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --validate-asm
+
+// This file contains test cases that are particularly interesting because they
+// omit the usual call-site coercion of function calls that target well-known
+// stdlib functions.
+
+(function SuccessStdlibWithoutAnnotation() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var imul = stdlib.Math.imul;
+ function f(a, b) {
+ a = a | 0;
+ b = b | 0;
+ var r = 0;
+ r = imul(a, b);
+ return r | 0;
+ }
+ return { f:f };
+ }
+ var m = Module(this);
+ assertTrue(%IsAsmWasmCode(Module));
+ assertEquals(966, m.f(23, 42));
+ assertEquals(-0x0fffffff, m.f(0x7ffffff, 0x7ffffff));
+})();
+
+(function SuccessStdlibWithoutAnnotationThenRound() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var fround = stdlib.Math.fround;
+ var imul = stdlib.Math.imul;
+ function f(a, b) {
+ a = a | 0;
+ b = b | 0;
+ var r = fround(0);
+ r = fround(imul(a, b));
+ return fround(r);
+ }
+ return { f:f };
+ }
+ var m = Module(this);
+ assertTrue(%IsAsmWasmCode(Module));
+ assertEquals(966, m.f(23, 42));
+ assertEquals(-0x0fffffff - 1, m.f(0x7ffffff, 0x7ffffff));
+})();
+
+(function FailureStdlibWithoutAnnotationMismatch() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var fround = stdlib.Math.fround;
+ var imul = stdlib.Math.imul;
+ function f(a, b) {
+ a = a | 0;
+ b = b | 0;
+ var r = fround(0);
+ r = imul(a, b);
+ return r | 0;
+ }
+ return { f:f };
+ }
+ var m = Module(this);
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(966, m.f(23, 42));
+ assertEquals(-0x0fffffff, m.f(0x7ffffff, 0x7ffffff));
+})();
+
+(function SuccessStdlibWithoutAnnotationUsedInReturn() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ var imul = stdlib.Math.imul;
+ function f(a, b) {
+ a = a | 0;
+ b = b | 0;
+ return imul(a, b);
+ }
+ return { f:f };
+ }
+ var m = Module(this);
+ assertTrue(%IsAsmWasmCode(Module));
+ assertEquals(966, m.f(23, 42));
+ assertEquals(-0x0fffffff, m.f(0x7ffffff, 0x7ffffff));
+})();
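Calls to known stdlib functions such as Math.imul carry their own return type, so asm.js accepts them without the usual |0 call-site coercion. The expected values follow from int32 wrapping in plain JavaScript:

    Math.imul(23, 42);                // 966
    Math.imul(0x7ffffff, 0x7ffffff);  // -268435455, i.e. -0x0fffffff after int32 wrap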
diff --git a/deps/v8/test/mjsunit/asm/global-imports.js b/deps/v8/test/mjsunit/asm/global-imports.js
new file mode 100644
index 0000000000..60c0930fa8
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/global-imports.js
@@ -0,0 +1,74 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --validate-asm
+
+function MODULE_TEMPLATE(stdlib, foreign, buffer) {
+ "use asm";
+ var fround = stdlib.Math.fround;
+ IMPORT;
+ function f(int, flt, dbl) {
+ int = int | 0;
+ flt = fround(flt);
+ dbl = +dbl;
+ return EXPRESSION;
+ }
+ return { f:f };
+}
+
+var throws = {};
+var test_count = 0;
+const stdlib = this;
+const buffer = new ArrayBuffer(1024);
+function p(x) { return x * x; }
+
+function assertThrowsOrEquals(result, fun) {
+ if (result === throws) {
+ assertThrows(fun, TypeError);
+ } else {
+ assertEquals(result, fun(1, 2.3, 4.2));
+ }
+}
+
+function RunAsmJsTest(asm_source, imports, result, valid) {
+ var nonasm_source = asm_source.replace(new RegExp("use asm"), "");
+
+ var js_module = eval("(" + nonasm_source + ")")
+ var js_instance = js_module(stdlib, imports, buffer);
+ assertThrowsOrEquals(result, js_instance.f);
+
+ var asm_module = eval("(" + asm_source + ")");
+ var asm_instance = asm_module(stdlib, imports, buffer);
+ assertEquals(valid, %IsAsmWasmCode(asm_module));
+ assertThrowsOrEquals(result, asm_instance.f);
+}
+
+function Run(imp, exp, imports, result, valid) {
+ var name = "test" + (++test_count);
+ var src = MODULE_TEMPLATE.toString();
+ src = src.replace("IMPORT", imp);
+ src = src.replace("EXPRESSION", exp);
+ src = src.replace("MODULE_TEMPLATE", name);
+ RunAsmJsTest(src, imports, result, valid);
+}
+
+// Imports of values from foreign.
+Run("var x = foreign.x | 0", "(x + int) | 0", {x:12}, 13, true);
+Run("var x = foreign.x | 0", "(x = int) | 0", {x:12}, 1, true);
+Run("var x = foreign.x | 0", "+(x + dbl)", {x:12}, 16.2, false);
+Run("var x = +foreign.x", "+(x + dbl)", {x:1.2}, 5.4, true);
+Run("var x = +foreign.x", "+(x = dbl)", {x:1.2}, 4.2, true);
+Run("var x = +foreign.x", "(x + int) | 0", {x:1.2}, 2, false);
+Run("const x = foreign.x | 0", "(x + int) | 0", {x:12}, 13, true);
+Run("const x = foreign.x | 0", "(x = int) | 0", {x:12}, throws, false);
+Run("const x = foreign.x | 0", "+(x + dbl)", {x:12}, 16.2, false);
+Run("const x = +foreign.x", "+(x + dbl)", {x:1.2}, 5.4, true);
+Run("const x = +foreign.x", "+(x = dbl)", {x:1.2}, throws, false);
+Run("const x = +foreign.x", "(x + int) | 0", {x:1.2}, 2, false);
+
+// Imports of functions and values from stdlib and foreign.
+Run("var x = foreign.x", "x(dbl) | 0", { x:p }, 17, true);
+Run("var x = foreign.x", "(x = fround, x(dbl)) | 0", { x:p }, 4, false);
+Run("var x = stdlib.Math.E", "(x = 3.1415, 1) | 0", {}, 1, false);
+Run("var x = stdlib.Math.imul", "(x = fround, 1) | 0", {}, 1, false);
diff --git a/deps/v8/test/mjsunit/asm/immutable.js b/deps/v8/test/mjsunit/asm/immutable.js
new file mode 100644
index 0000000000..29c15cc7d9
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/immutable.js
@@ -0,0 +1,48 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function FailImmutableFunction() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ function f(a) {
+ a = a | 0;
+ if (a) {
+ a = f((a - 1) | 0) | 0;
+ f = 0;
+ return (a + 1) | 0;
+ }
+ return 23;
+ }
+ return { f:f };
+ }
+ var m = Module(this);
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(23, m.f(0));
+ assertEquals(24, m.f(1));
+ assertThrows(() => m.f(2));
+})();
+
+(function FailImmutableFunctionTable() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ function f(a) {
+ a = a | 0;
+ if (a) {
+ a = funTable[a & 0]((a - 1) | 0) | 0;
+ funTable = 0;
+ return (a + 1) | 0;
+ }
+ return 23;
+ }
+ var funTable = [ f ];
+ return { f:f };
+ }
+ var m = Module(this);
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(23, m.f(0));
+ assertEquals(24, m.f(1));
+ assertThrows(() => m.f(2));
+})();
diff --git a/deps/v8/test/mjsunit/asm/int32-mul.js b/deps/v8/test/mjsunit/asm/int32-mul.js
index 4c5e38668a..1f2066b78b 100644
--- a/deps/v8/test/mjsunit/asm/int32-mul.js
+++ b/deps/v8/test/mjsunit/asm/int32-mul.js
@@ -6,7 +6,7 @@ function Module(stdlib, foreign, heap) {
"use asm";
function f1(i) {
i = i|0;
- return (i | 0) * 3 | 0;
+ return (i | 0) * -3 | 0;
}
function f2(i) {
i = i|0;
@@ -26,7 +26,7 @@ function Module(stdlib, foreign, heap) {
var m = Module(this, {}, new ArrayBuffer(1024));
for (var i = -2147483648; i < 2147483648; i += 3999771) {
- assertEquals(i * 3 | 0, m.f1(i));
+ assertEquals(i * -3 | 0, m.f1(i));
assertEquals(i * 7 | 0, m.f2(i));
assertEquals(i * 1024 | 0, m.f3(i));
assertEquals(i * 333339 | 0, m.f4(i));
diff --git a/deps/v8/test/mjsunit/asm/regress-718745.js b/deps/v8/test/mjsunit/asm/regress-718745.js
new file mode 100644
index 0000000000..eb5416808b
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/regress-718745.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib) {
+ "use asm";
+ var fround = stdlib.Math.fround;
+ function f(a) {
+ a = (fround(a));
+ }
+ return { f:f };
+}
+Module(this).f();
diff --git a/deps/v8/test/mjsunit/asm/return-types.js b/deps/v8/test/mjsunit/asm/return-types.js
new file mode 100644
index 0000000000..7ecdd4267e
--- /dev/null
+++ b/deps/v8/test/mjsunit/asm/return-types.js
@@ -0,0 +1,123 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --validate-asm
+
+(function SuccessReturnTypesMatch() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ function f(a) {
+ a = a | 0;
+ if ((a | 0) == 1) return 2.3;
+ if ((a | 0) == 2) return 4.2;
+ return 6.5;
+ }
+ return { f:f };
+ }
+ var m = Module(this);
+ assertTrue(%IsAsmWasmCode(Module));
+ assertEquals(2.3, m.f(1));
+ assertEquals(4.2, m.f(2));
+ assertEquals(6.5, m.f(3));
+})();
+
+(function FailReturnTypesMismatch() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ function f(a) {
+ a = a | 0;
+ if ((a | 0) == 1) return 2.3;
+ if ((a | 0) == 2) return 123;
+ return 4.2;
+ }
+ return { f:f };
+ }
+ var m = Module(this);
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(2.3, m.f(1));
+ assertEquals(123, m.f(2));
+ assertEquals(4.2, m.f(3));
+})();
+
+(function FailFallOffNonVoidFunction() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ function f(a) {
+ a = a | 0;
+ if ((a | 0) == 1) return 2.3;
+ if ((a | 0) == 2) return 4.2;
+ }
+ return { f:f };
+ }
+ var m = Module(this);
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(2.3, m.f(1));
+ assertEquals(4.2, m.f(2));
+ assertEquals(undefined, m.f(3));
+})();
+
+(function FailNonVoidVoidMismatch() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ function f(a) {
+ a = a | 0;
+ if ((a | 0) == 1) return 2.3;
+ if ((a | 0) == 2) return;
+ }
+ return { f:f };
+ }
+ var m = Module(this);
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(2.3, m.f(1));
+ assertEquals(undefined, m.f(2));
+ assertEquals(undefined, m.f(3));
+})();
+
+(function FailVoidNonVoidMismatch() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ function f(a) {
+ a = a | 0;
+ if ((a | 0) == 1) return;
+ if ((a | 0) == 2) return 2.3;
+ }
+ return { f:f };
+ }
+ var m = Module(this);
+ assertFalse(%IsAsmWasmCode(Module));
+ assertEquals(undefined, m.f(1));
+ assertEquals(2.3, m.f(2));
+ assertEquals(undefined, m.f(3));
+})();
+
+(function SuccessVoidFunction() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ function f(a) {
+ a = a | 0;
+ if ((a | 0) == 1) return;
+ return;
+ }
+ return { f:f };
+ }
+ var m = Module(this);
+ assertTrue(%IsAsmWasmCode(Module));
+ assertEquals(undefined, m.f(1));
+ assertEquals(undefined, m.f(2));
+})();
+
+(function SuccessFallOffVoidFunction() {
+ function Module(stdlib, imports, heap) {
+ "use asm";
+ function f(a) {
+ a = a | 0;
+ if ((a | 0) == 1) return;
+ }
+ return { f:f };
+ }
+ var m = Module(this);
+ assertTrue(%IsAsmWasmCode(Module));
+ assertEquals(undefined, m.f(1));
+ assertEquals(undefined, m.f(2));
+})();
diff --git a/deps/v8/test/mjsunit/basic-promise.js b/deps/v8/test/mjsunit/basic-promise.js
index 9905fa475f..da12f28198 100644
--- a/deps/v8/test/mjsunit/basic-promise.js
+++ b/deps/v8/test/mjsunit/basic-promise.js
@@ -8,14 +8,6 @@
// exceptions which are swallowed in a then clause.
failWithMessage = (msg) => %AbortJS(msg);
-let decrement = () => { %DecrementWaitCount(); }
-let increment = () => { %IncrementWaitCount(); }
-
-function WaitForPromise(p) {
- increment();
- p.then(decrement, decrement);
-}
-
function newPromise() {
var outerResolve;
var outerReject;
@@ -23,7 +15,7 @@ function newPromise() {
outerResolve = resolve;
outerReject = reject;
});
- WaitForPromise(promise); // explicitly wait for promise to resolve.
+ Promise.resolve(promise);
return {
resolve: outerResolve,
reject: outerReject,
diff --git a/deps/v8/test/mjsunit/compiler/array-constructor.js b/deps/v8/test/mjsunit/compiler/array-constructor.js
new file mode 100644
index 0000000000..583817b7d8
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/array-constructor.js
@@ -0,0 +1,89 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Test Array call with known Boolean.
+(() => {
+ function foo(x) { return Array(!!x); }
+
+ assertEquals([true], foo(true));
+ assertEquals([false], foo(false));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals([true], foo(true));
+ assertEquals([false], foo(false));
+})();
+
+// Test Array construct with known Boolean.
+(() => {
+ function foo(x) { return new Array(!!x); }
+
+ assertEquals([true], foo(true));
+ assertEquals([false], foo(false));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals([true], foo(true));
+ assertEquals([false], foo(false));
+})();
+
+// Test Array call with known String.
+(() => {
+ function foo(x) { return Array("" + x); }
+
+ assertEquals(["a"], foo("a"));
+ assertEquals(["b"], foo("b"));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(["a"], foo("a"));
+ assertEquals(["b"], foo("b"));
+})();
+
+// Test Array construct with known String.
+(() => {
+ function foo(x) { return new Array("" + x); }
+
+ assertEquals(["a"], foo("a"));
+ assertEquals(["b"], foo("b"));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(["a"], foo("a"));
+ assertEquals(["b"], foo("b"));
+})();
+
+// Test Array call with known fixed small integer.
+(() => {
+ function foo() { return Array(2); }
+
+ assertEquals(2, foo().length);
+ assertEquals(2, foo().length);
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo().length);
+})();
+
+// Test Array construct with known fixed small integer.
+(() => {
+ function foo() { return new Array(2); }
+
+ assertEquals(2, foo().length);
+ assertEquals(2, foo().length);
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals(2, foo().length);
+})();
+
+// Test Array call with multiple parameters.
+(() => {
+ function foo(x, y, z) { return Array(x, y, z); }
+
+ assertEquals([1, 2, 3], foo(1, 2, 3));
+ assertEquals([1, 2, 3], foo(1, 2, 3));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals([1, 2, 3], foo(1, 2, 3));
+})();
+
+// Test Array construct with multiple parameters.
+(() => {
+ function foo(x, y, z) { return new Array(x, y, z); }
+
+ assertEquals([1, 2, 3], foo(1, 2, 3));
+ assertEquals([1, 2, 3], foo(1, 2, 3));
+ %OptimizeFunctionOnNextCall(foo);
+ assertEquals([1, 2, 3], foo(1, 2, 3));
+})();
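These tests pin the argument type with !!x and "" + x because the Array constructor is overloaded: a single number argument is a length, while anything else becomes the sole element. In plain JavaScript:

    Array(2).length;  // 2: a single number argument is a length (two holes)
    Array("2");       // ["2"]: a single non-number becomes the element
    Array(1, 2, 3);   // [1, 2, 3]: multiple arguments are the elements
    new Array(true);  // [true]: the same rules apply with new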
diff --git a/deps/v8/test/mjsunit/compiler/constructor-inlining-no-harmony-restrict-constructor-return.js b/deps/v8/test/mjsunit/compiler/constructor-inlining-no-harmony-restrict-constructor-return.js
new file mode 100644
index 0000000000..6dbaf6bf3f
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/constructor-inlining-no-harmony-restrict-constructor-return.js
@@ -0,0 +1,12 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-harmony-restrict-constructor-return --max-deopt-count 200
+
+this.FLAG_harmony_restrict_constructor_return = false;
+try {
+ load('mjsunit/compiler/constructor-inlining.js');
+} catch(e) {
+ load('test/mjsunit/compiler/constructor-inlining.js');
+}
diff --git a/deps/v8/test/mjsunit/compiler/constructor-inlining.js b/deps/v8/test/mjsunit/compiler/constructor-inlining.js
new file mode 100644
index 0000000000..b01885d8ba
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/constructor-inlining.js
@@ -0,0 +1,128 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony-restrict-constructor-return --max-deopt-count 200
+
+if (this.FLAG_harmony_restrict_constructor_return === undefined)
+ this.FLAG_harmony_restrict_constructor_return = true;
+var counter = 0;
+var deopt_at = -1;
+
+class Base {
+ constructor(use, x){
+ if (deopt_at-- == 0) {
+ %_DeoptimizeNow();
+ %DeoptimizeFunction(testConstructorInlining);
+ }
+ counter++;
+ this.x = x;
+ if (use) {
+ return x;
+ }
+ }
+}
+
+class Derived extends Base {
+ constructor(use, x, y, deopt = false) {
+ super(use, x);
+ counter++;
+ if (deopt_at-- == 0) %_DeoptimizeNow();
+ this.y = y;
+ if (use) {
+ return y;
+ }
+ }
+}
+
+var DerivedDeoptCreate = new Proxy(Derived, {
+ get: function(target, name) {
+ if (name=='prototype') {
+ counter++;
+ if (deopt_at-- == 0) %DeoptimizeFunction(Derived);
+ }
+ return target[name];
+ }
+});
+
+function Constr(use, x){
+ counter++;
+ if (deopt_at-- == 0) %_DeoptimizeNow();
+ this.x = x;
+ if (use) {
+ return x;
+ }
+}
+
+%SetForceInlineFlag(Base);
+%SetForceInlineFlag(Derived);
+%SetForceInlineFlag(Constr);
+
+var a = {};
+var b = {};
+
+function testConstructorInlining(){
+ assertEquals(a, new Constr(true, a));
+ assertEquals(7, new Constr(false, 7).x);
+ assertEquals(5, new Constr(true, 5).x);
+
+ assertEquals(a, new Base(true, a));
+ assertEquals(7, new Base(false, 7).x);
+ if (FLAG_harmony_restrict_constructor_return) {
+    // Not using assertThrows here, so that the constructor call stays inlined.
+ try {
+ new Base(true, 5);
+ assertTrue(false);
+ } catch (e) {
+ if (!(e instanceof TypeError)) throw e;
+ }
+ } else {
+ assertEquals(5, new Base(true, 5).x);
+ }
+
+ assertEquals(b, new Derived(true, a, b));
+ assertEquals(a, new Derived(true, a, undefined));
+ assertEquals(5, new Derived(false, 5, 7).x);
+ assertEquals(7, new Derived(false, 5, 7).y);
+ try {
+ new Derived(true, a, 7)
+ assertTrue(false);
+ } catch (e) {
+ if (!(e instanceof TypeError)) throw e;
+ }
+ if (FLAG_harmony_restrict_constructor_return) {
+ try {
+ new Derived(true, 5, a)
+ assertTrue(false);
+ } catch (e) {
+ if (!(e instanceof TypeError)) throw e;
+ }
+ } else {
+ assertEquals(a, new Derived(true, 5, a));
+ }
+
+ %OptimizeFunctionOnNextCall(Derived);
+ assertEquals(b, new DerivedDeoptCreate(true, a, b));
+ %OptimizeFunctionOnNextCall(Derived);
+ assertEquals(a, new DerivedDeoptCreate(true, a, undefined));
+ %OptimizeFunctionOnNextCall(Derived);
+ assertEquals(5, new DerivedDeoptCreate(false, 5, 7).x);
+ %OptimizeFunctionOnNextCall(Derived);
+ assertEquals(7, new DerivedDeoptCreate(false, 5, 7).y);
+}
+
+testConstructorInlining();
+%OptimizeFunctionOnNextCall(testConstructorInlining);
+testConstructorInlining();
+
+var last = undefined;
+for(var i = 0; deopt_at < 0; ++i) {
+ deopt_at = i;
+ counter = 0;
+ %OptimizeFunctionOnNextCall(testConstructorInlining);
+ testConstructorInlining();
+ if (last !== undefined) {
+ assertEquals(counter, last)
+ }
+ last = counter;
+}
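The closing loop above is a deopt-point sweep: run i arms deopt_at = i, re-optimizes, and the check that counter matches the previous run verifies that deoptimizing at any single inlined constructor site neither skips nor replays constructor work. A stripped-down sketch of the same pattern, with hypothetical names, assuming d8 with --allow-natives-syntax and the mjsunit harness:

var hits = 0;
var trip = -1;                    // which call site deopts on this run
function work() {
  hits++;
  if (trip-- == 0) %_DeoptimizeNow();
}
function driver() { work(); work(); work(); }

var last = undefined;
for (var i = 0; trip < 0; ++i) {  // one run per possible deopt point
  trip = i;
  hits = 0;
  %OptimizeFunctionOnNextCall(driver);
  driver();
  // Every run must do the same amount of work, deopt or not.
  if (last !== undefined) assertEquals(last, hits);
  last = hits;
}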
diff --git a/deps/v8/test/mjsunit/compiler/deopt-inlined-from-call.js b/deps/v8/test/mjsunit/compiler/deopt-inlined-from-call.js
index db9eb96824..8fa8af5a9b 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-inlined-from-call.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-inlined-from-call.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
var global = this;
diff --git a/deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js b/deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js
index d14c8471bc..2c6473d15f 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-numberoroddball-binop.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
(function() {
function foo(x, y) { return x << y; }
diff --git a/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js b/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js
index ef85eee2b2..ebdcc6cce5 100644
--- a/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js
+++ b/deps/v8/test/mjsunit/compiler/deopt-string-outofbounds.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
var s = "12345";
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-16.js b/deps/v8/test/mjsunit/compiler/escape-analysis-16.js
new file mode 100644
index 0000000000..4cd9ae43fd
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-16.js
@@ -0,0 +1,18 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-turbo-loop-peeling --turbo-escape
+
+function foo(){
+ var o = {a : 5}
+ for (var i = 0; i < 100; ++i) {
+ o.a = 5;
+ o.a = 7;
+ }
+}
+
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo)
+foo();
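escape-analysis-16.js works because o never leaves foo: with --turbo-escape the allocation can be scalar-replaced and the repeated dead stores dropped, so the test checks that this transformation keeps the program's behavior intact. For contrast, a minimal sketch (hypothetical names) of the case the analysis must leave alone:

var sink;
function escapes() {
  var o = {a: 5};
  sink = o;        // o escapes through the global; the allocation stays
  o.a = 7;
  return o.a;
}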
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type-2.js b/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type-2.js
new file mode 100644
index 0000000000..2c2135b6da
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type-2.js
@@ -0,0 +1,41 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-escape
+
+function f(x) {
+ var o = {a : 0, b: 0};
+ if (x == 0) {
+ o.a = 1
+ } else {
+ if (x <= 1) {
+ if (x == 2) {
+ o.a = 2;
+ } else {
+ o.a = 1
+ }
+ o.a = 2;
+ } else {
+ if (x == 2) {
+ o.a = "x";
+ } else {
+ o.a = "x";
+ }
+ o.b = 22;
+ }
+ o.b = 22;
+ }
+ return o.a + 1;
+}
+
+f(0,0);
+f(1,0);
+f(2,0);
+f(3,0);
+f(0,1);
+f(1,1);
+f(2,1);
+f(3,1);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(f(2), "x1");
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js b/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js
new file mode 100644
index 0000000000..806b09b3de
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-phi-type.js
@@ -0,0 +1,24 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-escape --turbo-experimental --no-turbo-loop-peeling
+
+function f(x) {
+ var o = {a : 0};
+ var l = [1,2,3,4];
+ var res;
+ for (var i = 0; i < 3; ++i) {
+ if (x%2 == 0) { o.a = 1; b = false}
+ res = l[o.a];
+ o.a = x;
+ }
+ return res;
+}
+
+f(0);
+f(1);
+f(0);
+f(1);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(undefined, f(101));
diff --git a/deps/v8/test/mjsunit/compiler/escape-analysis-representation.js b/deps/v8/test/mjsunit/compiler/escape-analysis-representation.js
index 127d92ea7d..1c7cae76c9 100644
--- a/deps/v8/test/mjsunit/compiler/escape-analysis-representation.js
+++ b/deps/v8/test/mjsunit/compiler/escape-analysis-representation.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --use-escape-analysis --max-opt-count=100
+// Flags: --allow-natives-syntax --use-escape-analysis
// This tests that captured objects materialized through the deoptimizer
// have field descriptors with a representation matching the values that
diff --git a/deps/v8/test/mjsunit/compiler/increment-typefeedback.js b/deps/v8/test/mjsunit/compiler/increment-typefeedback.js
index dca488b13f..53e5ed678f 100644
--- a/deps/v8/test/mjsunit/compiler/increment-typefeedback.js
+++ b/deps/v8/test/mjsunit/compiler/increment-typefeedback.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
function f(x) {
x++;
diff --git a/deps/v8/test/mjsunit/compiler/inline-accessors.js b/deps/v8/test/mjsunit/compiler/inline-accessors.js
index 90e0263b42..b3985bf9dc 100644
--- a/deps/v8/test/mjsunit/compiler/inline-accessors.js
+++ b/deps/v8/test/mjsunit/compiler/inline-accessors.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --inline-accessors --max-opt-count=100
+// Flags: --allow-natives-syntax --inline-accessors
var accessorCallCount, setterArgument, setterValue, obj, forceDeopt;
diff --git a/deps/v8/test/mjsunit/compiler/inline-arguments.js b/deps/v8/test/mjsunit/compiler/inline-arguments.js
index 1579926e24..13f4a33e7b 100644
--- a/deps/v8/test/mjsunit/compiler/inline-arguments.js
+++ b/deps/v8/test/mjsunit/compiler/inline-arguments.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --max-opt-count=100
+// Flags: --allow-natives-syntax
function A() {
}
diff --git a/deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js b/deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js
index 8263fc2f37..08cbdbef8c 100644
--- a/deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js
+++ b/deps/v8/test/mjsunit/compiler/inlined-array-pop-opt.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
(function() {
function foo(a) { return a.pop(); }
diff --git a/deps/v8/test/mjsunit/compiler/inlined-call.js b/deps/v8/test/mjsunit/compiler/inlined-call.js
index 97f2514387..772dcbee8b 100644
--- a/deps/v8/test/mjsunit/compiler/inlined-call.js
+++ b/deps/v8/test/mjsunit/compiler/inlined-call.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
var global = this;
diff --git a/deps/v8/test/mjsunit/compiler/instanceof.js b/deps/v8/test/mjsunit/compiler/instanceof.js
index cb88e7c284..f6a364e607 100644
--- a/deps/v8/test/mjsunit/compiler/instanceof.js
+++ b/deps/v8/test/mjsunit/compiler/instanceof.js
@@ -131,3 +131,15 @@ F.__proto__ = null;
assertFalse(foo(new A()));
assertTrue(foo(new F()));
})();
+
+(function() {
+ function foo() {
+ var a = new A();
+ return a instanceof A;
+ }
+
+ assertTrue(foo());
+ assertTrue(foo());
+ %OptimizeFunctionOnNextCall(foo);
+ assertTrue(foo());
+})();
diff --git a/deps/v8/test/mjsunit/compiler/integral32-add-sub.js b/deps/v8/test/mjsunit/compiler/integral32-add-sub.js
index c85397cc8a..16515d3990 100644
--- a/deps/v8/test/mjsunit/compiler/integral32-add-sub.js
+++ b/deps/v8/test/mjsunit/compiler/integral32-add-sub.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
(function() {
function foo(x) {
diff --git a/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js b/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
index 094c338dcd..19b4d2a0f1 100644
--- a/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
+++ b/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
@@ -27,7 +27,7 @@
// Flags: --allow-natives-syntax --expose-gc
// Flags: --concurrent-recompilation --block-concurrent-recompilation
-// Flags: --crankshaft --no-always-opt
+// Flags: --opt --no-always-opt
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
diff --git a/deps/v8/test/mjsunit/compiler/object-getprototypeof.js b/deps/v8/test/mjsunit/compiler/object-getprototypeof.js
new file mode 100644
index 0000000000..ac172dbeb2
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/object-getprototypeof.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var prototype = Object.create(null);
+var object = Object.create(prototype);
+
+function foo() { return Object.getPrototypeOf(object); }
+
+assertSame(prototype, foo());
+assertSame(prototype, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertSame(prototype, foo());
diff --git a/deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js b/deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js
index 39f29c9e5c..eb8df4b50c 100644
--- a/deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js
+++ b/deps/v8/test/mjsunit/compiler/opt-next-call-turbo.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft --turbo-filter=*
+// Flags: --allow-natives-syntax --opt --turbo-filter=*
function foo() {
with ({ value:"fooed" }) { return value; }
diff --git a/deps/v8/test/mjsunit/compiler/opt-next-call.js b/deps/v8/test/mjsunit/compiler/opt-next-call.js
index f1ae5bda09..2878efefe9 100644
--- a/deps/v8/test/mjsunit/compiler/opt-next-call.js
+++ b/deps/v8/test/mjsunit/compiler/opt-next-call.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
function foo() {
return "fooed";
diff --git a/deps/v8/test/mjsunit/compiler/optimized-float32array-length.js b/deps/v8/test/mjsunit/compiler/optimized-float32array-length.js
index 6fde8d6daf..6e08e4a57f 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-float32array-length.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-float32array-length.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
var a = new Float32Array(1);
function len(a) { return a.length; }
diff --git a/deps/v8/test/mjsunit/compiler/optimized-float64array-length.js b/deps/v8/test/mjsunit/compiler/optimized-float64array-length.js
index 13a7539054..7d48d09c68 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-float64array-length.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-float64array-length.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
var a = new Float64Array(1);
function len(a) { return a.length; }
diff --git a/deps/v8/test/mjsunit/compiler/optimized-int32array-length.js b/deps/v8/test/mjsunit/compiler/optimized-int32array-length.js
index cd1b7a775a..00bf8d12a4 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-int32array-length.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-int32array-length.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
var a = new Int32Array(1);
function len(a) { return a.length; }
diff --git a/deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js b/deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js
index fe56e68cb5..3a88ed7d25 100644
--- a/deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js
+++ b/deps/v8/test/mjsunit/compiler/optimized-uint32array-length.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
var a = new Uint32Array(1);
function len(a) { return a.length; }
diff --git a/deps/v8/test/mjsunit/compiler/reflect-getprototypeof.js b/deps/v8/test/mjsunit/compiler/reflect-getprototypeof.js
new file mode 100644
index 0000000000..a5ea89140e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/reflect-getprototypeof.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var prototype = Object.create(null);
+var object = Object.create(prototype);
+
+function foo() { return Reflect.getPrototypeOf(object); }
+
+assertSame(prototype, foo());
+assertSame(prototype, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertSame(prototype, foo());
diff --git a/deps/v8/test/mjsunit/compiler/regress-5320.js b/deps/v8/test/mjsunit/compiler/regress-5320.js
index 026a51e7cb..e2fa65de31 100644
--- a/deps/v8/test/mjsunit/compiler/regress-5320.js
+++ b/deps/v8/test/mjsunit/compiler/regress-5320.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
function OptimizeTruncatingBinaryOp(func) {
func(42, -2);
diff --git a/deps/v8/test/mjsunit/compiler/regress-715204.js b/deps/v8/test/mjsunit/compiler/regress-715204.js
new file mode 100644
index 0000000000..65f97c0ee3
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-715204.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var global = true;
+global = false;
+
+function f() {
+ global = 1;
+ return !global;
+}
+
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-715651.js b/deps/v8/test/mjsunit/compiler/regress-715651.js
new file mode 100644
index 0000000000..a75adc8ae7
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-715651.js
@@ -0,0 +1,38 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ this.x = 1;
+}
+
+var a = [];
+
+// Create enough objects to trigger slack tracking.
+for (let i = 0; i < 100; i++) {
+ new f();
+}
+
+function h() {
+ // Create a new object and add an out-of-object property 'y'.
+ var o = new f();
+ o.y = 1.5;
+ return o;
+}
+
+function g(o) {
+  // Add more properties so that we trigger extension of the out-of-object
+  // property store.
+ o.u = 1.1;
+ o.v = 1.2;
+ o.z = 1.3;
+  // Return a field from the out-of-object property store.
+ return o.y;
+}
+
+g(h());
+g(h());
+%OptimizeFunctionOnNextCall(g);
+assertEquals(1.5, g(h()));
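regress-715651.js leans on two heap details: once enough instances exist, slack tracking finalizes f's map so only x remains in-object, and properties added afterwards (y, then u, v, z) live in the separate out-of-object property store, which g grows before reading y back. That placement is not observable from JavaScript, so the sketch below only mirrors the setup:

function F() { this.x = 1; }             // x becomes an in-object property
for (var i = 0; i < 100; i++) new F();   // let slack tracking finish

var o = new F();
o.y = 1.5;        // added after construction: stored out-of-object
o.u = 1.1;        // three more properties force that store to be extended
o.v = 1.2;
o.z = 1.3;
console.log(o.y); // 1.5 -- the read the regression test guards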
diff --git a/deps/v8/test/mjsunit/compiler/regress-compare-negate.js b/deps/v8/test/mjsunit/compiler/regress-compare-negate.js
index 3ae2cc284c..5591af1b32 100644
--- a/deps/v8/test/mjsunit/compiler/regress-compare-negate.js
+++ b/deps/v8/test/mjsunit/compiler/regress-compare-negate.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turbo --crankshaft
+// Flags: --allow-natives-syntax --turbo --opt
function CompareNegate(a,b) {
a = a|0;
diff --git a/deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js b/deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js
index e872401c0b..0d524d20fd 100644
--- a/deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js
+++ b/deps/v8/test/mjsunit/compiler/regress-string-to-number-add.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turbo-type-feedback
+// Flags: --allow-natives-syntax
function f(x) {
var s = x ? "0" : "1";
diff --git a/deps/v8/test/mjsunit/compiler/turbo-number-feedback.js b/deps/v8/test/mjsunit/compiler/turbo-number-feedback.js
index 8dcc42c8a1..8875b8c0c3 100644
--- a/deps/v8/test/mjsunit/compiler/turbo-number-feedback.js
+++ b/deps/v8/test/mjsunit/compiler/turbo-number-feedback.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --turbo-type-feedback
+// Flags: --allow-natives-syntax
(function AddSubtractSmis() {
function f0(a, b, c) {
diff --git a/deps/v8/test/mjsunit/compiler/uint8-clamped-array.js b/deps/v8/test/mjsunit/compiler/uint8-clamped-array.js
index 17a0ad400e..21cc5bbc36 100644
--- a/deps/v8/test/mjsunit/compiler/uint8-clamped-array.js
+++ b/deps/v8/test/mjsunit/compiler/uint8-clamped-array.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
(function() {
function foo(a, v) {
diff --git a/deps/v8/test/mjsunit/const-field-tracking.js b/deps/v8/test/mjsunit/const-field-tracking.js
index ab1dbc1b23..3da6d29bf6 100644
--- a/deps/v8/test/mjsunit/const-field-tracking.js
+++ b/deps/v8/test/mjsunit/const-field-tracking.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
var global = this;
diff --git a/deps/v8/test/mjsunit/constant-folding-2.js b/deps/v8/test/mjsunit/constant-folding-2.js
index e9bea0aab5..e2d334b2f2 100644
--- a/deps/v8/test/mjsunit/constant-folding-2.js
+++ b/deps/v8/test/mjsunit/constant-folding-2.js
@@ -27,7 +27,7 @@
// Flags: --nodead-code-elimination --fold-constants
-// Flags: --allow-natives-syntax --nostress-opt --crankshaft
+// Flags: --allow-natives-syntax --nostress-opt --opt
function test(f) {
f();
diff --git a/deps/v8/test/mjsunit/deopt-minus-zero.js b/deps/v8/test/mjsunit/deopt-minus-zero.js
index 47d46cd0f4..38795afa33 100644
--- a/deps/v8/test/mjsunit/deopt-minus-zero.js
+++ b/deps/v8/test/mjsunit/deopt-minus-zero.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --expose-gc --crankshaft
+// Flags: --allow-natives-syntax --expose-gc --opt
function mul (a, b) {
return a * b;
diff --git a/deps/v8/test/mjsunit/deopt-recursive-eager-once.js b/deps/v8/test/mjsunit/deopt-recursive-eager-once.js
index 0c044694b2..ba88b01b1a 100644
--- a/deps/v8/test/mjsunit/deopt-recursive-eager-once.js
+++ b/deps/v8/test/mjsunit/deopt-recursive-eager-once.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft --turbo --no-always-opt
+// Flags: --allow-natives-syntax --opt --turbo --no-always-opt
function foo(i, deopt = false) {
if (i == 0) {
diff --git a/deps/v8/test/mjsunit/deopt-recursive-lazy-once.js b/deps/v8/test/mjsunit/deopt-recursive-lazy-once.js
index ace7bdfda6..f0eec59a4b 100644
--- a/deps/v8/test/mjsunit/deopt-recursive-lazy-once.js
+++ b/deps/v8/test/mjsunit/deopt-recursive-lazy-once.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft --turbo --no-always-opt
+// Flags: --allow-natives-syntax --opt --turbo --no-always-opt
function foo(i, deopt = false) {
if (i == 0) {
diff --git a/deps/v8/test/mjsunit/deopt-recursive-soft-once.js b/deps/v8/test/mjsunit/deopt-recursive-soft-once.js
index af1eb731f4..9338ff553b 100644
--- a/deps/v8/test/mjsunit/deopt-recursive-soft-once.js
+++ b/deps/v8/test/mjsunit/deopt-recursive-soft-once.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft --turbo --no-always-opt
+// Flags: --allow-natives-syntax --opt --turbo --no-always-opt
function foo(i, deopt = false, deoptobj = null) {
diff --git a/deps/v8/test/mjsunit/deopt-unlinked.js b/deps/v8/test/mjsunit/deopt-unlinked.js
index a1f8e72ddb..e7374da543 100644
--- a/deps/v8/test/mjsunit/deopt-unlinked.js
+++ b/deps/v8/test/mjsunit/deopt-unlinked.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft --turbo --no-always-opt
+// Flags: --allow-natives-syntax --opt --turbo --no-always-opt
function foo() {}
diff --git a/deps/v8/test/mjsunit/deopt-with-fp-regs.js b/deps/v8/test/mjsunit/deopt-with-fp-regs.js
index 7591f0358d..bdb08053ae 100644
--- a/deps/v8/test/mjsunit/deopt-with-fp-regs.js
+++ b/deps/v8/test/mjsunit/deopt-with-fp-regs.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
deopt_trigger = 0;
side_effect = 0;
diff --git a/deps/v8/test/mjsunit/deserialize-optimize-inner.js b/deps/v8/test/mjsunit/deserialize-optimize-inner.js
index bbd3875b55..ca78b8a4b0 100644
--- a/deps/v8/test/mjsunit/deserialize-optimize-inner.js
+++ b/deps/v8/test/mjsunit/deserialize-optimize-inner.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --cache=code --no-lazy --serialize-inner
-// Flags: --crankshaft
+// Flags: --allow-natives-syntax --cache=code --no-lazy --opt
function f(x, y) { return x + y; }
diff --git a/deps/v8/test/mjsunit/dictionary-properties.js b/deps/v8/test/mjsunit/dictionary-properties.js
index 33360d7f52..cffa48547e 100644
--- a/deps/v8/test/mjsunit/dictionary-properties.js
+++ b/deps/v8/test/mjsunit/dictionary-properties.js
@@ -11,6 +11,7 @@ function SlowObject() {
this.foo = 1;
this.bar = 2;
this.qux = 3;
+ this.z = 4;
delete this.qux;
assertFalse(%HasFastProperties(this));
}
@@ -38,6 +39,7 @@ function SlowPrototype() {
}
SlowPrototype.prototype.bar = 2;
SlowPrototype.prototype.baz = 3;
+SlowPrototype.prototype.z = 4;
delete SlowPrototype.prototype.baz;
assertFalse(%HasFastProperties(SlowPrototype.prototype));
var slow_proto = new SlowPrototype;
diff --git a/deps/v8/test/mjsunit/div-mul-minus-one.js b/deps/v8/test/mjsunit/div-mul-minus-one.js
index c7643ae262..71ad299610 100644
--- a/deps/v8/test/mjsunit/div-mul-minus-one.js
+++ b/deps/v8/test/mjsunit/div-mul-minus-one.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
function div(g) {
return (g/-1) ^ 1
diff --git a/deps/v8/test/mjsunit/element-read-only.js b/deps/v8/test/mjsunit/element-read-only.js
index 9ec027f6cc..dcc7e421b6 100644
--- a/deps/v8/test/mjsunit/element-read-only.js
+++ b/deps/v8/test/mjsunit/element-read-only.js
@@ -2,17 +2,23 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
function f(a, b, c, d) { return arguments; }
// Ensure non-configurable argument elements stay non-configurable.
(function () {
var args = f(1);
Object.defineProperty(args, "0", {value: 10, configurable: false});
+ %HeapObjectVerify(args);
assertFalse(Object.getOwnPropertyDescriptor(args, "0").configurable);
+ %HeapObjectVerify(args);
for (var i = 0; i < 10; i++) {
args[i] = 1;
}
+ %HeapObjectVerify(args);
assertFalse(Object.getOwnPropertyDescriptor(args, "0").configurable);
+ %HeapObjectVerify(args);
})();
// Ensure read-only properties on the prototype chain cause TypeError.
@@ -27,7 +33,11 @@ function f(a, b, c, d) { return arguments; }
for (var i = 0; i < index; i++) {
store(o, i, 0);
}
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
Object.defineProperty(proto, index, {value: 100, writable: false});
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
assertThrows(function() { store(o, index, 0); });
assertEquals(100, o[index]);
})();
@@ -42,7 +52,11 @@ function f(a, b, c, d) { return arguments; }
for (var i = 0; i < index; i++) {
store(o, i, 0);
}
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
Object.defineProperty(proto, index, {value: 100, writable: false});
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
assertThrows(function() { store(o, index, 0); });
assertEquals(100, o[index]);
})();
@@ -57,7 +71,11 @@ function f(a, b, c, d) { return arguments; }
for (var i = 0; i < index; i++) {
store(o, i, 0);
}
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
Object.defineProperty(proto, index, {value: 100, writable: false});
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
assertThrows(function() { store(o, index, 0); });
assertEquals(100, o[index]);
})();
@@ -72,7 +90,11 @@ function f(a, b, c, d) { return arguments; }
for (var i = 0; i < index; i++) {
store(o, i, 0);
}
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
Object.defineProperty(proto, index, {value: 100, writable: false});
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
assertThrows(function() { store(o, index, 0); });
assertEquals(100, o[index]);
})();
@@ -87,12 +109,17 @@ function f(a, b, c, d) { return arguments; }
for (var i = 0; i < index; i++) {
store(o, i, 0);
}
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
Object.preventExtensions(proto);
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
Object.defineProperty(proto, index, {value: 100, writable: false});
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
assertThrows(function() { store(o, index, 0); });
assertEquals(100, o[index]);
})();
-
// Arguments object with extensions prevented.
(function () {
var o = [];
@@ -103,8 +130,14 @@ function f(a, b, c, d) { return arguments; }
for (var i = 0; i < index; i++) {
store(o, i, 0);
}
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
Object.preventExtensions(proto);
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
Object.defineProperty(proto, index, {value: 100, writable: false});
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
assertThrows(function() { store(o, index, 0); });
assertEquals(100, o[index]);
})();
@@ -120,7 +153,11 @@ function f(a, b, c, d) { return arguments; }
store(o, i, 0);
}
proto[1 << 30] = 1;
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
Object.defineProperty(proto, index, {value: 100, writable: false});
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
assertThrows(function() { store(o, index, 0); });
assertEquals(100, o[index]);
})();
@@ -134,7 +171,11 @@ function f(a, b, c, d) { return arguments; }
for (var i = 0; i < 3; i++) {
store(o, i, 0);
}
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
Object.freeze(proto);
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
assertThrows(function() { store(o, 3, 0); });
assertEquals(3, o[3]);
})();
@@ -148,7 +189,11 @@ function f(a, b, c, d) { return arguments; }
for (var i = 0; i < 3; i++) {
store(o, i, 0);
}
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
Object.freeze(proto);
+ %HeapObjectVerify(proto);
+ %HeapObjectVerify(o);
assertThrows(function() { store(o, 3, 0); });
assertEquals(3, o[3]);
})();
diff --git a/deps/v8/test/mjsunit/elements-transition-hoisting.js b/deps/v8/test/mjsunit/elements-transition-hoisting.js
index db5b24f6a0..2b67b62b92 100644
--- a/deps/v8/test/mjsunit/elements-transition-hoisting.js
+++ b/deps/v8/test/mjsunit/elements-transition-hoisting.js
@@ -26,7 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax
-// Flags: --nostress-opt --crankshaft
+// Flags: --nostress-opt --opt
// Ensure that ElementsKind transitions in various situations are hoisted (or
// not hoisted) correctly, don't change the semantics of programs and don't trigger
diff --git a/deps/v8/test/mjsunit/ensure-growing-store-learns.js b/deps/v8/test/mjsunit/ensure-growing-store-learns.js
index 659ff0d66e..7cb8aaa8ba 100644
--- a/deps/v8/test/mjsunit/ensure-growing-store-learns.js
+++ b/deps/v8/test/mjsunit/ensure-growing-store-learns.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --noverify-heap --noenable-slow-asserts
-// Flags: --crankshaft --no-always-opt
+// Flags: --opt --no-always-opt
// --noverify-heap and --noenable-slow-asserts are set because the test is too
// slow with it on.
@@ -65,11 +65,11 @@
assertTrue(%HasFastSmiElements(a));
// Grow a large array into large object space through the keyed store
- // without deoptimizing. Grow by 10s. If we set elements too sparsely, the
+ // without deoptimizing. Grow by 9s. If we set elements too sparsely, the
// array will convert to dictionary mode.
a = new Array(99999);
assertTrue(%HasFastSmiElements(a));
- for (var i = 0; i < 263000; i += 10) {
+ for (var i = 0; i < 263000; i += 9) {
foo2(a, i);
}
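The tightened stride matters because V8 keeps an array in fast (flat) elements only while it stays dense enough; grow it too sparsely and the backing store is converted to dictionary mode, which would defeat what the test measures. A small sketch of the two outcomes, assuming d8 with --allow-natives-syntax (the thresholds are engine heuristics, not spec):

var fast = [];
for (var i = 0; i < 1000; i++) fast[i] = i;  // dense growth stays fast
assertTrue(%HasFastSmiElements(fast));

var sparse = [];
sparse[200000] = 1;                 // far past capacity: dictionary mode
assertTrue(%HasDictionaryElements(sparse));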
diff --git a/deps/v8/test/mjsunit/es6/array-iterator-turbo.js b/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
index def018eea2..c9182f89e0 100644
--- a/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
+++ b/deps/v8/test/mjsunit/es6/array-iterator-turbo.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --turbo --turbo-escape --allow-natives-syntax --no-always-opt
-// Flags: --crankshaft --turbo-filter=*
+// Flags: --opt --turbo-filter=*
"use strict";
@@ -43,13 +43,15 @@ let tests = {
return sum;
},
- FAST_HOLEY_DOUBLE_ELEMENTS(array) {
+ // TODO(6587): Re-enable the below test case once we no longer deopt due
+ // to non-truncating uses of {CheckFloat64Hole} nodes.
+ /*FAST_HOLEY_DOUBLE_ELEMENTS(array) {
let sum = 0.0;
for (let x of array) {
if (x) sum += x;
}
return sum;
- }
+ }*/
};
let tests = {
@@ -84,12 +86,14 @@ let tests = {
array2: [0.6, 0.4, 0.2],
expected2: 1.2
},
- FAST_HOLEY_DOUBLE_ELEMENTS: {
+ // TODO(6587): Re-enable the below test case once we no longer deopt due
+ // to non-truncating uses of {CheckFloat64Hole} nodes.
+ /*FAST_HOLEY_DOUBLE_ELEMENTS: {
array: [0.1, , 0.3, , 0.5, , 0.7, , 0.9, ,],
expected: 2.5,
array2: [0.1, , 0.3],
expected2: 0.4
- }
+ }*/
};
for (let key of Object.keys(runners)) {
@@ -102,29 +106,13 @@ let tests = {
%OptimizeFunctionOnNextCall(fn);
fn(array);
- // TODO(bmeurer): FAST_HOLEY_DOUBLE_ELEMENTS maps generally deopt when
- // a hole is encountered. Test should be fixed once that is corrected.
- let expect_deopt = /HOLEY_DOUBLE/.test(key);
-
- if (expect_deopt) {
- assertUnoptimized(fn, '', key);
- } else {
- assertOptimized(fn, '', key);
- }
+ assertOptimized(fn, '', key);
assertEquals(expected, fn(array), key);
- if (expect_deopt) {
- assertUnoptimized(fn, '', key);
- } else {
- assertOptimized(fn, '', key);
- }
+ assertOptimized(fn, '', key);
// Check no deopt when another array with the same map is used
assertTrue(%HaveSameMap(array, array2), key);
- if (expect_deopt) {
- assertUnoptimized(fn, '', key);
- } else {
- assertOptimized(fn, '', key);
- }
+ assertOptimized(fn, '', key);
assertEquals(expected2, fn(array2), key);
// CheckMaps bailout
@@ -231,6 +219,10 @@ let tests = {
let clone = new array.constructor(array);
%ArrayBufferNeuter(clone.buffer);
assertThrows(() => sum(clone), TypeError);
+
+ // Clear the slate for the next iteration.
+ %DeoptimizeFunction(sum);
+ %ClearFunctionFeedback(sum);
}
}
};
diff --git a/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js b/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js
index d06153ed8a..5711ac270e 100644
--- a/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-let-crankshaft-sloppy.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
// Check that the following functions are optimizable.
var functions = [ f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
diff --git a/deps/v8/test/mjsunit/es6/block-let-crankshaft.js b/deps/v8/test/mjsunit/es6/block-let-crankshaft.js
index 99a8b52968..97de765c8a 100644
--- a/deps/v8/test/mjsunit/es6/block-let-crankshaft.js
+++ b/deps/v8/test/mjsunit/es6/block-let-crankshaft.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
"use strict";
diff --git a/deps/v8/test/mjsunit/es6/block-scoping-sloppy.js b/deps/v8/test/mjsunit/es6/block-scoping-sloppy.js
index 29eadb17d1..d86eb0794f 100644
--- a/deps/v8/test/mjsunit/es6/block-scoping-sloppy.js
+++ b/deps/v8/test/mjsunit/es6/block-scoping-sloppy.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
// Test functionality of block scopes.
// Hoisting of var declarations.
diff --git a/deps/v8/test/mjsunit/es6/block-scoping.js b/deps/v8/test/mjsunit/es6/block-scoping.js
index ec13592977..9fa22cddc3 100644
--- a/deps/v8/test/mjsunit/es6/block-scoping.js
+++ b/deps/v8/test/mjsunit/es6/block-scoping.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
// Test functionality of block scopes.
"use strict";
diff --git a/deps/v8/test/mjsunit/es6/destructuring-assignment.js b/deps/v8/test/mjsunit/es6/destructuring-assignment.js
index df9bb0e8c6..579c87718b 100644
--- a/deps/v8/test/mjsunit/es6/destructuring-assignment.js
+++ b/deps/v8/test/mjsunit/es6/destructuring-assignment.js
@@ -478,3 +478,93 @@ assertEquals(oz, [1, 2, 3, 4, 5]);
{ firstLetter: "B", rest: ["p", "u", "p", "p", "y"] },
], log);
})();
+
+(function testNewTarget() {
+ assertThrows("(function() { [...new.target] = []; })", SyntaxError);
+ assertThrows("(function() { [a] = [...new.target] = []; })", SyntaxError);
+ assertThrows("(function() { [new.target] = []; })", SyntaxError);
+ assertThrows("(function() { [a] = [new.target] = []; })", SyntaxError);
+ assertThrows("(function() { ({ a: new.target] = {a: 0}); })", SyntaxError);
+ assertThrows("(function() { ({ a } = { a: new.target } = {}); })",
+ SyntaxError);
+
+ function ReturnNewTarget1() {
+ var result;
+ [result = new.target] = [];
+ return result;
+ }
+
+ function ReturnNewTarget2() {
+ var result;
+ [result] = [new.target];
+ return result;
+ }
+
+ function ReturnNewTarget3() {
+ var result;
+ ({ result = new.target } = {});
+ return result;
+ }
+
+ function ReturnNewTarget4() {
+ var result;
+ ({ result } = { result: new.target });
+ return result;
+ }
+
+ function FakeNewTarget() {}
+ assertEquals(undefined, ReturnNewTarget1());
+ assertEquals(ReturnNewTarget1, new ReturnNewTarget1());
+ assertEquals(FakeNewTarget,
+ Reflect.construct(ReturnNewTarget1, [], FakeNewTarget));
+
+ assertEquals(undefined, ReturnNewTarget2());
+ assertEquals(ReturnNewTarget2, new ReturnNewTarget2());
+ assertEquals(FakeNewTarget,
+ Reflect.construct(ReturnNewTarget2, [], FakeNewTarget));
+
+ assertEquals(undefined, ReturnNewTarget3());
+ assertEquals(ReturnNewTarget3, new ReturnNewTarget3());
+ assertEquals(FakeNewTarget,
+ Reflect.construct(ReturnNewTarget3, [], FakeNewTarget));
+
+ assertEquals(undefined, ReturnNewTarget4());
+ assertEquals(ReturnNewTarget4, new ReturnNewTarget4());
+ assertEquals(FakeNewTarget,
+ Reflect.construct(ReturnNewTarget4, [], FakeNewTarget));
+})();
+
+(function testSuperCall() {
+ function ctor(body) {
+ return () => eval("(class extends Object { \n" +
+ " constructor() {\n" +
+ body +
+ "\n }\n" +
+ "})");
+ }
+ assertThrows(ctor("({ new: super() } = {})"), SyntaxError);
+ assertThrows(ctor("({ new: x } = { new: super() } = {})"), SyntaxError);
+ assertThrows(ctor("[super()] = []"), SyntaxError);
+ assertThrows(ctor("[x] = [super()] = []"), SyntaxError);
+ assertThrows(ctor("[...super()] = []"), SyntaxError);
+ assertThrows(ctor("[x] = [...super()] = []"), SyntaxError);
+
+ class Base { get foo() { return 1; } }
+ function ext(body) {
+ return eval("new (class extends Base {\n" +
+ " constructor() {\n" +
+ body + ";\n" +
+ " return { x: super.foo }" +
+ "\n }\n" +
+ "})");
+ }
+ assertEquals(1, ext("let x; [x = super()] = []").x);
+ assertEquals(1, ext("let x, y; [y] = [x = super()] = []").x);
+ assertEquals(1, ext("let x; [x] = [super()]").x);
+ assertEquals(1, ext("let x, y; [y] = [x] = [super()]").x);
+
+ assertEquals(1, ext("let x; ({x = super()} = {})").x);
+ assertEquals(1, ext("let x, y; ({ x: y } = { x = super() } = {})").x);
+ assertEquals(1, ext("let x; ({x} = { x: super() })").x);
+ assertEquals(1, ext("let x, y; ({ x: y } = { x } = { x: super() })").x);
+})();
diff --git a/deps/v8/test/mjsunit/es6/regress/regress-6322.js b/deps/v8/test/mjsunit/es6/regress/regress-6322.js
new file mode 100644
index 0000000000..41f66171ed
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/regress/regress-6322.js
@@ -0,0 +1,6 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Crash with --verify-heap
+(function*() { for (let { a = class b { } } of [{}]) { } })().next();
diff --git a/deps/v8/test/mjsunit/es6/rest-params-lazy-parsing.js b/deps/v8/test/mjsunit/es6/rest-params-lazy-parsing.js
index c9b81661dc..271ddf3c4a 100644
--- a/deps/v8/test/mjsunit/es6/rest-params-lazy-parsing.js
+++ b/deps/v8/test/mjsunit/es6/rest-params-lazy-parsing.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --min-preparse-length=0
-
function variadic(co, ...values) {
var sum = 0;
while (values.length) {
diff --git a/deps/v8/test/mjsunit/es6/string-replace.js b/deps/v8/test/mjsunit/es6/string-replace.js
index 0beb57a536..16cadc5369 100644
--- a/deps/v8/test/mjsunit/es6/string-replace.js
+++ b/deps/v8/test/mjsunit/es6/string-replace.js
@@ -3,7 +3,8 @@
// found in the LICENSE file.
var pattern = {
- [Symbol.replace]: (string, newValue) => string + newValue
+ [Symbol.replace]: (string, newValue) => string + newValue,
+ toString: () => "c"
};
// Check object coercible fails.
assertThrows(() => String.prototype.replace.call(null, pattern, "x"),
@@ -13,5 +14,8 @@ assertEquals("abcdex", "abcde".replace(pattern, "x"));
// Non-callable override.
pattern[Symbol.replace] = "dumdidum";
assertThrows(() => "abcde".replace(pattern, "x"), TypeError);
+// Null override.
+pattern[Symbol.replace] = null;
+assertEquals("abXde", "abcde".replace(pattern, "X"));
assertEquals("[Symbol.replace]", RegExp.prototype[Symbol.replace].name);
diff --git a/deps/v8/test/mjsunit/es6/string-split.js b/deps/v8/test/mjsunit/es6/string-split.js
index 8ca655cad9..c21f9d3d94 100644
--- a/deps/v8/test/mjsunit/es6/string-split.js
+++ b/deps/v8/test/mjsunit/es6/string-split.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-var pattern = {};
+var pattern = {toString: () => ""};
var limit = { value: 3 };
pattern[Symbol.split] = function(string, limit) {
return string.length * limit.value;
@@ -15,5 +15,8 @@ assertEquals(15, "abcde".split(pattern, limit));
// Non-callable override.
pattern[Symbol.split] = "dumdidum";
assertThrows(() => "abcde".split(pattern, limit), TypeError);
+// Null override.
+pattern[Symbol.split] = null;
+assertEquals(["a", "b", "c", "d", "e"], "abcde".split(pattern));
assertEquals("[Symbol.split]", RegExp.prototype[Symbol.split].name);
diff --git a/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like-prototype-element-added.js b/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like-prototype-element-added.js
new file mode 100644
index 0000000000..edcba43b52
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like-prototype-element-added.js
@@ -0,0 +1,32 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+(function() {
+ var arr = [0, 1, , 3];
+ Array.prototype[2] = 2;
+
+ var constructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Float32Array,
+ Float64Array,
+ Uint8ClampedArray
+ ];
+
+ for (var constr of constructors) {
+ var ta = new constr(arr);
+ assertArrayEquals([0, 1, 2, 3], ta);
+ }
+})();
+
+(function testTypedArrayConstructByArrayLikeInvalidArrayProtector() {
+ Array.prototype[2] = undefined;
+ load("test/mjsunit/es6/typedarray-construct-by-array-like.js");
+})();
diff --git a/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js b/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
index 6f3e961a27..7d17812a8d 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-construct-by-array-like.js
@@ -4,7 +4,11 @@
// Flags: --allow-natives-syntax
-function TestConstructSmallObject(constr) {
+var tests = [];
+
+// Tests that will be called with each TypedArray constructor.
+
+tests.push(function TestConstructSmallObject(constr) {
var myObject = { 0: 5, 1: 6, length: 2 };
arr = new constr(myObject);
@@ -12,9 +16,9 @@ function TestConstructSmallObject(constr) {
assertEquals(2, arr.length);
assertEquals(5, arr[0]);
assertEquals(6, arr[1]);
-};
+});
-function TestConstructLargeObject(constr) {
+tests.push(function TestConstructLargeObject(constr) {
var myObject = {};
const n = 128;
for (var i = 0; i < n; i++) {
@@ -28,18 +32,18 @@ function TestConstructLargeObject(constr) {
for (var i = 0; i < n; i++) {
assertEquals(i, arr[i]);
}
-}
+});
-function TestConstructFromArrayWithSideEffects(constr) {
+tests.push(function TestConstructFromArrayWithSideEffects(constr) {
var arr = [{ valueOf() { arr[1] = 20; return 1; }}, 2];
var ta = new constr(arr);
assertEquals(1, ta[0]);
assertEquals(2, ta[1]);
-}
+});
-function TestConstructFromArrayWithSideEffectsHoley(constr) {
+tests.push(function TestConstructFromArrayWithSideEffectsHoley(constr) {
var arr = [{ valueOf() { arr[1] = 20; return 1; }}, 2, , 4];
var ta = new constr(arr);
@@ -48,10 +52,75 @@ function TestConstructFromArrayWithSideEffectsHoley(constr) {
assertEquals(2, ta[1]);
// ta[2] will be the default value, but we aren't testing that here.
assertEquals(4, ta[3]);
-}
+});
+
+tests.push(function TestConstructFromArrayHoleySmi(constr) {
+ var arr = [0, 1, , 3];
+
+ var ta = new constr(arr);
+
+ assertArrayEquals([0, 1, defaultValue(constr), 3], ta);
+});
+
+tests.push(function TestConstructFromArrayHoleyDouble(constr) {
+ var arr = [0.0, 1.0, , 3.0];
+
+ var ta = new constr(arr);
+
+ assertArrayEquals([0, 1, defaultValue(constr), 3], ta);
+});
+
+tests.push(function TestConstructFromArrayHoleySmiWithOtherPrototype(constr) {
+ var arr = [0, 1, , 3];
+ Object.setPrototypeOf(arr, { 2: 2 });
+
+ var ta = new constr(arr);
+
+ assertArrayEquals([0, 1, 2, 3], ta);
+});
+tests.push(function TestConstructFromArrayWithProxyPrototype(constr) {
+ var arr = [0, 1, , 3];
+ var proxy = new Proxy([], {
+ get: function(target, name) {
+ if (name === Symbol.iterator) return undefined;
+ if (name == 2) return 2;
+ return target[name];
+ }
+ });
+ Object.setPrototypeOf(arr, proxy);
-function TestConstructFromArray(constr) {
+ var ta = new constr(arr);
+
+ assertArrayEquals([0, 1, 2, 3], ta);
+});
+
+tests.push(function TestConstructFromArrayHoleySmiWithSubclass(constr) {
+ class SubArray extends Array {}
+ var arr = new SubArray(0, 1);
+ arr[3] = 3;
+
+ var ta = new constr(arr);
+
+ assertArrayEquals([0, 1, defaultValue(constr), 3], ta);
+});
+
+tests.push(function TestConstructFromArrayNoIteratorWithGetter(constr) {
+ var arr = [1, 2, 3];
+ arr[Symbol.iterator] = undefined;
+
+ Object.defineProperty(arr, "2", {
+ get() {
+ return 22;
+ }
+ });
+
+ var ta = new constr(arr);
+
+ assertArrayEquals([1, 2, 22], ta);
+});
+
+tests.push(function TestConstructFromArray(constr) {
var n = 64;
var jsArray = [];
for (var i = 0; i < n; i++) {
@@ -64,9 +133,9 @@ function TestConstructFromArray(constr) {
for (var i = 0; i < n; i++) {
assertEquals(i, arr[i]);
}
-}
+});
-function TestConstructFromTypedArray(constr) {
+tests.push(function TestConstructFromTypedArray(constr) {
var n = 64;
var ta = new constr(n);
for (var i = 0; i < ta.length; i++) {
@@ -79,15 +148,55 @@ function TestConstructFromTypedArray(constr) {
for (var i = 0; i < n; i++) {
assertEquals(i, arr[i]);
}
-}
+});
-function TestLengthIsMaxSmi(constr) {
+tests.push(function TestLengthIsMaxSmi(constr) {
var myObject = { 0: 5, 1: 6, length: %_MaxSmi() + 1 };
assertThrows(function() {
new constr(myObject);
}, RangeError);
-}
+});
+
+tests.push(function TestProxyHoleConverted(constr) {
+ var source = {0: 0, 2: 2, length: 3};
+ var proxy = new Proxy(source, {});
+
+ var converted = new constr(proxy);
+
+ assertArrayEquals([0, defaultValue(constr), 2], converted);
+});
+
+tests.push(function TestProxyToObjectValueOfCalled(constr) {
+ var thrower = { valueOf: function() { throw new TypeError(); } };
+ var source = {0: 0, 1: thrower, length: 2};
+ var proxy = new Proxy(source, {});
+
+ assertThrows(() => new constr(proxy), TypeError);
+});
+
+tests.push(function TestObjectValueOfCalled(constr) {
+ var thrower = { valueOf: function() { throw new TypeError(); } };
+
+ var obj = {0: 0, 1: thrower, length: 2};
+ assertThrows(() => new constr(obj), TypeError);
+});
+
+tests.push(function TestSmiPackedArray(constr) {
+ var ta = new constr([1, 2, 3, 4, 127]);
+
+ assertEquals(5 * constr.BYTES_PER_ELEMENT, ta.byteLength);
+ assertArrayEquals([1, 2, 3, 4, 127], ta);
+});
+
+tests.push(function TestOffsetIsUsed(constr) {
+ TestOffsetIsUsedRunner(constr, 4);
+ TestOffsetIsUsedRunner(constr, 16);
+ TestOffsetIsUsedRunner(constr, 32);
+ TestOffsetIsUsedRunner(constr, 128);
+});
+
+// Helpers for above tests.
function TestOffsetIsUsedRunner(constr, n) {
var buffer = new ArrayBuffer(constr.BYTES_PER_ELEMENT * n);
@@ -109,21 +218,13 @@ function TestOffsetIsUsedRunner(constr, n) {
}
}
-function TestOffsetIsUsed(constr, n) {
- TestOffsetIsUsedRunner(constr, 4);
- TestOffsetIsUsedRunner(constr, 16);
- TestOffsetIsUsedRunner(constr, 32);
- TestOffsetIsUsedRunner(constr, 128);
+function defaultValue(constr) {
+ if (constr == Float32Array || constr == Float64Array) return NaN;
+ return 0;
}
-Test(TestConstructSmallObject);
-Test(TestConstructLargeObject);
-Test(TestConstructFromArrayWithSideEffects);
-Test(TestConstructFromArrayWithSideEffectsHoley);
-Test(TestConstructFromArray);
-Test(TestConstructFromTypedArray);
-Test(TestLengthIsMaxSmi);
-Test(TestOffsetIsUsed);
+tests.forEach(f => Test(f));
+
function Test(func) {
func(Uint8Array);
@@ -136,3 +237,43 @@ function Test(func) {
func(Float64Array);
func(Uint8ClampedArray);
}
+
+// Other, standalone tests.
+
+(function TestUint8ClampedIsNotBitCopied() {
+ var arr = new Int8Array([-1.0, 0, 1.1, 255, 256]);
+ assertArrayEquals([-1, 0, 1, -1, 0], arr);
+ var expected = new Uint8ClampedArray([0, 0, 1, 0, 0]);
+
+ var converted = new Uint8ClampedArray(arr);
+
+ assertArrayEquals([0, 0, 1, 0, 0], converted);
+})();
+
+(function TestInt8ArrayCopying() {
+ var source = new Uint8Array([0, 1, 127, 128, 255, 256]);
+ assertArrayEquals([0, 1, 127, 128, 255, 0], source);
+
+ var converted = new Int8Array(source);
+
+ assertArrayEquals([0, 1, 127, -128, -1, 0], converted);
+})();
+
+(function TestInt16ArrayCopying() {
+ var source = new Uint16Array([0, 1, 32767, 32768, 65535, 65536]);
+ assertArrayEquals([0, 1, 32767, 32768, 65535, 0], source);
+
+ var converted = new Int16Array(source);
+
+ assertArrayEquals([0, 1, 32767, -32768, -1, 0], converted);
+})();
+
+(function TestInt32ArrayCopying() {
+ var source =
+ new Uint32Array([0, 1, 2147483647, 2147483648, 4294967295, 4294967296]);
+ assertArrayEquals([0, 1, 2147483647, 2147483648, 4294967295, 0], source);
+
+ var converted = new Int32Array(source);
+
+ assertArrayEquals([0, 1, 2147483647, -2147483648, -1, 0], converted);
+})();
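The copying tests pin down the element conversion rule: constructing one integer TypedArray from another converts each element modulo 2^n into the target's range, so for Int8Array 128 wraps to -128 and 255 to -1, while Uint8ClampedArray is deliberately not a bit copy and clamps each (already wrapped) source value into [0, 255]. The Int8 wrap as plain arithmetic:

// Int8 conversion: reduce mod 256, then shift into [-128, 127].
var toInt8 = v => ((v % 256) + 256 + 128) % 256 - 128;
console.log(toInt8(128));  // -128
console.log(toInt8(255));  // -1
console.log(new Int8Array(new Uint8Array([128, 255])));  // -128, -1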
diff --git a/deps/v8/test/mjsunit/es6/typedarray-construct-offset-not-smi.js b/deps/v8/test/mjsunit/es6/typedarray-construct-offset-not-smi.js
index 27beb762dc..0a267bc64b 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-construct-offset-not-smi.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-construct-offset-not-smi.js
@@ -2,18 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --mock-arraybuffer-allocator
(function TestBufferByteLengthNonSmi() {
var non_smi_byte_length = %_MaxSmi() + 1;
- try {
- var buffer = new ArrayBuffer(non_smi_byte_length);
- } catch (e) {
- // The ArrayBuffer allocation can fail on 32-bit archs, so no need to try to
- // construct the typed array.
- return;
- }
+ var buffer = new ArrayBuffer(non_smi_byte_length);
+
var arr = new Uint16Array(buffer);
assertEquals(non_smi_byte_length, arr.byteLength);
@@ -26,33 +21,17 @@
(function TestByteOffsetNonSmi() {
var non_smi_byte_length = %_MaxSmi() + 11;
- try {
- var buffer = new ArrayBuffer(non_smi_byte_length);
- } catch (e) {
- // The ArrayBuffer allocation can fail on 32-bit archs, so no need to try to
- // construct the typed array.
- return;
- }
- print(buffer.byteLength);
+
+ var buffer = new ArrayBuffer(non_smi_byte_length);
+
var whole = new Uint16Array(buffer);
- whole[non_smi_byte_length / 2 - 1] = 1;
- whole[non_smi_byte_length / 2 - 2] = 2;
- whole[non_smi_byte_length / 2 - 3] = 3;
- whole[non_smi_byte_length / 2 - 4] = 4;
- whole[non_smi_byte_length / 2 - 5] = 5;
+ assertEquals(non_smi_byte_length, whole.byteLength);
assertEquals(non_smi_byte_length / 2, whole.length);
- assertEquals(1, whole[non_smi_byte_length / 2 - 1]);
var arr = new Uint16Array(buffer, non_smi_byte_length - 10, 5);
assertEquals(non_smi_byte_length, arr.buffer.byteLength);
assertEquals(10, arr.byteLength);
assertEquals(5, arr.length);
-
- assertEquals(5, arr[0]);
- assertEquals(4, arr[1]);
- assertEquals(3, arr[2]);
- assertEquals(2, arr[3]);
- assertEquals(1, arr[4]);
})();
diff --git a/deps/v8/test/mjsunit/es6/typedarray-copywithin.js b/deps/v8/test/mjsunit/es6/typedarray-copywithin.js
index 1e63508393..c52a38625b 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-copywithin.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-copywithin.js
@@ -240,8 +240,6 @@ CheckEachTypedArray(function parametersNotCalledIfDetached(constructor) {
var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
%ArrayBufferNeuter(array.buffer);
- // TODO(caitp): this should throw due to being invoked on a TypedArray with a
- // detached buffer (per v8:4648).
- array.copyWithin(tmp, tmp, tmp);
+ assertThrows(() => array.copyWithin(tmp, tmp, tmp), TypeError);
assertEquals(0, array.length, "array.[[ViewedArrayBuffer]] is detached");
});
diff --git a/deps/v8/test/mjsunit/es6/typedarray-every.js b/deps/v8/test/mjsunit/es6/typedarray-every.js
index 4ceee5f3aa..a3498f5786 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-every.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-every.js
@@ -159,6 +159,11 @@ function TestTypedArrayForEach(constructor) {
assertEquals(Array.prototype.every.call(a,
function(elt) { x += elt; return true; }), true);
assertEquals(x, 4);
+
+ // Detached Operation
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.every(() => true), TypeError);
}
for (i = 0; i < typedArrayConstructors.length; i++) {
diff --git a/deps/v8/test/mjsunit/es6/typedarray-fill.js b/deps/v8/test/mjsunit/es6/typedarray-fill.js
index 260e5ab08a..9ed220373b 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-fill.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-fill.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
var intArrayConstructors = [
Uint8Array,
Int8Array,
@@ -67,6 +69,18 @@ for (var constructor of typedArrayConstructors) {
assertArrayEquals([3, 3], [a[0], a[1]]);
Array.prototype.fill.call(a, 4);
assertArrayEquals([4, 3], [a[0], a[1]]);
+
+ // Detached Operation
+ var tmp = {
+ [Symbol.toPrimitive]() {
+ assertUnreachable("Parameter should not be processed when " +
+ "array.[[ViewedArrayBuffer]] is neutered.");
+ return 0;
+ }
+ };
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.fill(tmp), TypeError);
}
for (var constructor of intArrayConstructors) {
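The detached-buffer checks added to fill above, and to find, findIndex, and indexOf below, all reuse one guard pattern: a probe object whose Symbol.toPrimitive calls assertUnreachable, proving the method throws TypeError before it ever coerces an argument once the buffer is neutered. The pattern in isolation, assuming d8 with --allow-natives-syntax and the mjsunit harness:

var probe = {
  [Symbol.toPrimitive]() {
    assertUnreachable("argument coerced after detach");
  }
};
var ta = new Uint8Array(8);
%ArrayBufferNeuter(ta.buffer);                  // detach the buffer
assertThrows(() => ta.fill(probe), TypeError);  // throws before coercing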
diff --git a/deps/v8/test/mjsunit/es6/typedarray-find.js b/deps/v8/test/mjsunit/es6/typedarray-find.js
index 69ceedc8b5..6f646e5c80 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-find.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-find.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
var typedArrayConstructors = [
Uint8Array,
Int8Array,
@@ -184,4 +186,17 @@ assertEquals(Array.prototype.find.call(a,
function(elt) { x += elt; return false; }), undefined);
assertEquals(x, 4);
+// Detached Operation
+var tmp = {
+ [Symbol.toPrimitive]() {
+ assertUnreachable("Parameter should not be processed when " +
+ "array.[[ViewedArrayBuffer]] is neutered.");
+ return 0;
+ }
+};
+
+var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+%ArrayBufferNeuter(array.buffer);
+
+assertThrows(() => array.find(tmp), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-findindex.js b/deps/v8/test/mjsunit/es6/typedarray-findindex.js
index 51c439203d..7447395e77 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-findindex.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-findindex.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
var typedArrayConstructors = [
Uint8Array,
Int8Array,
@@ -184,4 +186,15 @@ assertEquals(Array.prototype.findIndex.call(a,
function(elt) { x += elt; return false; }), -1);
assertEquals(x, 4);
+// Detached Operation
+ var tmp = {
+ [Symbol.toPrimitive]() {
+ assertUnreachable("Parameter should not be processed when " +
+ "array.[[ViewedArrayBuffer]] is neutered.");
+ return 0;
+ }
+ };
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.findIndex(tmp), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-foreach.js b/deps/v8/test/mjsunit/es6/typedarray-foreach.js
index b9789805f6..7a846b1ac7 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-foreach.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-foreach.js
@@ -148,6 +148,11 @@ function TestTypedArrayForEach(constructor) {
assertEquals(Array.prototype.forEach.call(a,
function(elt) { x += elt; }), undefined);
assertEquals(x, 4);
+
+ // Detached Operation
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.forEach(() => true), TypeError);
}
for (i = 0; i < typedArrayConstructors.length; i++) {
diff --git a/deps/v8/test/mjsunit/es6/typedarray-indexing.js b/deps/v8/test/mjsunit/es6/typedarray-indexing.js
index 1c439f9dda..d12a1570c2 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-indexing.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-indexing.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
var typedArrayConstructors = [
Uint8Array,
Int8Array,
@@ -14,6 +16,14 @@ var typedArrayConstructors = [
Float64Array
];
+var tmp = {
+ [Symbol.toPrimitive]() {
+ assertUnreachable("Parameter should not be processed when " +
+ "array.[[ViewedArrayBuffer]] is neutered.");
+ return 0;
+ }
+};
+
for (var constructor of typedArrayConstructors) {
var array = new constructor([1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]);
@@ -53,6 +63,11 @@ for (var constructor of typedArrayConstructors) {
}
assertEquals(-1, array.indexOf(NaN));
+ // Detached Operation
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.indexOf(tmp), TypeError);
+
// ----------------------------------------------------------------------
// %TypedArray%.prototype.lastIndexOf.
// ----------------------------------------------------------------------
@@ -89,4 +104,9 @@ for (var constructor of typedArrayConstructors) {
assertEquals(-1, array.lastIndexOf(-Infinity));
}
assertEquals(-1, array.lastIndexOf(NaN));
+
+ // Detached Operation
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.lastIndexOf(tmp), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-iteration.js b/deps/v8/test/mjsunit/es6/typedarray-iteration.js
index 9560cbc5df..b423ed0f04 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-iteration.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-iteration.js
@@ -4,6 +4,8 @@
// Tests for standard TypedArray array iteration functions.
+// Flags: --allow-natives-syntax
+
var typedArrayConstructors = [
Uint8Array,
Int8Array,
@@ -77,6 +79,11 @@ for (var constructor of typedArrayConstructors) {
assertArrayLikeEquals([2], a.filter(function(elt) {
return elt == 2;
}), constructor);
+
+ // Detached Operation
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.filter(() => false), TypeError);
})();
(function TypedArrayMapTest() {
@@ -130,6 +137,11 @@ for (var constructor of typedArrayConstructors) {
return NaN;
}), constructor);
}
+
+ // Detached Operation
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.map((v) => v), TypeError);
})();
//
@@ -189,6 +201,11 @@ for (var constructor of typedArrayConstructors) {
assertEquals(false, Array.prototype.some.call(a, function(elt) {
return elt == 2;
}));
+
+ // Detached Operation
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.some((v) => false), TypeError);
})();
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-map.js b/deps/v8/test/mjsunit/es6/typedarray-map.js
new file mode 100644
index 0000000000..54b535fd30
--- /dev/null
+++ b/deps/v8/test/mjsunit/es6/typedarray-map.js
@@ -0,0 +1,49 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var typedArrayConstructors = [
+ Uint8Array,
+ Int8Array,
+ Uint16Array,
+ Int16Array,
+ Uint32Array,
+ Int32Array,
+ Uint8ClampedArray,
+ Float32Array,
+ Float64Array];
+
+function TestTypedArrayMap(constructor) {
+ assertEquals(1, constructor.prototype.map.length);
+
+ var target;
+
+ class EscapingArray extends constructor {
+ constructor(...args) {
+ super(...args);
+ target = this;
+ }
+ }
+
+ class DetachingArray extends constructor {
+ static get [Symbol.species]() {
+ return EscapingArray;
+ }
+ }
+
+ assertThrows(function(){
+ new DetachingArray(5).map(function(v,i,a){
+ print(i);
+ if (i == 1) {
+ %ArrayBufferNeuter(target.buffer);
+ }
+ })
+ }, TypeError);
+
+}
+
+for (i = 0; i < typedArrayConstructors.length; i++) {
+ TestTypedArrayMap(typedArrayConstructors[i]);
+}
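
The new typedarray-map.js test works by leaking the species-constructed result: map() consults constructor[Symbol.species] to build its output array, the EscapingArray subclass saves that instance, and the callback then detaches the source buffer mid-iteration, so the next element load must throw. The species redirection itself is plain standard JavaScript:

// Sketch: Symbol.species controls which constructor map() uses for its result.
class PlainResult extends Uint8Array {
  static get [Symbol.species]() { return Uint8Array; }
}
const mapped = new PlainResult([1, 2, 3]).map(x => x + 1);
console.log(mapped instanceof Uint8Array);    // true
console.log(mapped instanceof PlainResult);   // false: species redirected it
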
diff --git a/deps/v8/test/mjsunit/es6/typedarray-reduce.js b/deps/v8/test/mjsunit/es6/typedarray-reduce.js
index 1fddeca0bc..ba5d7f7a20 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-reduce.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-reduce.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
var typedArrayConstructors = [
Uint8Array,
Int8Array,
@@ -247,4 +249,18 @@ for (var constructor of typedArrayConstructors) {
assertEquals(1, constructor.prototype.reduce.length);
assertEquals(1, constructor.prototype.reduceRight.length);
+
+ // Detached Operation
+ var tmp = {
+ [Symbol.toPrimitive]() {
+ assertUnreachable("Parameter should not be processed when " +
+ "array.[[ViewedArrayBuffer]] is neutered.");
+ return 0;
+ }
+ };
+
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.reduce(sum, tmp), TypeError);
+ assertThrows(() => array.reduceRight(sum, tmp), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-reverse.js b/deps/v8/test/mjsunit/es6/typedarray-reverse.js
index f32813e155..bfeb227c5c 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-reverse.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-reverse.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
function ArrayMaker(x) {
return x;
}
@@ -51,4 +53,11 @@ for (var constructor of arrayConstructors) {
}
assertEquals(0, a.reverse.length);
+
+ // Detached Operation
+ if (constructor != ArrayMaker) {
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.reverse(), TypeError);
+ }
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-slice.js b/deps/v8/test/mjsunit/es6/typedarray-slice.js
index cddc5bbdec..4fa3b9f21f 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-slice.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-slice.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
var typedArrayConstructors = [
Uint8Array,
Int8Array,
@@ -67,6 +69,18 @@ for (var constructor of typedArrayConstructors) {
assertEquals(3, slice[1]);
assertTrue(slice instanceof constructor);
+ // Detached Operation
+ var tmp = {
+ [Symbol.toPrimitive]() {
+ assertUnreachable("Parameter should not be processed when " +
+ "array.[[ViewedArrayBuffer]] is neutered.");
+ return 0;
+ }
+ };
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.slice(tmp, tmp), TypeError);
+
// Check that the species array must be a typed array
class MyTypedArray extends constructor {
static get[Symbol.species]() {
diff --git a/deps/v8/test/mjsunit/es6/typedarray-sort.js b/deps/v8/test/mjsunit/es6/typedarray-sort.js
index 9051a775d0..b69009b22d 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-sort.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-sort.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
var typedArrayConstructors = [
Uint8Array,
Int8Array,
@@ -60,4 +62,9 @@ for (var constructor of typedArrayConstructors) {
b[0] = 3; b[1] = 2; b[2] = 1;
a.sort();
assertArrayLikeEquals(a, [1, 2], constructor);
+
+ // Detached Operation
+ var array = new constructor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.sort(), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray-tostring.js b/deps/v8/test/mjsunit/es6/typedarray-tostring.js
index 9d49cb1cc9..a1fa9c7665 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-tostring.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-tostring.js
@@ -5,6 +5,8 @@
// Array's toString should call the object's own join method, if one exists and
// is callable. Otherwise, just use the original Object.toString function.
+// Flags: --allow-natives-syntax
+
var typedArrayConstructors = [
Uint8Array,
Int8Array,
@@ -96,4 +98,11 @@ for (var constructor of typedArrayConstructors) {
Number.prototype.toLocaleString = NumberToLocaleString;
})();
+
+ // Detached Operation
+ var array = new constructor([1, 2, 3]);
+ %ArrayBufferNeuter(array.buffer);
+ assertThrows(() => array.join(), TypeError);
+  assertThrows(() => array.toLocaleString(), TypeError);
+ assertThrows(() => array.toString(), TypeError);
}
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index a483e551a9..dcfc9caa30 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -496,6 +496,16 @@ function TestTypedArraySet() {
}
}
+ a = new Uint32Array();
+ a.set('');
+ assertEquals(0, a.length);
+
+ assertThrows(() => a.set('abc'), RangeError);
+
+ a = new Uint8Array(3);
+ a.set('123');
+ assertArrayEquals([1, 2, 3], a);
+
var a11 = new Int16Array([1, 2, 3, 4, 0, -1])
var a12 = new Uint16Array(15)
a12.set(a11, 3)
@@ -579,6 +589,21 @@ function TestTypedArraySet() {
assertThrows(function() { a.set(0, 1); }, TypeError);
assertEquals(1, a.set.length);
+
+ // Shared buffer that does not overlap.
+ var buf = new ArrayBuffer(32);
+ var a101 = new Int8Array(buf, 0, 16);
+ var b101 = new Uint8Array(buf, 16);
+ b101[0] = 42;
+ a101.set(b101);
+ assertArrayPrefix([42], a101);
+
+ buf = new ArrayBuffer(32);
+ var a101 = new Int8Array(buf, 0, 16);
+ var b101 = new Uint8Array(buf, 16);
+ a101[0] = 42;
+ b101.set(a101);
+ assertArrayPrefix([42], b101);
}
TestTypedArraySet();
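
The added set() cases pin down two behaviors: a string source is treated as a generic array-like (each single-character element coerced with ToNumber, and a RangeError when the source is longer than the target), and copying between two views that share a buffer without overlapping must still produce the right bytes. The string case in isolation:

const a = new Uint8Array(3);
a.set('123');                 // array-like source: length 3, elements '1','2','3'
console.log(Array.from(a));   // [1, 2, 3]
const empty = new Uint32Array(0);
try { empty.set('abc'); } catch (e) {
  console.log(e instanceof RangeError);  // true: source longer than target
}
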
diff --git a/deps/v8/test/mjsunit/es8/constructor-returning-primitive.js b/deps/v8/test/mjsunit/es8/constructor-returning-primitive.js
new file mode 100644
index 0000000000..1c0e7725be
--- /dev/null
+++ b/deps/v8/test/mjsunit/es8/constructor-returning-primitive.js
@@ -0,0 +1,318 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-restrict-constructor-return
+
+assertThrows(
+ () => {
+ new class {
+ constructor() {
+ return 1;
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class {
+ constructor() {
+ return 2147483649;
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class {
+ constructor() {
+ return true;
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class {
+ constructor() {
+ return null;
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class {
+ constructor() {
+ return "wat";
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class {
+ constructor() {
+ return Symbol();
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class {
+ constructor() {
+ return 2.2;
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class extends Object {
+ constructor() {
+ return 1;
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class extends Object {
+ constructor() {
+ return 2147483649;
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class extends Object {
+ constructor() {
+ return true;
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class extends Object {
+ constructor() {
+ return null;
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class extends Object {
+ constructor() {
+ return "wat";
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class extends Object {
+ constructor() {
+ return Symbol();
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class extends Object {
+ constructor() {
+ return 2.2;
+ }
+ }();
+ },
+ TypeError,
+ "Class constructors may only return object or undefined"
+);
+
+assertThrows(
+ () => {
+ new class extends Object {
+ constructor() {}
+ }();
+ },
+ ReferenceError,
+ "Must call super constructor in derived class before accessing " +
+ "'this' or returning from derived constructor"
+);
+
+(function() {
+ let ret_val = { x: 1 };
+ let x = new class {
+ constructor() {
+ return ret_val;
+ }
+ }();
+ assertSame(ret_val, x);
+})();
+
+(function() {
+ class Foo {
+ constructor() {}
+ }
+ let x = new Foo();
+ assertTrue(x instanceof Foo);
+})();
+
+(function() {
+ class Foo {
+ constructor() {
+ return undefined;
+ }
+ }
+ let x = new Foo();
+ assertTrue(x instanceof Foo);
+})();
+
+(function() {
+ let ret_val = { x: 1 };
+ let x = new class extends Object {
+ constructor() {
+ return ret_val;
+ }
+ }();
+ assertSame(ret_val, x);
+})();
+
+(function() {
+ class Foo extends Object {
+ constructor() {
+ super();
+ return undefined;
+ }
+ }
+
+ let x = new Foo();
+ assertTrue(x instanceof Foo);
+})();
+
+(function() {
+ class Foo extends Object {
+ constructor() {
+ super();
+ }
+ }
+
+ let x = new Foo();
+ assertTrue(x instanceof Foo);
+})();
+
+(function() {
+ function foo() {
+ return 1;
+ }
+ let x = new foo();
+ assertTrue(x instanceof foo);
+})();
+
+(function() {
+ function foo() {
+ return 2147483649;
+ }
+ let x = new foo();
+ assertTrue(x instanceof foo);
+})();
+
+(function() {
+ function foo() {
+ return true;
+ }
+ let x = new foo();
+ assertTrue(x instanceof foo);
+})();
+
+(function() {
+ function foo() {
+ return undefined;
+ }
+ let x = new foo();
+ assertTrue(x instanceof foo);
+})();
+
+(function() {
+ function foo() {
+ return null;
+ }
+ let x = new foo();
+ assertTrue(x instanceof foo);
+})();
+
+(function() {
+ function foo() {
+ return "wat";
+ }
+ let x = new foo();
+ assertTrue(x instanceof foo);
+})();
+
+(function() {
+ function foo() {
+ return Symbol();
+ }
+ let x = new foo();
+ assertTrue(x instanceof foo);
+})();
+
+(function() {
+ function foo() {
+ return 2.2;
+ }
+ let x = new foo();
+ assertTrue(x instanceof foo);
+})();
+
+(function() {
+ var ret_val = { x: 1 };
+ function foo() {
+ return ret_val;
+ }
+ let x = new foo();
+ assertSame(x, ret_val);
+})();
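
Under --harmony-restrict-constructor-return, the new test asserts an asymmetry: a class constructor that returns anything other than an object or undefined throws a TypeError (even null, which ordinary ES semantics would silently ignore for base classes), while a plain function called with new keeps ignoring primitive return values. A compact contrast, valid only with that flag enabled:

class C { constructor() { return 1; } }
try { new C(); } catch (e) {
  console.log(e instanceof TypeError);  // true under the flag
}
function F() { return 1; }
console.log(new F() instanceof F);      // true: primitive return is ignored
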
diff --git a/deps/v8/test/mjsunit/field-type-tracking.js b/deps/v8/test/mjsunit/field-type-tracking.js
index e6b19b9bbc..2e0cb8bd6f 100644
--- a/deps/v8/test/mjsunit/field-type-tracking.js
+++ b/deps/v8/test/mjsunit/field-type-tracking.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --nostress-opt --track-field-types
-// Flags: --crankshaft --no-always-opt
+// Flags: --opt --no-always-opt
(function() {
var o = { text: "Hello World!" };
diff --git a/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js b/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js
index b78b5562bf..8221665ccb 100644
--- a/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js
+++ b/deps/v8/test/mjsunit/fixed-context-shapes-when-recompiling.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --min-preparse-length=1 --allow-natives-syntax --lazy-inner-functions
+// Flags: --allow-natives-syntax --lazy-inner-functions
// Test that the information on which variables to allocate in context doesn't
// change when recompiling.
diff --git a/deps/v8/test/mjsunit/function-arguments-duplicate.js b/deps/v8/test/mjsunit/function-arguments-duplicate.js
index 80f03a106b..a0ec37ca10 100644
--- a/deps/v8/test/mjsunit/function-arguments-duplicate.js
+++ b/deps/v8/test/mjsunit/function-arguments-duplicate.js
@@ -27,10 +27,14 @@
// Exercises ArgumentsAccessStub::GenerateNewNonStrictSlow.
+// Flags: --allow-natives-syntax
+
function f(a, a) {
assertEquals(2, a);
assertEquals(1, arguments[0]);
assertEquals(2, arguments[1]);
+ assertEquals(2, arguments.length);
+ %HeapObjectVerify(arguments);
}
f(1, 2);
diff --git a/deps/v8/test/mjsunit/getters-on-elements.js b/deps/v8/test/mjsunit/getters-on-elements.js
index 85525f8466..c80cdb3f86 100644
--- a/deps/v8/test/mjsunit/getters-on-elements.js
+++ b/deps/v8/test/mjsunit/getters-on-elements.js
@@ -25,17 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --max-opt-count=100 --noalways-opt
-// Flags: --nocollect-maps --crankshaft
-
-// We specify max-opt-count because we opt/deopt the same function many
-// times.
-
-// We specify nocollect-maps because in gcstress we can end up deoptimizing
-// a function in a gc in the stack guard at the beginning of the (optimized)
-// function due to leftover map clearing work that results in deoptimizing
-// dependent code from those maps. The choice is to insert strategic gc()
-// calls or specify this flag.
+// Flags: --allow-natives-syntax --noalways-opt --opt
// It's nice to run this in other browsers too.
var standalone = false;
@@ -88,17 +78,38 @@ function base_getter_test(create_func) {
foo(a);
assertUnoptimized(foo);
+ // Smi and Double elements transition the KeyedLoadIC to Generic state
+ // here, because they miss twice with the same map when loading the hole.
+ // For FAST_HOLEY_ELEMENTS, however, the IC knows how to convert the hole
+ // to undefined if the prototype is the original array prototype, so it
+ // stays monomorphic for now...
foo(a);
foo(a);
delete a[0];
assertEquals(0, calls);
a.__proto__ = ap;
+ // ...and later becomes polymorphic when it sees a second map. Optimized
+ // code will therefore inline the elements access, and deopt right away
+ // when it loads the hole from index [0].
+ // Possible solutions:
+ // - remove the convert_hole_to_undefined flag from the IC, to force it
+ // into generic state for all elements kinds. Cost: slower ICs in code
+ // that doesn't get optimized.
+ // - teach Turbofan about the same trick: for holey elements with the
+ // original array prototype, convert hole to undefined inline. Cost:
+ // larger optimized code size, because the loads for different maps with
+ // the same elements kind can no longer be consolidated if they handle
+ // the hole differently.
+ // - call "foo" twice after setting a.__proto__ and before optimizing it;
+ // this is the simplest fix so let's do that for now.
foo(a);
assertEquals(1, calls);
- optimize(foo);
foo(a);
assertEquals(2, calls);
+ optimize(foo);
+ foo(a);
+ assertEquals(3, calls);
assertOptimized(foo);
// Testcase: getter "deep" in prototype chain.
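
The comment block added above hinges on the hole/undefined distinction: a holey element load finds no own property and falls through to the prototype chain, which is exactly the path the convert_hole_to_undefined fast case short-circuits. The observable semantics in plain JavaScript:

const holey = [0, , 2];      // index 1 is a hole, not a stored undefined
console.log(1 in holey);     // false
console.log(holey[1]);       // undefined: nothing found on the prototype chain
Object.defineProperty(Array.prototype, 1, {
  get() { return "from proto"; },
  configurable: true
});
console.log(holey[1]);       // "from proto": hole loads defer to the prototype
delete Array.prototype[1];   // clean up
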
diff --git a/deps/v8/test/mjsunit/global-accessors.js b/deps/v8/test/mjsunit/global-accessors.js
index 47f4328b0e..00658f43a5 100644
--- a/deps/v8/test/mjsunit/global-accessors.js
+++ b/deps/v8/test/mjsunit/global-accessors.js
@@ -26,6 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Test accessors on the global object.
+//
+// Flags: --no-harmony-strict-legacy-accessor-builtins
var x_ = 0;
diff --git a/deps/v8/test/mjsunit/harmony/async-generators-basic.js b/deps/v8/test/mjsunit/harmony/async-generators-basic.js
index 6d41aada60..29441b119b 100644
--- a/deps/v8/test/mjsunit/harmony/async-generators-basic.js
+++ b/deps/v8/test/mjsunit/harmony/async-generators-basic.js
@@ -1658,3 +1658,73 @@ assertEqualsAsync({ value: undefined, done: true }, () => it.next("x"));
assertEqualsAsync({ value: "nores", done: true },
() => it.return("nores"));
assertThrowsAsync(() => it.throw(new MyError("nores")), MyError, "nores");
+
+// ----------------------------------------------------------------------------
+// Simple yield*:
+
+log = [];
+async function* asyncGeneratorYieldStar1() {
+ yield* {
+ get [Symbol.asyncIterator]() {
+ log.push({ name: "get @@asyncIterator" });
+ return (...args) => {
+ log.push({ name: "call @@asyncIterator", args });
+ return this;
+ };
+ },
+ get [Symbol.iterator]() {
+ log.push({ name: "get @@iterator" });
+ return (...args) => {
+ log.push({ name: "call @@iterator", args });
+ return this;
+ }
+ },
+ get next() {
+ log.push({ name: "get next" });
+ return (...args) => {
+ log.push({ name: "call next", args });
+ return {
+ get then() {
+ log.push({ name: "get then" });
+ return null;
+ },
+ get value() {
+ log.push({ name: "get value" });
+ throw (exception = new MyError("AbruptValue!"));
+ },
+ get done() {
+ log.push({ name: "get done" });
+ return false;
+ }
+ };
+ }
+ },
+ get return() {
+ log.push({ name: "get return" });
+ return (...args) => {
+ log.push({ name: "call return", args });
+ return { value: args[0], done: true };
+ }
+ },
+ get throw() {
+ log.push({ name: "get throw" });
+ return (...args) => {
+ log.push({ name: "call throw", args });
+ throw args[0];
+ };
+ },
+ };
+}
+
+it = asyncGeneratorYieldStar1();
+assertThrowsAsync(() => it.next(), MyError);
+assertEquals([
+ { name: "get @@asyncIterator" },
+ { name: "call @@asyncIterator", args: [] },
+ { name: "get next" },
+ { name: "call next", args: [undefined] },
+ { name: "get then" },
+ { name: "get done" },
+ { name: "get value" },
+], log);
+assertEqualsAsync({ value: undefined, done: true }, () => it.next());
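
The log asserted above pins down the delegation protocol: yield* reads and calls @@asyncIterator once, reads next once, calls it, then unwraps the result by reading then (null here, so no thenable chaining), done, and finally value, whose throwing getter surfaces as the MyError rejection. The same choreography is visible with a plain sync generator:

const log = [];
function* g() {
  yield* {
    get [Symbol.iterator]() {
      log.push("get @@iterator");
      return () => { log.push("call @@iterator"); return this; };
    },
    get next() {
      log.push("get next");
      return () => { log.push("call next"); return { value: 1, done: true }; };
    },
  };
}
[...g()];
console.log(log);
// ["get @@iterator", "call @@iterator", "get next", "call next"]
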
diff --git a/deps/v8/test/mjsunit/harmony/atomics.js b/deps/v8/test/mjsunit/harmony/atomics.js
index 840d00e78b..ef90076103 100644
--- a/deps/v8/test/mjsunit/harmony/atomics.js
+++ b/deps/v8/test/mjsunit/harmony/atomics.js
@@ -62,9 +62,9 @@ var IntegerTypedArrayConstructors = [
var si32a = new Int32Array(sab);
var si32a2 = new Int32Array(sab, 4);
- // Non-integer indexes should throw RangeError.
- var nonInteger = [1.4, '1.4', NaN, -Infinity, Infinity, undefined, 'hi', {}];
- nonInteger.forEach(function(i) {
+ // Indexes that are out of bounds when coerced via ToIndex should throw
+ // RangeError.
+ [-Infinity, Infinity].forEach(function(i) {
assertThrows(function() { Atomics.compareExchange(si32a, i, 0); },
RangeError);
assertThrows(function() { Atomics.load(si32a, i, 0); }, RangeError);
@@ -140,7 +140,8 @@ var IntegerTypedArrayConstructors = [
};
// These values all map to index 0
- [-0, 0, 0.0, null, false].forEach(function(i) {
+ [-0, 0, 0.0, null, false, NaN, {}, '0.2', 'hi', undefined].forEach(
+ function(i) {
var name = String(i);
[si32a, si32a2].forEach(function(array) {
testOp(Atomics.compareExchange, array, i, 0, name);
@@ -564,3 +565,33 @@ function clearArray(sab) {
});
})();
+
+(function TestValidateIndexBeforeValue() {
+ var testOp = function(op, sta, name) {
+ var valueof_has_been_called = 0;
+ var value = {valueOf: function() { valueof_has_been_called = 1; return 0;}};
+ var index = -1;
+
+ // The index should be checked before calling ToInteger on the value, so
+ // valueof_has_been_called should not be modified.
+ sta[0] = 0;
+ assertThrows(function() { op(sta, index, value, value); }, RangeError);
+ assertEquals(0, valueof_has_been_called);
+ };
+
+ IntegerTypedArrayConstructors.forEach(function(t) {
+ var sab = new SharedArrayBuffer(10 * t.constr.BYTES_PER_ELEMENT);
+ var sta = new t.constr(sab);
+ var name = Object.prototype.toString.call(sta);
+
+ testOp(Atomics.compareExchange, sta, name);
+ testOp(Atomics.load, sta, name);
+ testOp(Atomics.store, sta, name);
+ testOp(Atomics.add, sta, name);
+ testOp(Atomics.sub, sta, name);
+ testOp(Atomics.and, sta, name);
+ testOp(Atomics.or, sta, name);
+ testOp(Atomics.xor, sta, name);
+ testOp(Atomics.exchange, sta, name);
+ });
+})();
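
Two invariants emerge from the updated atomics tests: the index is coerced with ToIndex (so NaN, {}, 'hi', and undefined all quietly become 0, while ±Infinity throws RangeError), and index validation happens before the value argument is coerced, so a valueOf side effect must never fire for a bad index. A hedged sketch (requires SharedArrayBuffer support):

const sta = new Int32Array(new SharedArrayBuffer(16));
let coerced = false;
const value = { valueOf() { coerced = true; return 1; } };
try { Atomics.store(sta, -1, value); } catch (e) {
  console.log(e instanceof RangeError);  // true: index rejected first
}
console.log(coerced);               // false: valueOf never ran
Atomics.store(sta, null, 1);        // ToIndex(null) is 0
console.log(Atomics.load(sta, 0)); // 1
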
diff --git a/deps/v8/test/mjsunit/harmony/do-expressions.js b/deps/v8/test/mjsunit/harmony/do-expressions.js
index 5adf1545a5..ea0ed2b04e 100644
--- a/deps/v8/test/mjsunit/harmony/do-expressions.js
+++ b/deps/v8/test/mjsunit/harmony/do-expressions.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --harmony-do-expressions --allow-natives-syntax --no-always-opt
-// Flags: --crankshaft
+// Flags: --opt
function returnValue(v) { return v; }
function MyError() {}
diff --git a/deps/v8/test/mjsunit/harmony/global-accessors-strict.js b/deps/v8/test/mjsunit/harmony/global-accessors-strict.js
new file mode 100644
index 0000000000..15a581e795
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/global-accessors-strict.js
@@ -0,0 +1,54 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test accessors on the global object.
+//
+// Flags: --harmony-strict-legacy-accessor-builtins
+
+var x_ = 0;
+
+this.__defineSetter__('x', function(x) { x_ = x; });
+this.__defineGetter__('x', function() { return x_; });
+
+this.__defineSetter__('y', function(x) { });
+this.__defineGetter__('y', function() { return 7; });
+
+function f(a) {
+ x = x + a;
+ return x;
+}
+
+function g(a) {
+ y = y + a;
+ return y;
+}
+
+assertEquals(1, f(1));
+assertEquals(3, f(2));
+
+assertEquals(7, g(1));
+assertEquals(7, g(2));
diff --git a/deps/v8/test/mjsunit/harmony/regexp-dotall.js b/deps/v8/test/mjsunit/harmony/regexp-dotall.js
index 9bf78431a9..eed5d26f05 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-dotall.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-dotall.js
@@ -56,6 +56,12 @@ function toSlowMode(re) {
assertFalse(re.dotAll);
}
+// Different construction variants with all flags.
+{
+ assertEquals("gimsuy", new RegExp("", "yusmig").flags);
+ assertEquals("gimsuy", new RegExp().compile("", "yusmig").flags);
+}
+
// Default '.' behavior.
{
let re = /^.$/;
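
The construction-variant check relies on the flags getter reassembling the string in a fixed canonical order (g, i, m, s, u, y) regardless of the order the flags were supplied in:

console.log(new RegExp("", "yusmig").flags);  // "gimsuy"
console.log(/foo/im.flags);                   // "im": same canonical order
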
diff --git a/deps/v8/test/mjsunit/harmony/regexp-named-captures.js b/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
index 42dbe0f74f..be90427cfa 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-named-captures.js
@@ -147,7 +147,7 @@ assertThrows('/(?<𐒤>a)/u', SyntaxError); // ID_Continue but not ID_Start.
assertEquals("a", /(?<π>a)/.exec("bab").groups.π);
assertEquals("a", /(?<$>a)/.exec("bab").groups.$);
assertEquals("a", /(?<_>a)/.exec("bab").groups._);
-assertEquals("a", /(?<$𐒤>a)/.exec("bab").groups.$𐒤);
+assertThrows("/(?<$𐒤>a)/", SyntaxError);
assertEquals("a", /(?<ಠ_ಠ>a)/.exec("bab").groups.ಠ_ಠ);
assertThrows('/(?<❤>a)/', SyntaxError);
assertThrows('/(?<𐒤>a)/', SyntaxError); // ID_Continue but not ID_Start.
@@ -195,10 +195,15 @@ assertTrue(/(?<\u0041>.)/u.test("a")); // \u NonSurrogate
assertTrue(/(?<\u{0041}>.)/u.test("a")); // \u{ Non-surrogate }
assertTrue(/(?<a\u{104A4}>.)/u.test("a")); // \u{ Surrogate, ID_Continue }
assertThrows("/(?<a\\u{110000}>.)/u", SyntaxError); // \u{ Out-of-bounds }
+assertThrows("/(?<a\\uD801>.)/u", SyntaxError); // Lead
+assertThrows("/(?<a\\uDCA4>.)/u", SyntaxError); // Trail
assertThrows("/(?<a\uD801>.)/u", SyntaxError); // Lead
assertThrows("/(?<a\uDCA4>.)/u", SyntaxError); // Trail
+assertTrue(RegExp("(?<\\u{0041}>.)", "u").test("a")); // Non-surrogate
+assertTrue(RegExp("(?<a\\u{104A4}>.)", "u").test("a")); // Surrogate,ID_Continue
assertTrue(RegExp("(?<\u{0041}>.)", "u").test("a")); // Non-surrogate
assertTrue(RegExp("(?<a\u{104A4}>.)", "u").test("a")); // Surrogate,ID_Continue
+assertTrue(RegExp("(?<\\u0041>.)", "u").test("a")); // Non-surrogate
assertThrows("/(?<a\\uD801\uDCA4>.)/", SyntaxError);
assertThrows("/(?<a\\uD801>.)/", SyntaxError);
@@ -207,10 +212,15 @@ assertTrue(/(?<\u0041>.)/.test("a"));
assertThrows("/(?<\\u{0041}>.)/", SyntaxError);
assertThrows("/(?<a\\u{104A4}>.)/", SyntaxError);
assertThrows("/(?<a\\u{10FFFF}>.)/", SyntaxError);
+assertThrows("/(?<a\\uD801>.)/", SyntaxError); // Lead
+assertThrows("/(?<a\\uDCA4>.)/", SyntaxError); // Trail;
assertThrows("/(?<a\uD801>.)/", SyntaxError); // Lead
assertThrows("/(?<a\uDCA4>.)/", SyntaxError); // Trail
+assertThrows("/(?<\\u{0041}>.)/", SyntaxError); // Non-surrogate
+assertThrows("/(?<a\\u{104A4}>.)/", SyntaxError); // Surrogate, ID_Continue
assertTrue(RegExp("(?<\u{0041}>.)").test("a")); // Non-surrogate
-assertTrue(RegExp("(?<a\u{104A4}>.)").test("a")); // Surrogate, ID_Continue
+assertThrows("(?<a\u{104A4}>.)", SyntaxError); // Surrogate, ID_Continue
+assertTrue(RegExp("(?<\\u0041>.)").test("a")); // Non-surrogate
// @@replace with a callable replacement argument (no named captures).
{
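
Away from the escape-sequence edge cases, the basic contract is unchanged: a group name must be a valid identifier, and matches expose named groups on the .groups object:

const m = /(?<π>a)/.exec("bab");
console.log(m.groups.π);  // "a"
try { new RegExp("(?<❤>a)"); } catch (e) {
  console.log(e instanceof SyntaxError);  // true: ❤ is not an identifier
}
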
diff --git a/deps/v8/test/mjsunit/harmony/regexp-property-binary.js b/deps/v8/test/mjsunit/harmony/regexp-property-binary.js
index e1daf08568..8ab3f19329 100644
--- a/deps/v8/test/mjsunit/harmony/regexp-property-binary.js
+++ b/deps/v8/test/mjsunit/harmony/regexp-property-binary.js
@@ -13,6 +13,19 @@ assertThrows("/\\p{Bidi_C=False}/u");
assertThrows("/\\P{Bidi_Control=Y}/u");
assertThrows("/\\p{AHex=Yes}/u");
+assertThrows("/\\p{Composition_Exclusion}/u");
+assertThrows("/\\p{CE}/u");
+assertThrows("/\\p{Full_Composition_Exclusion}/u");
+assertThrows("/\\p{Comp_Ex}/u");
+assertThrows("/\\p{Grapheme_Link}/u");
+assertThrows("/\\p{Gr_Link}/u");
+assertThrows("/\\p{Hyphen}/u");
+assertThrows("/\\p{NFD_Inert}/u");
+assertThrows("/\\p{NFDK_Inert}/u");
+assertThrows("/\\p{NFC_Inert}/u");
+assertThrows("/\\p{NFKC_Inert}/u");
+assertThrows("/\\p{Segment_Starter}/u");
+
t(/\p{Alphabetic}/u, "æ");
f(/\p{Alpha}/u, "1");
@@ -43,9 +56,6 @@ f(/\p{CWT}/u, "1");
t(/\p{Changes_When_Uppercased}/u, "b");
f(/\p{CWU}/u, "1");
-//t(/\p{Composition_Exclusion}/u, "\u0958");
-//f(/\p{CE}/u, "1");
-
t(/\p{Dash}/u, "-");
f(/\p{Dash}/u, "1");
@@ -76,9 +86,6 @@ f(/\p{Emoji_Presentation}/u, "x");
t(/\p{Extender}/u, "\u3005");
f(/\p{Ext}/u, "x");
-t(/\p{Full_Composition_Exclusion}/u, "\uFB1F");
-f(/\p{Comp_Ex}/u, "x");
-
t(/\p{Grapheme_Base}/u, " ");
f(/\p{Gr_Base}/u, "\u0010");
@@ -124,9 +131,6 @@ f(/\p{NChar}/u, "A");
t(/\p{Pattern_White_Space}/u, "\u0009");
f(/\p{Pat_Syn}/u, "A");
-// t(/\p{Prepended_Concatenation_Mark}/u, "\uFDD0");
-// f(/\p{PCM}/u, "A");
-
t(/\p{Quotation_Mark}/u, "'");
f(/\p{QMark}/u, "A");
diff --git a/deps/v8/test/mjsunit/indexed-accessors.js b/deps/v8/test/mjsunit/indexed-accessors.js
index b69695a99f..534a6c95c3 100644
--- a/deps/v8/test/mjsunit/indexed-accessors.js
+++ b/deps/v8/test/mjsunit/indexed-accessors.js
@@ -101,7 +101,7 @@ assertEquals(q1.b, 17);
// Reported by nth10sd.
a = function() {};
-__defineSetter__("0", function() {});
+this.__defineSetter__("0", function() {});
if (a |= '') {};
assertThrows('this[a].__parent__');
assertEquals(a, 0);
diff --git a/deps/v8/test/mjsunit/keyed-load-hole-to-undefined.js b/deps/v8/test/mjsunit/keyed-load-hole-to-undefined.js
index 9366458a5f..47dc65b0a0 100644
--- a/deps/v8/test/mjsunit/keyed-load-hole-to-undefined.js
+++ b/deps/v8/test/mjsunit/keyed-load-hole-to-undefined.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax
-// Flags: --no-stress-opt --crankshaft --no-always-opt
+// Flags: --no-stress-opt --opt --no-always-opt
// --nostress-opt is specified because the test corrupts the "pristine"
// array prototype chain by storing an element, and this is tracked
diff --git a/deps/v8/test/mjsunit/keyed-load-with-string-key.js b/deps/v8/test/mjsunit/keyed-load-with-string-key.js
index 2d5f0200d0..ee055e4790 100644
--- a/deps/v8/test/mjsunit/keyed-load-with-string-key.js
+++ b/deps/v8/test/mjsunit/keyed-load-with-string-key.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
var o = {
diff --git a/deps/v8/test/mjsunit/keyed-load-with-symbol-key.js b/deps/v8/test/mjsunit/keyed-load-with-symbol-key.js
index 2c818a84ae..d0be0a0545 100644
--- a/deps/v8/test/mjsunit/keyed-load-with-symbol-key.js
+++ b/deps/v8/test/mjsunit/keyed-load-with-symbol-key.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
var s = Symbol("foo");
diff --git a/deps/v8/test/mjsunit/keyed-store-generic.js b/deps/v8/test/mjsunit/keyed-store-generic.js
new file mode 100644
index 0000000000..c2c48dd96d
--- /dev/null
+++ b/deps/v8/test/mjsunit/keyed-store-generic.js
@@ -0,0 +1,22 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-stress-opt
+
+function f(a, i, v) { a[i] = v; }
+f("make it generic", 0, 0);
+
+(function TestIsConcatSpreadableProtector() {
+ var o = {length: 1, '0': 99};
+ %OptimizeObjectForAddingMultipleProperties(o, 0);
+ f(o, Symbol.isConcatSpreadable, true);
+ assertEquals([99], [].concat(o));
+})();
+
+(function TestSpeciesProtector() {
+ function MyArray() {}
+ assertTrue(%SpeciesProtector());
+ f(Array.prototype, "constructor", MyArray);
+ assertFalse(%SpeciesProtector());
+})();
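
The new keyed-store-generic.js test checks that even a store made from generic (megamorphic) code invalidates the protector cells. The species protector exists because a stored "constructor" on Array.prototype (or an own one on an instance) reroutes methods like slice through Symbol.species, an escape hatch the fast paths must then stop assuming away:

class MyArray extends Array {}
const a = [1, 2, 3];
a.constructor = MyArray;   // shadows Array.prototype.constructor for this array
console.log(a.slice(0, 1) instanceof MyArray);  // true: species rerouted slice
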
diff --git a/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js b/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
index 9a24231ae6..7418a4e257 100644
--- a/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
+++ b/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --nouse-inlining --crankshaft
+// Flags: --allow-natives-syntax --nouse-inlining --opt
// Test for negative zero that doesn't need bail out
diff --git a/deps/v8/test/mjsunit/math-imul.js b/deps/v8/test/mjsunit/math-imul.js
index c24f6a3970..e05c000c64 100644
--- a/deps/v8/test/mjsunit/math-imul.js
+++ b/deps/v8/test/mjsunit/math-imul.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --max-opt-count=1000
+// Flags: --allow-natives-syntax
var imul_func = Math.imul;
function imul_polyfill(a, b) {
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index 9c25004861..ff91185a72 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -192,33 +192,33 @@ test(function() {
}, "this is not a Date object.", TypeError);
// kNotGeneric
-test(function() {
- String.prototype.toString.call(1);
-}, "String.prototype.toString is not generic", TypeError);
+test(() => String.prototype.toString.call(1),
+ "String.prototype.toString requires that 'this' be a String",
+ TypeError);
-test(function() {
- String.prototype.valueOf.call(1);
-}, "String.prototype.valueOf is not generic", TypeError);
+test(() => String.prototype.valueOf.call(1),
+ "String.prototype.valueOf requires that 'this' be a String",
+ TypeError);
-test(function() {
- Boolean.prototype.toString.call(1);
-}, "Boolean.prototype.toString is not generic", TypeError);
+test(() => Boolean.prototype.toString.call(1),
+ "Boolean.prototype.toString requires that 'this' be a Boolean",
+ TypeError);
-test(function() {
- Boolean.prototype.valueOf.call(1);
-}, "Boolean.prototype.valueOf is not generic", TypeError);
+test(() => Boolean.prototype.valueOf.call(1),
+ "Boolean.prototype.valueOf requires that 'this' be a Boolean",
+ TypeError);
-test(function() {
- Number.prototype.toString.call({});
-}, "Number.prototype.toString is not generic", TypeError);
+test(() => Number.prototype.toString.call({}),
+ "Number.prototype.toString requires that 'this' be a Number",
+ TypeError);
-test(function() {
- Number.prototype.valueOf.call({});
-}, "Number.prototype.valueOf is not generic", TypeError);
+test(() => Number.prototype.valueOf.call({}),
+ "Number.prototype.valueOf requires that 'this' be a Number",
+ TypeError);
-test(function() {
- Function.prototype.toString.call(1);
-}, "Function.prototype.toString is not generic", TypeError);
+test(() => Function.prototype.toString.call(1),
+ "Function.prototype.toString requires that 'this' be a Function",
+ TypeError);
// kNotTypedArray
test(function() {
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index bef1b0b59f..754dcbb3de 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -123,6 +123,9 @@ var assertMatches;
// Assert the result of a promise.
var assertPromiseResult;
+var promiseTestChain;
+var promiseTestCount = 0;
+
// These bits must be in sync with bits defined in Runtime_GetOptimizationStatus
var V8OptimizationStatus = {
kIsFunction: 1 << 0,
@@ -134,7 +137,7 @@ var V8OptimizationStatus = {
kInterpreted: 1 << 6
};
-// Returns true if --no-crankshaft mode is on.
+// Returns true if --no-opt mode is on.
var isNeverOptimize;
// Returns true if --always-opt mode is on.
@@ -499,21 +502,35 @@ var failWithMessage;
// We have to patch mjsunit because normal assertion failures just throw
// exceptions which are swallowed in a then clause.
// We use eval here to avoid parsing issues with the natives syntax.
+ if (!success) success = () => {};
+
failWithMessage = (msg) => eval("%AbortJS(msg)");
- if (!fail)
+ if (!fail) {
fail = result => failWithMessage("assertPromiseResult failed: " + result);
+ }
- eval("%IncrementWaitCount()");
- promise.then(
- result => {
- eval("%DecrementWaitCount()");
- success(result);
- },
- result => {
- eval("%DecrementWaitCount()");
- fail(result);
- }
- );
+ var test_promise =
+ promise.then(
+ result => {
+ try {
+ success(result);
+ } catch (e) {
+ failWithMessage(e);
+ }
+ },
+ result => {
+ fail(result);
+ }
+ )
+ .then((x)=> {
+ if (--promiseTestCount == 0) testRunner.notifyDone();
+ });
+
+ if (!promiseTestChain) promiseTestChain = Promise.resolve();
+ // waitUntilDone is idempotent.
+ testRunner.waitUntilDone();
+ ++promiseTestCount;
+ return promiseTestChain.then(test_promise);
};
var OptimizationStatusImpl = undefined;
@@ -550,10 +567,10 @@ var failWithMessage;
assertOptimized = function assertOptimized(fun, sync_opt, name_opt) {
if (sync_opt === undefined) sync_opt = "";
var opt_status = OptimizationStatus(fun, sync_opt);
- // Tests that use assertOptimized() do not make sense if --no-crankshaft
- // option is provided. Such tests must add --crankshaft to flags comment.
+ // Tests that use assertOptimized() do not make sense if --no-opt
+ // option is provided. Such tests must add --opt to flags comment.
assertFalse((opt_status & V8OptimizationStatus.kNeverOptimize) !== 0,
- "test does not make sense with --no-crankshaft");
+ "test does not make sense with --no-opt");
assertTrue((opt_status & V8OptimizationStatus.kIsFunction) !== 0, name_opt);
if ((opt_status & V8OptimizationStatus.kMaybeDeopted) !== 0) {
// When --deopt-every-n-times flag is specified it's no longer guaranteed
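
The rewritten assertPromiseResult keeps the test runner alive by counting outstanding promise assertions: waitUntilDone() is called (idempotently) whenever work is registered, and notifyDone() only when the last pending promise settles. The core accounting, isolated as a sketch (testRunner stands in for the harness object the real file uses):

let pending = 0;
function track(promise) {
  testRunner.waitUntilDone();  // idempotent, as the patch notes
  ++pending;
  const done = () => { if (--pending === 0) testRunner.notifyDone(); };
  return promise.then(done, done);
}
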
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index c8c3c4e927..bb1630aad6 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -65,6 +65,7 @@
# Too slow in debug mode for validation of elements.
'regress/regress-430201': [PASS, ['mode == debug', SKIP]],
'regress/regress-430201b': [PASS, ['mode == debug', SKIP]],
+ 'regress/regress-716044': [PASS, ['mode == debug', SKIP]],
##############################################################################
# Too slow in debug mode for GC stress mode.
@@ -130,9 +131,6 @@
'compiler/alloc-number-debug': [PASS, ['mode == release', SKIP]],
'regress/regress-634-debug': [PASS, ['mode == release', SKIP]],
- # BUG(336820). TODO(bmeurer): Investigate.
- 'regress/regress-336820': [PASS, FAIL],
-
# BUG(v8:2989). PASS/FAIL on linux32 because crankshaft is turned off for
# nosse2. Also for arm novfp3.
'regress/regress-2989': [FAIL, NO_VARIANTS, ['system == linux and arch == x87 or arch == arm and simulator == True', PASS]],
@@ -148,7 +146,7 @@
# Slow tests.
'copy-on-write-assert': [PASS, SLOW],
'es6/tail-call-megatest*': [PASS, SLOW, FAST_VARIANTS, ['tsan', SKIP]],
- 'es6/typedarray-construct-offset-not-smi': [PASS, SLOW, NO_VARIANTS],
+ 'es6/typedarray-construct-offset-not-smi': [PASS, SLOW],
'harmony/regexp-property-script-extensions': [PASS, SLOW],
'numops-fuzz-part*': [PASS, ['mode == debug', SLOW]],
'readonly': [PASS, SLOW],
@@ -186,15 +184,8 @@
# which makes the test useless.
'big-object-literal': [PASS, ['mode == debug', SKIP]],
- # BUG(v8:5778): These fail with --future, which we are about to turn on.
- # Investigate.
- 'regress/regress-105': [SKIP],
-
- # BUG(v8:6101): This fails because of a hole deopt, need to investigate.
- 'getters-on-elements': [SKIP],
-
- # BUG(v8:6113).
- 'es6/array-iterator-turbo': [SKIP],
+ # BUG(v8:6306).
+ 'wasm/huge-memory': [SKIP],
}], # ALWAYS
['novfp3 == True', {
@@ -342,7 +333,7 @@
'whitespaces': [PASS, TIMEOUT, SLOW],
}], # 'arch == arm64'
-['arch == arm64 and mode == debug and simulator_run == True', {
+['arch == arm64 and mode == debug and simulator_run', {
# Pass but take too long with the simulator in debug mode.
'array-sort': [PASS, TIMEOUT],
@@ -354,7 +345,7 @@
'unicodelctest-no-optimization': [PASS, TIMEOUT],
# Issue 3219:
'getters-on-elements': [PASS, ['gc_stress == True', FAIL]],
-}], # 'arch == arm64 and mode == debug and simulator_run == True'
+}], # 'arch == arm64 and mode == debug and simulator_run'
##############################################################################
['asan == True', {
@@ -369,6 +360,10 @@
# Exception thrown during bootstrapping on ASAN builds, see issue 4236.
'regress/regress-1132': [SKIP],
+
+ # Flaky on ASAN builds: https://bugs.chromium.org/p/v8/issues/detail?id=6305
+ 'regress/regress-430201': [SKIP],
+ 'regress/regress-430201b': [SKIP],
}], # 'asan == True'
##############################################################################
@@ -387,7 +382,7 @@
# Flaky with baseline?
'regress/regress-2185-2': [SKIP],
- # Slow tests with --nocrankshaft.
+ # Slow tests with --noopt.
'compiler/osr-one': [PASS, SLOW],
'compiler/osr-two': [PASS, SLOW],
'wasm/grow-memory': [PASS, SLOW],
@@ -551,6 +546,7 @@
# Setting the timezone and locale with environment variables unavailable
'icu-date-to-string': [SKIP],
'icu-date-lord-howe': [SKIP],
+ 'regress/regress-6288': [SKIP],
}], # 'system == windows'
##############################################################################
@@ -601,11 +597,11 @@
}], # 'predictable == True'
##############################################################################
-['arch == ppc and simulator_run == True or arch == ppc64 and simulator_run == True', {
+['arch == ppc and simulator_run or arch == ppc64 and simulator_run', {
# take too long with the simulator.
'regress/regress-1132': [SKIP],
-}], # 'arch == ppc and simulator_run == True'
+}], # 'arch == ppc and simulator_run'
##############################################################################
['arch == ppc64', {
@@ -646,6 +642,13 @@
}], # variant == nooptimization
##############################################################################
+['variant == noturbofan and no_snap', {
+ # Too slow for old pipeline and nosnap.
+ 'regress/regress-2249': [SKIP],
+ 'harmony/futex': [SKIP],
+}], # variant == noturbofan and no_snap
+
+##############################################################################
['variant == turbofan_opt', {
'es6/array-iterator-turbo': [SKIP],
@@ -669,11 +672,6 @@
}], # 'gcov_coverage'
##############################################################################
-['variant == asm_wasm', {
- # Issue 6127: Currently {StashCode} breaks the source position table.
- 'wasm/asm-wasm-expr': [SKIP],
-}], # variant == asm_wasm
-
['variant == wasm_traps', {
# Skip stuff uninteresting for wasm traps
'bugs/*': [SKIP],
@@ -692,13 +690,4 @@
'whitespaces': [SKIP],
}], # variant == wasm_traps
-##############################################################################
-# This test allocates a 2G block of memory and if there are multiple
-# varients this leads kills by the OOM killer, crashes or messages
-# indicating the OS cannot allocate memory, exclude for Node.js runs
-# re-evalute when we move up to v8 5.1
-[ALWAYS, {
-'regress/regress-crbug-514081': [PASS, NO_VARIANTS],
-}], # ALWAYS
-
]
diff --git a/deps/v8/test/mjsunit/modules-turbo1.js b/deps/v8/test/mjsunit/modules-turbo1.js
index ce688e1dad..c8877d1f06 100644
--- a/deps/v8/test/mjsunit/modules-turbo1.js
+++ b/deps/v8/test/mjsunit/modules-turbo1.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// MODULE
-// Flags: --allow-natives-syntax --turbo --crankshaft --turbo-filter=*
+// Flags: --allow-natives-syntax --turbo --opt --turbo-filter=*
export let x = 0;
function foo() { x++ };
diff --git a/deps/v8/test/mjsunit/never-optimize.js b/deps/v8/test/mjsunit/never-optimize.js
index 2949f06268..5efaa47de3 100644
--- a/deps/v8/test/mjsunit/never-optimize.js
+++ b/deps/v8/test/mjsunit/never-optimize.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
function o1() {
}
diff --git a/deps/v8/test/mjsunit/object-keys.js b/deps/v8/test/mjsunit/object-keys.js
new file mode 100644
index 0000000000..29eb85d6aa
--- /dev/null
+++ b/deps/v8/test/mjsunit/object-keys.js
@@ -0,0 +1,34 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Ensure that mutation of the Object.keys result doesn't affect the
+// enumeration cache for fast-mode objects.
+(function() {
+ const a = {x:1, y:2};
+ let k = Object.keys(a);
+ %HeapObjectVerify(k);
+ assertEquals(2, k.length);
+ assertEquals("x", k[0]);
+ assertEquals("y", k[1]);
+ k[0] = "y";
+ k[1] = "x";
+ k = Object.keys(a);
+ assertEquals(2, k.length);
+ assertEquals("x", k[0]);
+ assertEquals("y", k[1]);
+})();
+
+// Ensure that the copy-on-write keys are handled properly, even in
+// the presence of Symbols.
+(function() {
+ const s = Symbol();
+ const a = {[s]: 1};
+ let k = Object.keys(a);
+ %HeapObjectVerify(k);
+ assertEquals(0, k.length);
+ k.shift();
+ assertEquals(0, k.length);
+})();
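
The regression scenario here is an engine cache leaking: Object.keys must hand out a fresh array on every call, so writes to one result can never show up in the next call (or in the object's own enumeration order):

const o = { x: 1, y: 2 };
const k1 = Object.keys(o);
k1[0] = "y"; k1[1] = "x";             // scribble on the returned array
console.log(Object.keys(o));          // ["x", "y"]: unaffected
console.log(k1 === Object.keys(o));   // false: a fresh array each call
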
diff --git a/deps/v8/test/mjsunit/object-literal.js b/deps/v8/test/mjsunit/object-literal.js
index 8fdf68d42e..5717837e34 100644
--- a/deps/v8/test/mjsunit/object-literal.js
+++ b/deps/v8/test/mjsunit/object-literal.js
@@ -25,55 +25,81 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-var obj = {
- a: 7,
- b: { x: 12, y: 24 },
- c: 'Zebra'
-}
-
-assertEquals(7, obj.a);
-assertEquals(12, obj.b.x);
-assertEquals(24, obj.b.y);
-assertEquals('Zebra', obj.c);
+// Flags: --allow-natives-syntax
-var z = 24;
+function testBasicPrototype() {
+ var obj = {
+ a: 7,
+ b: { x: 12, y: 24 },
+ c: 'Zebra'
+ }
-var obj2 = {
- a: 7,
- b: { x: 12, y: z },
- c: 'Zebra'
-}
+ assertEquals(7, obj.a);
+ assertEquals(12, obj.b.x);
+ assertEquals(24, obj.b.y);
+ assertEquals('Zebra', obj.c);
+ assertEquals(Object.getPrototypeOf(obj), Object.prototype);
+ assertEquals(Object.getPrototypeOf(obj.b), Object.prototype);
+};
+testBasicPrototype();
+testBasicPrototype();
-assertEquals(7, obj2.a);
-assertEquals(12, obj2.b.x);
-assertEquals(24, obj2.b.y);
-assertEquals('Zebra', obj2.c);
+function testDynamicValue() {
+ var z = 24;
-var arr = [];
-for (var i = 0; i < 2; i++) {
- arr[i] = {
+ var obj2 = {
a: 7,
- b: { x: 12, y: 24 },
+ b: { x: 12, y: z },
c: 'Zebra'
}
+
+ assertEquals(7, obj2.a);
+ assertEquals(12, obj2.b.x);
+ assertEquals(24, obj2.b.y);
+ assertEquals('Zebra', obj2.c);
}
+testDynamicValue();
+testDynamicValue();
+
+(function testMultipleInstantiations() {
+ var arr = [];
+ for (var i = 0; i < 2; i++) {
+ arr[i] = {
+ a: 7,
+ b: { x: 12, y: 24 },
+ c: 'Zebra'
+ }
+ }
-arr[0].b.x = 2;
-assertEquals(2, arr[0].b.x);
-assertEquals(12, arr[1].b.x);
+ arr[0].b.x = 2;
+ assertEquals(2, arr[0].b.x);
+ assertEquals(12, arr[1].b.x);
+})();
+function testSparseElements() {
+ let sa1 = {
+ '0': { x: 12, y: 24 },
+ '1000000': { x: 1, y: 2 }
+ };
-function makeSparseArray() {
- return {
- '0': { x: 12, y: 24 },
- '1000000': { x: 0, y: 0 }
- };
+ assertEquals(['0', '1000000'], Object.keys(sa1));
+ assertEquals(12, sa1[0].x);
+ assertEquals(24, sa1[0].y);
+ assertEquals(['x', 'y'], Object.keys(sa1[0]));
+ assertEquals(1, sa1[1000000].x);
+ assertEquals(2, sa1[1000000].y);
+ assertEquals(['x', 'y'], Object.keys(sa1[1000000]));
+ assertEquals(Object.prototype, Object.getPrototypeOf(sa1));
+ assertEquals(Object.prototype, Object.getPrototypeOf(sa1[0]));
+ assertEquals(Object.prototype, Object.getPrototypeOf(sa1[1000000]));
+ return sa1;
}
-var sa1 = makeSparseArray();
-sa1[0].x = 0;
-var sa2 = makeSparseArray();
-assertEquals(12, sa2[0].x);
+let object = testSparseElements();
+// modify the object and rerun the test, ensuring the literal didn't change.
+object[1] = "a";
+object[0].x = -12;
+testSparseElements();
// Test that non-constant literals work.
var n = new Object();
@@ -81,12 +107,19 @@ var n = new Object();
function makeNonConstantArray() { return [ [ n ] ]; }
var a = makeNonConstantArray();
+var b = makeNonConstantArray();
+assertTrue(a[0][0] === n);
+assertTrue(b[0][0] === n);
+assertFalse(a[0] === b[0]);
a[0][0].foo = "bar";
assertEquals("bar", n.foo);
function makeNonConstantObject() { return { a: { b: n } }; }
a = makeNonConstantObject();
+b = makeNonConstantObject();
+assertFalse(a.a === b.a);
+assertTrue(a.a.b === b.a.b);
a.a.b.bar = "foo";
assertEquals("foo", n.bar);
@@ -94,15 +127,23 @@ assertEquals("foo", n.bar);
function makeRegexpInArray() { return [ [ /a*/, {} ] ]; }
a = makeRegexpInArray();
-var b = makeRegexpInArray();
+b = makeRegexpInArray();
assertFalse(a[0][0] === b[0][0]);
assertFalse(a[0][1] === b[0][1]);
+assertEquals(Array.prototype, Object.getPrototypeOf(a));
+assertEquals(Array.prototype, Object.getPrototypeOf(b));
+assertEquals(Array.prototype, Object.getPrototypeOf(a[0]));
+assertEquals(Array.prototype, Object.getPrototypeOf(b[0]));
+assertEquals(RegExp.prototype, Object.getPrototypeOf(a[0][0]));
+assertEquals(RegExp.prototype, Object.getPrototypeOf(b[0][0]));
function makeRegexpInObject() { return { a: { b: /b*/, c: {} } }; }
a = makeRegexpInObject();
b = makeRegexpInObject();
assertFalse(a.a.b === b.a.b);
assertFalse(a.a.c === b.a.c);
+assertEquals(RegExp.prototype, Object.getPrototypeOf(a.a.b));
+assertEquals(RegExp.prototype, Object.getPrototypeOf(b.a.b));
// Test keywords are valid as property names in initializers and dot-access.
@@ -191,8 +232,16 @@ for (var i = 0; i < keywords.length; i++) {
testKeywordProperty(keywords[i]);
}
+function TestSimpleElements() {
+ var o = { 0:"zero", 1:"one", 2:"two" };
+ assertEquals({0:"zero", 1:"one", 2:"two"}, o);
+ o[0] = 0;
+ assertEquals({0:0, 1:"one", 2:"two"}, o);
+}
+TestSimpleElements();
+TestSimpleElements();
-(function TestNumericNames() {
+function TestNumericNames() {
var o = {
1: 1,
2.: 2,
@@ -211,55 +260,1224 @@ for (var i = 0; i < keywords.length; i++) {
1.30: 1.3
};
assertEquals(['1.2', '1.3'], Object.keys(o));
-})();
+}
+TestNumericNames();
+TestNumericNames();
+
+function TestNonNumberElementValues() {
+ var o = {
+ 1: true,
+ 2: false,
+ 3: undefined,
+ 4: ""
+ };
+ assertEquals(['1', '2', '3', '4'], Object.keys(o));
+ var o2 = {
+ 1: true,
+ 2: false,
+ 3: undefined,
+ 4: "",
+ a: 'a',
+ b: 'b'
+ };
+ assertEquals(['1', '2', '3', '4', 'a', 'b'], Object.keys(o2));
+ var o3 = {
+ __proto__:null,
+ 1: true,
+ 2: false,
+ 3: undefined,
+ 4: ""
+ };
+ assertEquals(['1', '2', '3', '4'], Object.keys(o3));
+ var o4 = {
+ __proto__:null,
+ 1: true,
+ 2: false,
+ 3: undefined,
+ 4: "",
+ a: 'a',
+ b: 'b'
+ };
+ assertEquals(['1', '2', '3', '4', 'a', 'b'], Object.keys(o4));
+}
+TestNonNumberElementValues();
+TestNonNumberElementValues();
+%OptimizeFunctionOnNextCall(TestNonNumberElementValues);
+TestNonNumberElementValues();
-function TestNumericNamesGetter(expectedKeys, object) {
- assertEquals(expectedKeys, Object.keys(object));
- expectedKeys.forEach(function(key) {
- var descr = Object.getOwnPropertyDescriptor(object, key);
- assertEquals('get ' + key, descr.get.name);
+function numericGetters() {
+ function TestNumericNamesGetter(expectedKeys, object) {
+ assertEquals(expectedKeys, Object.keys(object));
+ expectedKeys.forEach(function(key) {
+ var descr = Object.getOwnPropertyDescriptor(object, key);
+ assertEquals('get ' + key, descr.get.name);
+ });
+ }
+ TestNumericNamesGetter(['1', '2', '3', '4', '5', '6', '7', '8', '9'], {
+ get 1() {},
+ get 2.() {},
+ get 3.0() {},
+ get 4e0() {},
+ get 5E0() {},
+ get 6e-0() {},
+ get 7E-0() {},
+ get 0x8() {},
+ get 0X9() {},
+ });
+ TestNumericNamesGetter(['1.2', '1.3'], {
+ get 1.2() {},
+ get 1.30() {}
});
}
-TestNumericNamesGetter(['1', '2', '3', '4', '5', '6', '7', '8', '9'], {
- get 1() {},
- get 2.() {},
- get 3.0() {},
- get 4e0() {},
- get 5E0() {},
- get 6e-0() {},
- get 7E-0() {},
- get 0x8() {},
- get 0X9() {},
-});
-TestNumericNamesGetter(['1.2', '1.3'], {
- get 1.2() {},
- get 1.30() {}
-});
-
-
-function TestNumericNamesSetter(expectedKeys, object) {
- assertEquals(expectedKeys, Object.keys(object));
- expectedKeys.forEach(function(key) {
- var descr = Object.getOwnPropertyDescriptor(object, key);
- assertEquals('set ' + key, descr.set.name);
+numericGetters();
+numericGetters();
+
+function numericSetters() {
+ function TestNumericNamesSetter(expectedKeys, object) {
+ assertEquals(expectedKeys, Object.keys(object));
+ expectedKeys.forEach(function(key) {
+ var descr = Object.getOwnPropertyDescriptor(object, key);
+ assertEquals('set ' + key, descr.set.name);
+ });
+ }
+ TestNumericNamesSetter(['1', '2', '3', '4', '5', '6', '7', '8', '9'], {
+ set 1(_) {},
+ set 2.(_) {},
+ set 3.0(_) {},
+ set 4e0(_) {},
+ set 5E0(_) {},
+ set 6e-0(_) {},
+ set 7E-0(_) {},
+ set 0x8(_) {},
+ set 0X9(_) {},
});
+ TestNumericNamesSetter(['1.2', '1.3'], {
+ set 1.2(_) {; },
+ set 1.30(_) {; }
+ });
+};
+
+numericSetters();
+numericSetters();
+
+function TestProxyWithDefinitionInObjectLiteral() {
+  // The 'set' trap should not be invoked when the property definition
+  // happens in the object literal itself.
+ var handler = {
+ set: function(target, name, value) {
+ }
+ };
+
+ const prop = 'a';
+
+ var p = new Proxy({}, handler);
+ p[prop] = 'my value';
+ assertEquals(undefined, p[prop]);
+
+ var l = new Proxy({[prop]: 'my value'}, handler);
+ assertEquals('my value', l[prop]);
+};
+TestProxyWithDefinitionInObjectLiteral();
+TestProxyWithDefinitionInObjectLiteral();
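+
+// Illustrative sketch (editorial, not part of the original test): on a proxy,
+// plain assignment goes through the 'set' trap, while an explicit definition
+// goes through the 'defineProperty' trap; object literals use definition
+// semantics, which is why the 'set' trap above is never consulted.
+function ProxyDefineVsSetSketch() {
+  var log = [];
+  var p = new Proxy({}, {
+    set(target, name, value) { log.push('set'); return true; },
+    defineProperty(target, name, desc) {
+      log.push('defineProperty');
+      return Reflect.defineProperty(target, name, desc);
+    }
+  });
+  p.a = 1;                                    // assignment: 'set' trap
+  Object.defineProperty(p, 'b', {value: 2});  // definition: 'defineProperty' trap
+  assertEquals(['set', 'defineProperty'], log);
+}
+ProxyDefineVsSetSketch();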
+
+(function TestLiteralWithNullProto() {
+  // Assume dictionary usage for simple null-prototype literal objects;
+  // this is equivalent to Object.create(null). Note that on the first call
+  // the literal boilerplate is initialized, and from then on we use the
+  // fast clone stub.
+ function testDictModeNullProtoLiteral(fn) {
+ let obj = fn();
+ assertFalse(%HasFastProperties(obj));
+ assertEquals(Object.getPrototypeOf(obj), null);
+ let next = fn();
+ assertFalse(obj === next);
+ obj = next;
+ assertFalse(%HasFastProperties(obj));
+ assertEquals(Object.getPrototypeOf(obj), null);
+ next = fn();
+ assertFalse(obj === next);
+ obj = next;
+ assertFalse(%HasFastProperties(obj));
+ assertEquals(Object.getPrototypeOf(obj), null);
+ }
+ testDictModeNullProtoLiteral(() => ({__proto__:null}));
+ testDictModeNullProtoLiteral(() => ({__proto__:null, a:1, b:2}));
+ testDictModeNullProtoLiteral(() => ({__proto__: null, ["a"]: 1}));
+ testDictModeNullProtoLiteral(() => ({__proto__: null, a: Object}));
+ testDictModeNullProtoLiteral(() => ({a:1, b:2, __proto__:null}));
+ testDictModeNullProtoLiteral(() => ({["a"]: 1, __proto__: null}));
+ testDictModeNullProtoLiteral(() => ({a: Object, __proto__: null}));
+})();
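+
+// Minimal sketch (editorial, not part of the original test): as the comment
+// above says, a null-prototype literal is equivalent to Object.create(null).
+// With no prototype chain, nothing is inherited from Object.prototype.
+function NullProtoEquivalenceSketch() {
+  var fromLiteral = {__proto__: null, a: 1};
+  var fromCreate = Object.create(null);
+  fromCreate.a = 1;
+  assertEquals(null, Object.getPrototypeOf(fromLiteral));
+  assertEquals(null, Object.getPrototypeOf(fromCreate));
+  assertFalse('toString' in fromLiteral);  // nothing inherited
+  assertEquals(['a'], Object.keys(fromLiteral));
+}
+NullProtoEquivalenceSketch();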
+
+function testNestedNullProtoLiteral() {
+ let obj;
+ obj = { foo: { __proto__:Math, bar:"barValue"}};
+ assertTrue(%HasFastProperties(obj));
+ assertTrue(%HasFastProperties(obj.foo));
+ assertEquals(Object.prototype, Object.getPrototypeOf(obj));
+ assertEquals(Math, Object.getPrototypeOf(obj.foo));
+ assertEquals(["foo"], Object.keys(obj));
+ assertEquals(["bar"], Object.keys(obj.foo));
+ assertEquals("barValue", obj.foo.bar);
+ obj.foo.bar = "barValue2";
+ assertEquals("barValue2", obj.foo.bar);
+
+ obj = { foo: { __proto__:null, bar:"barValue"}};
+ assertTrue(%HasFastProperties(obj));
+ assertFalse(%HasFastProperties(obj.foo));
+ assertEquals(Object.prototype, Object.getPrototypeOf(obj));
+ assertEquals(null, Object.getPrototypeOf(obj.foo));
+ assertEquals(["foo"], Object.keys(obj));
+ assertEquals(["bar"], Object.keys(obj.foo));
+ assertEquals("barValue", obj.foo.bar);
+ obj.foo.bar = "barValue2";
+ assertEquals("barValue2", obj.foo.bar);
}
-TestNumericNamesSetter(['1', '2', '3', '4', '5', '6', '7', '8', '9'], {
- set 1(_) {},
- set 2.(_) {},
- set 3.0(_) {},
- set 4e0(_) {},
- set 5E0(_) {},
- set 6e-0(_) {},
- set 7E-0(_) {},
- set 0x8(_) {},
- set 0X9(_) {},
-});
-TestNumericNamesSetter(['1.2', '1.3'], {
- set 1.2(_) {; },
- set 1.30(_) {; }
-});
+testNestedNullProtoLiteral();
+testNestedNullProtoLiteral();
+
+
+function TestSlowLiteralOptimized() {
+ function f() {
+ return {__proto__:null, bar:"barValue"};
+ }
+ let obj = f();
+ assertFalse(%HasFastProperties(obj));
+ assertEquals(Object.getPrototypeOf(obj), null);
+ assertEquals(["bar"], Object.keys(obj));
+ assertEquals("barValue", obj.bar);
+ obj.bar = "barValue2";
+ assertEquals("barValue2", obj.bar);
+
+ %OptimizeFunctionOnNextCall(f);
+ obj = f();
+ assertFalse(%HasFastProperties(obj));
+ assertEquals(Object.getPrototypeOf(obj), null);
+ assertEquals(["bar"], Object.keys(obj));
+ assertEquals("barValue", obj.bar);
+ obj.bar = "barValue2";
+ assertEquals("barValue2", obj.bar);
+};
+TestSlowLiteralOptimized();
+TestSlowLiteralOptimized();
+
+(function TestLargeDictionaryLiteral() {
+  // Create a potentially large-object-space object literal.
+ function createObject() {
+    // This literal has at least kMaxRegularHeapObjectSize / 64 properties,
+    // forcing the backing store to be in large object space.
+ return { __proto__:null,
+ p1:'',p2:'',p3:'',p4:'',p5:'',p6:'',p7:'',p8:'',
+ p9:'',pa:'',pb:'',pc:'',pd:'',pe:'',pf:'',p10:'',
+ p11:'',p12:'',p13:'',p14:'',p15:'',p16:'',p17:'',p18:'',
+ p19:'',p1a:'',p1b:'',p1c:'',p1d:'',p1e:'',p1f:'',p20:'',
+ p21:'',p22:'',p23:'',p24:'',p25:'',p26:'',p27:'',p28:'',
+ p29:'',p2a:'',p2b:'',p2c:'',p2d:'',p2e:'',p2f:'',p30:'',
+ p31:'',p32:'',p33:'',p34:'',p35:'',p36:'',p37:'',p38:'',
+ p39:'',p3a:'',p3b:'',p3c:'',p3d:'',p3e:'',p3f:'',p40:'',
+ p41:'',p42:'',p43:'',p44:'',p45:'',p46:'',p47:'',p48:'',
+ p49:'',p4a:'',p4b:'',p4c:'',p4d:'',p4e:'',p4f:'',p50:'',
+ p51:'',p52:'',p53:'',p54:'',p55:'',p56:'',p57:'',p58:'',
+ p59:'',p5a:'',p5b:'',p5c:'',p5d:'',p5e:'',p5f:'',p60:'',
+ p61:'',p62:'',p63:'',p64:'',p65:'',p66:'',p67:'',p68:'',
+ p69:'',p6a:'',p6b:'',p6c:'',p6d:'',p6e:'',p6f:'',p70:'',
+ p71:'',p72:'',p73:'',p74:'',p75:'',p76:'',p77:'',p78:'',
+ p79:'',p7a:'',p7b:'',p7c:'',p7d:'',p7e:'',p7f:'',p80:'',
+ p81:'',p82:'',p83:'',p84:'',p85:'',p86:'',p87:'',p88:'',
+ p89:'',p8a:'',p8b:'',p8c:'',p8d:'',p8e:'',p8f:'',p90:'',
+ p91:'',p92:'',p93:'',p94:'',p95:'',p96:'',p97:'',p98:'',
+ p99:'',p9a:'',p9b:'',p9c:'',p9d:'',p9e:'',p9f:'',pa0:'',
+ pa1:'',pa2:'',pa3:'',pa4:'',pa5:'',pa6:'',pa7:'',pa8:'',
+ pa9:'',paa:'',pab:'',pac:'',pad:'',pae:'',paf:'',pb0:'',
+ pb1:'',pb2:'',pb3:'',pb4:'',pb5:'',pb6:'',pb7:'',pb8:'',
+ pb9:'',pba:'',pbb:'',pbc:'',pbd:'',pbe:'',pbf:'',pc0:'',
+ pc1:'',pc2:'',pc3:'',pc4:'',pc5:'',pc6:'',pc7:'',pc8:'',
+ pc9:'',pca:'',pcb:'',pcc:'',pcd:'',pce:'',pcf:'',pd0:'',
+ pd1:'',pd2:'',pd3:'',pd4:'',pd5:'',pd6:'',pd7:'',pd8:'',
+ pd9:'',pda:'',pdb:'',pdc:'',pdd:'',pde:'',pdf:'',pe0:'',
+ pe1:'',pe2:'',pe3:'',pe4:'',pe5:'',pe6:'',pe7:'',pe8:'',
+ pe9:'',pea:'',peb:'',pec:'',ped:'',pee:'',pef:'',pf0:'',
+ pf1:'',pf2:'',pf3:'',pf4:'',pf5:'',pf6:'',pf7:'',pf8:'',
+ pf9:'',pfa:'',pfb:'',pfc:'',pfd:'',pfe:'',pff:'',p100:'',
+ p101:'',p102:'',p103:'',p104:'',p105:'',p106:'',p107:'',p108:'',
+ p109:'',p10a:'',p10b:'',p10c:'',p10d:'',p10e:'',p10f:'',p110:'',
+ p111:'',p112:'',p113:'',p114:'',p115:'',p116:'',p117:'',p118:'',
+ p119:'',p11a:'',p11b:'',p11c:'',p11d:'',p11e:'',p11f:'',p120:'',
+ p121:'',p122:'',p123:'',p124:'',p125:'',p126:'',p127:'',p128:'',
+ p129:'',p12a:'',p12b:'',p12c:'',p12d:'',p12e:'',p12f:'',p130:'',
+ p131:'',p132:'',p133:'',p134:'',p135:'',p136:'',p137:'',p138:'',
+ p139:'',p13a:'',p13b:'',p13c:'',p13d:'',p13e:'',p13f:'',p140:'',
+ p141:'',p142:'',p143:'',p144:'',p145:'',p146:'',p147:'',p148:'',
+ p149:'',p14a:'',p14b:'',p14c:'',p14d:'',p14e:'',p14f:'',p150:'',
+ p151:'',p152:'',p153:'',p154:'',p155:'',p156:'',p157:'',p158:'',
+ p159:'',p15a:'',p15b:'',p15c:'',p15d:'',p15e:'',p15f:'',p160:'',
+ p161:'',p162:'',p163:'',p164:'',p165:'',p166:'',p167:'',p168:'',
+ p169:'',p16a:'',p16b:'',p16c:'',p16d:'',p16e:'',p16f:'',p170:'',
+ p171:'',p172:'',p173:'',p174:'',p175:'',p176:'',p177:'',p178:'',
+ p179:'',p17a:'',p17b:'',p17c:'',p17d:'',p17e:'',p17f:'',p180:'',
+ p181:'',p182:'',p183:'',p184:'',p185:'',p186:'',p187:'',p188:'',
+ p189:'',p18a:'',p18b:'',p18c:'',p18d:'',p18e:'',p18f:'',p190:'',
+ p191:'',p192:'',p193:'',p194:'',p195:'',p196:'',p197:'',p198:'',
+ p199:'',p19a:'',p19b:'',p19c:'',p19d:'',p19e:'',p19f:'',p1a0:'',
+ p1a1:'',p1a2:'',p1a3:'',p1a4:'',p1a5:'',p1a6:'',p1a7:'',p1a8:'',
+ p1a9:'',p1aa:'',p1ab:'',p1ac:'',p1ad:'',p1ae:'',p1af:'',p1b0:'',
+ p1b1:'',p1b2:'',p1b3:'',p1b4:'',p1b5:'',p1b6:'',p1b7:'',p1b8:'',
+ p1b9:'',p1ba:'',p1bb:'',p1bc:'',p1bd:'',p1be:'',p1bf:'',p1c0:'',
+ p1c1:'',p1c2:'',p1c3:'',p1c4:'',p1c5:'',p1c6:'',p1c7:'',p1c8:'',
+ p1c9:'',p1ca:'',p1cb:'',p1cc:'',p1cd:'',p1ce:'',p1cf:'',p1d0:'',
+ p1d1:'',p1d2:'',p1d3:'',p1d4:'',p1d5:'',p1d6:'',p1d7:'',p1d8:'',
+ p1d9:'',p1da:'',p1db:'',p1dc:'',p1dd:'',p1de:'',p1df:'',p1e0:'',
+ p1e1:'',p1e2:'',p1e3:'',p1e4:'',p1e5:'',p1e6:'',p1e7:'',p1e8:'',
+ p1e9:'',p1ea:'',p1eb:'',p1ec:'',p1ed:'',p1ee:'',p1ef:'',p1f0:'',
+ p1f1:'',p1f2:'',p1f3:'',p1f4:'',p1f5:'',p1f6:'',p1f7:'',p1f8:'',
+ p1f9:'',p1fa:'',p1fb:'',p1fc:'',p1fd:'',p1fe:'',p1ff:'',p200:'',
+ p201:'',p202:'',p203:'',p204:'',p205:'',p206:'',p207:'',p208:'',
+ p209:'',p20a:'',p20b:'',p20c:'',p20d:'',p20e:'',p20f:'',p210:'',
+ p211:'',p212:'',p213:'',p214:'',p215:'',p216:'',p217:'',p218:'',
+ p219:'',p21a:'',p21b:'',p21c:'',p21d:'',p21e:'',p21f:'',p220:'',
+ p221:'',p222:'',p223:'',p224:'',p225:'',p226:'',p227:'',p228:'',
+ p229:'',p22a:'',p22b:'',p22c:'',p22d:'',p22e:'',p22f:'',p230:'',
+ p231:'',p232:'',p233:'',p234:'',p235:'',p236:'',p237:'',p238:'',
+ p239:'',p23a:'',p23b:'',p23c:'',p23d:'',p23e:'',p23f:'',p240:'',
+ p241:'',p242:'',p243:'',p244:'',p245:'',p246:'',p247:'',p248:'',
+ p249:'',p24a:'',p24b:'',p24c:'',p24d:'',p24e:'',p24f:'',p250:'',
+ p251:'',p252:'',p253:'',p254:'',p255:'',p256:'',p257:'',p258:'',
+ p259:'',p25a:'',p25b:'',p25c:'',p25d:'',p25e:'',p25f:'',p260:'',
+ p261:'',p262:'',p263:'',p264:'',p265:'',p266:'',p267:'',p268:'',
+ p269:'',p26a:'',p26b:'',p26c:'',p26d:'',p26e:'',p26f:'',p270:'',
+ p271:'',p272:'',p273:'',p274:'',p275:'',p276:'',p277:'',p278:'',
+ p279:'',p27a:'',p27b:'',p27c:'',p27d:'',p27e:'',p27f:'',p280:'',
+ p281:'',p282:'',p283:'',p284:'',p285:'',p286:'',p287:'',p288:'',
+ p289:'',p28a:'',p28b:'',p28c:'',p28d:'',p28e:'',p28f:'',p290:'',
+ p291:'',p292:'',p293:'',p294:'',p295:'',p296:'',p297:'',p298:'',
+ p299:'',p29a:'',p29b:'',p29c:'',p29d:'',p29e:'',p29f:'',p2a0:'',
+ p2a1:'',p2a2:'',p2a3:'',p2a4:'',p2a5:'',p2a6:'',p2a7:'',p2a8:'',
+ p2a9:'',p2aa:'',p2ab:'',p2ac:'',p2ad:'',p2ae:'',p2af:'',p2b0:'',
+ p2b1:'',p2b2:'',p2b3:'',p2b4:'',p2b5:'',p2b6:'',p2b7:'',p2b8:'',
+ p2b9:'',p2ba:'',p2bb:'',p2bc:'',p2bd:'',p2be:'',p2bf:'',p2c0:'',
+ p2c1:'',p2c2:'',p2c3:'',p2c4:'',p2c5:'',p2c6:'',p2c7:'',p2c8:'',
+ p2c9:'',p2ca:'',p2cb:'',p2cc:'',p2cd:'',p2ce:'',p2cf:'',p2d0:'',
+ p2d1:'',p2d2:'',p2d3:'',p2d4:'',p2d5:'',p2d6:'',p2d7:'',p2d8:'',
+ p2d9:'',p2da:'',p2db:'',p2dc:'',p2dd:'',p2de:'',p2df:'',p2e0:'',
+ p2e1:'',p2e2:'',p2e3:'',p2e4:'',p2e5:'',p2e6:'',p2e7:'',p2e8:'',
+ p2e9:'',p2ea:'',p2eb:'',p2ec:'',p2ed:'',p2ee:'',p2ef:'',p2f0:'',
+ p2f1:'',p2f2:'',p2f3:'',p2f4:'',p2f5:'',p2f6:'',p2f7:'',p2f8:'',
+ p2f9:'',p2fa:'',p2fb:'',p2fc:'',p2fd:'',p2fe:'',p2ff:'',p300:'',
+ p301:'',p302:'',p303:'',p304:'',p305:'',p306:'',p307:'',p308:'',
+ p309:'',p30a:'',p30b:'',p30c:'',p30d:'',p30e:'',p30f:'',p310:'',
+ p311:'',p312:'',p313:'',p314:'',p315:'',p316:'',p317:'',p318:'',
+ p319:'',p31a:'',p31b:'',p31c:'',p31d:'',p31e:'',p31f:'',p320:'',
+ p321:'',p322:'',p323:'',p324:'',p325:'',p326:'',p327:'',p328:'',
+ p329:'',p32a:'',p32b:'',p32c:'',p32d:'',p32e:'',p32f:'',p330:'',
+ p331:'',p332:'',p333:'',p334:'',p335:'',p336:'',p337:'',p338:'',
+ p339:'',p33a:'',p33b:'',p33c:'',p33d:'',p33e:'',p33f:'',p340:'',
+ p341:'',p342:'',p343:'',p344:'',p345:'',p346:'',p347:'',p348:'',
+ p349:'',p34a:'',p34b:'',p34c:'',p34d:'',p34e:'',p34f:'',p350:'',
+ p351:'',p352:'',p353:'',p354:'',p355:'',p356:'',p357:'',p358:'',
+ p359:'',p35a:'',p35b:'',p35c:'',p35d:'',p35e:'',p35f:'',p360:'',
+ p361:'',p362:'',p363:'',p364:'',p365:'',p366:'',p367:'',p368:'',
+ p369:'',p36a:'',p36b:'',p36c:'',p36d:'',p36e:'',p36f:'',p370:'',
+ p371:'',p372:'',p373:'',p374:'',p375:'',p376:'',p377:'',p378:'',
+ p379:'',p37a:'',p37b:'',p37c:'',p37d:'',p37e:'',p37f:'',p380:'',
+ p381:'',p382:'',p383:'',p384:'',p385:'',p386:'',p387:'',p388:'',
+ p389:'',p38a:'',p38b:'',p38c:'',p38d:'',p38e:'',p38f:'',p390:'',
+ p391:'',p392:'',p393:'',p394:'',p395:'',p396:'',p397:'',p398:'',
+ p399:'',p39a:'',p39b:'',p39c:'',p39d:'',p39e:'',p39f:'',p3a0:'',
+ p3a1:'',p3a2:'',p3a3:'',p3a4:'',p3a5:'',p3a6:'',p3a7:'',p3a8:'',
+ p3a9:'',p3aa:'',p3ab:'',p3ac:'',p3ad:'',p3ae:'',p3af:'',p3b0:'',
+ p3b1:'',p3b2:'',p3b3:'',p3b4:'',p3b5:'',p3b6:'',p3b7:'',p3b8:'',
+ p3b9:'',p3ba:'',p3bb:'',p3bc:'',p3bd:'',p3be:'',p3bf:'',p3c0:'',
+ p3c1:'',p3c2:'',p3c3:'',p3c4:'',p3c5:'',p3c6:'',p3c7:'',p3c8:'',
+ p3c9:'',p3ca:'',p3cb:'',p3cc:'',p3cd:'',p3ce:'',p3cf:'',p3d0:'',
+ p3d1:'',p3d2:'',p3d3:'',p3d4:'',p3d5:'',p3d6:'',p3d7:'',p3d8:'',
+ p3d9:'',p3da:'',p3db:'',p3dc:'',p3dd:'',p3de:'',p3df:'',p3e0:'',
+ p3e1:'',p3e2:'',p3e3:'',p3e4:'',p3e5:'',p3e6:'',p3e7:'',p3e8:'',
+ p3e9:'',p3ea:'',p3eb:'',p3ec:'',p3ed:'',p3ee:'',p3ef:'',p3f0:'',
+ p3f1:'',p3f2:'',p3f3:'',p3f4:'',p3f5:'',p3f6:'',p3f7:'',p3f8:'',
+ p3f9:'',p3fa:'',p3fb:'',p3fc:'',p3fd:'',p3fe:'',p3ff:'',p400:'',
+ p401:'',p402:'',p403:'',p404:'',p405:'',p406:'',p407:'',p408:'',
+ p409:'',p40a:'',p40b:'',p40c:'',p40d:'',p40e:'',p40f:'',p410:'',
+ p411:'',p412:'',p413:'',p414:'',p415:'',p416:'',p417:'',p418:'',
+ p419:'',p41a:'',p41b:'',p41c:'',p41d:'',p41e:'',p41f:'',p420:'',
+ p421:'',p422:'',p423:'',p424:'',p425:'',p426:'',p427:'',p428:'',
+ p429:'',p42a:'',p42b:'',p42c:'',p42d:'',p42e:'',p42f:'',p430:'',
+ p431:'',p432:'',p433:'',p434:'',p435:'',p436:'',p437:'',p438:'',
+ p439:'',p43a:'',p43b:'',p43c:'',p43d:'',p43e:'',p43f:'',p440:'',
+ p441:'',p442:'',p443:'',p444:'',p445:'',p446:'',p447:'',p448:'',
+ p449:'',p44a:'',p44b:'',p44c:'',p44d:'',p44e:'',p44f:'',p450:'',
+ p451:'',p452:'',p453:'',p454:'',p455:'',p456:'',p457:'',p458:'',
+ p459:'',p45a:'',p45b:'',p45c:'',p45d:'',p45e:'',p45f:'',p460:'',
+ p461:'',p462:'',p463:'',p464:'',p465:'',p466:'',p467:'',p468:'',
+ p469:'',p46a:'',p46b:'',p46c:'',p46d:'',p46e:'',p46f:'',p470:'',
+ p471:'',p472:'',p473:'',p474:'',p475:'',p476:'',p477:'',p478:'',
+ p479:'',p47a:'',p47b:'',p47c:'',p47d:'',p47e:'',p47f:'',p480:'',
+ p481:'',p482:'',p483:'',p484:'',p485:'',p486:'',p487:'',p488:'',
+ p489:'',p48a:'',p48b:'',p48c:'',p48d:'',p48e:'',p48f:'',p490:'',
+ p491:'',p492:'',p493:'',p494:'',p495:'',p496:'',p497:'',p498:'',
+ p499:'',p49a:'',p49b:'',p49c:'',p49d:'',p49e:'',p49f:'',p4a0:'',
+ p4a1:'',p4a2:'',p4a3:'',p4a4:'',p4a5:'',p4a6:'',p4a7:'',p4a8:'',
+ p4a9:'',p4aa:'',p4ab:'',p4ac:'',p4ad:'',p4ae:'',p4af:'',p4b0:'',
+ p4b1:'',p4b2:'',p4b3:'',p4b4:'',p4b5:'',p4b6:'',p4b7:'',p4b8:'',
+ p4b9:'',p4ba:'',p4bb:'',p4bc:'',p4bd:'',p4be:'',p4bf:'',p4c0:'',
+ p4c1:'',p4c2:'',p4c3:'',p4c4:'',p4c5:'',p4c6:'',p4c7:'',p4c8:'',
+ p4c9:'',p4ca:'',p4cb:'',p4cc:'',p4cd:'',p4ce:'',p4cf:'',p4d0:'',
+ p4d1:'',p4d2:'',p4d3:'',p4d4:'',p4d5:'',p4d6:'',p4d7:'',p4d8:'',
+ p4d9:'',p4da:'',p4db:'',p4dc:'',p4dd:'',p4de:'',p4df:'',p4e0:'',
+ p4e1:'',p4e2:'',p4e3:'',p4e4:'',p4e5:'',p4e6:'',p4e7:'',p4e8:'',
+ p4e9:'',p4ea:'',p4eb:'',p4ec:'',p4ed:'',p4ee:'',p4ef:'',p4f0:'',
+ p4f1:'',p4f2:'',p4f3:'',p4f4:'',p4f5:'',p4f6:'',p4f7:'',p4f8:'',
+ p4f9:'',p4fa:'',p4fb:'',p4fc:'',p4fd:'',p4fe:'',p4ff:'',p500:'',
+ p501:'',p502:'',p503:'',p504:'',p505:'',p506:'',p507:'',p508:'',
+ p509:'',p50a:'',p50b:'',p50c:'',p50d:'',p50e:'',p50f:'',p510:'',
+ p511:'',p512:'',p513:'',p514:'',p515:'',p516:'',p517:'',p518:'',
+ p519:'',p51a:'',p51b:'',p51c:'',p51d:'',p51e:'',p51f:'',p520:'',
+ p521:'',p522:'',p523:'',p524:'',p525:'',p526:'',p527:'',p528:'',
+ p529:'',p52a:'',p52b:'',p52c:'',p52d:'',p52e:'',p52f:'',p530:'',
+ p531:'',p532:'',p533:'',p534:'',p535:'',p536:'',p537:'',p538:'',
+ p539:'',p53a:'',p53b:'',p53c:'',p53d:'',p53e:'',p53f:'',p540:'',
+ p541:'',p542:'',p543:'',p544:'',p545:'',p546:'',p547:'',p548:'',
+ p549:'',p54a:'',p54b:'',p54c:'',p54d:'',p54e:'',p54f:'',p550:'',
+ p551:'',p552:'',p553:'',p554:'',p555:'',p556:'',p557:'',p558:'',
+ p559:'',p55a:'',p55b:'',p55c:'',p55d:'',p55e:'',p55f:'',p560:'',
+ p561:'',p562:'',p563:'',p564:'',p565:'',p566:'',p567:'',p568:'',
+ p569:'',p56a:'',p56b:'',p56c:'',p56d:'',p56e:'',p56f:'',p570:'',
+ p571:'',p572:'',p573:'',p574:'',p575:'',p576:'',p577:'',p578:'',
+ p579:'',p57a:'',p57b:'',p57c:'',p57d:'',p57e:'',p57f:'',p580:'',
+ p581:'',p582:'',p583:'',p584:'',p585:'',p586:'',p587:'',p588:'',
+ p589:'',p58a:'',p58b:'',p58c:'',p58d:'',p58e:'',p58f:'',p590:'',
+ p591:'',p592:'',p593:'',p594:'',p595:'',p596:'',p597:'',p598:'',
+ p599:'',p59a:'',p59b:'',p59c:'',p59d:'',p59e:'',p59f:'',p5a0:'',
+ p5a1:'',p5a2:'',p5a3:'',p5a4:'',p5a5:'',p5a6:'',p5a7:'',p5a8:'',
+ p5a9:'',p5aa:'',p5ab:'',p5ac:'',p5ad:'',p5ae:'',p5af:'',p5b0:'',
+ p5b1:'',p5b2:'',p5b3:'',p5b4:'',p5b5:'',p5b6:'',p5b7:'',p5b8:'',
+ p5b9:'',p5ba:'',p5bb:'',p5bc:'',p5bd:'',p5be:'',p5bf:'',p5c0:'',
+ p5c1:'',p5c2:'',p5c3:'',p5c4:'',p5c5:'',p5c6:'',p5c7:'',p5c8:'',
+ p5c9:'',p5ca:'',p5cb:'',p5cc:'',p5cd:'',p5ce:'',p5cf:'',p5d0:'',
+ p5d1:'',p5d2:'',p5d3:'',p5d4:'',p5d5:'',p5d6:'',p5d7:'',p5d8:'',
+ p5d9:'',p5da:'',p5db:'',p5dc:'',p5dd:'',p5de:'',p5df:'',p5e0:'',
+ p5e1:'',p5e2:'',p5e3:'',p5e4:'',p5e5:'',p5e6:'',p5e7:'',p5e8:'',
+ p5e9:'',p5ea:'',p5eb:'',p5ec:'',p5ed:'',p5ee:'',p5ef:'',p5f0:'',
+ p5f1:'',p5f2:'',p5f3:'',p5f4:'',p5f5:'',p5f6:'',p5f7:'',p5f8:'',
+ p5f9:'',p5fa:'',p5fb:'',p5fc:'',p5fd:'',p5fe:'',p5ff:'',p600:'',
+ p601:'',p602:'',p603:'',p604:'',p605:'',p606:'',p607:'',p608:'',
+ p609:'',p60a:'',p60b:'',p60c:'',p60d:'',p60e:'',p60f:'',p610:'',
+ p611:'',p612:'',p613:'',p614:'',p615:'',p616:'',p617:'',p618:'',
+ p619:'',p61a:'',p61b:'',p61c:'',p61d:'',p61e:'',p61f:'',p620:'',
+ p621:'',p622:'',p623:'',p624:'',p625:'',p626:'',p627:'',p628:'',
+ p629:'',p62a:'',p62b:'',p62c:'',p62d:'',p62e:'',p62f:'',p630:'',
+ p631:'',p632:'',p633:'',p634:'',p635:'',p636:'',p637:'',p638:'',
+ p639:'',p63a:'',p63b:'',p63c:'',p63d:'',p63e:'',p63f:'',p640:'',
+ p641:'',p642:'',p643:'',p644:'',p645:'',p646:'',p647:'',p648:'',
+ p649:'',p64a:'',p64b:'',p64c:'',p64d:'',p64e:'',p64f:'',p650:'',
+ p651:'',p652:'',p653:'',p654:'',p655:'',p656:'',p657:'',p658:'',
+ p659:'',p65a:'',p65b:'',p65c:'',p65d:'',p65e:'',p65f:'',p660:'',
+ p661:'',p662:'',p663:'',p664:'',p665:'',p666:'',p667:'',p668:'',
+ p669:'',p66a:'',p66b:'',p66c:'',p66d:'',p66e:'',p66f:'',p670:'',
+ p671:'',p672:'',p673:'',p674:'',p675:'',p676:'',p677:'',p678:'',
+ p679:'',p67a:'',p67b:'',p67c:'',p67d:'',p67e:'',p67f:'',p680:'',
+ p681:'',p682:'',p683:'',p684:'',p685:'',p686:'',p687:'',p688:'',
+ p689:'',p68a:'',p68b:'',p68c:'',p68d:'',p68e:'',p68f:'',p690:'',
+ p691:'',p692:'',p693:'',p694:'',p695:'',p696:'',p697:'',p698:'',
+ p699:'',p69a:'',p69b:'',p69c:'',p69d:'',p69e:'',p69f:'',p6a0:'',
+ p6a1:'',p6a2:'',p6a3:'',p6a4:'',p6a5:'',p6a6:'',p6a7:'',p6a8:'',
+ p6a9:'',p6aa:'',p6ab:'',p6ac:'',p6ad:'',p6ae:'',p6af:'',p6b0:'',
+ p6b1:'',p6b2:'',p6b3:'',p6b4:'',p6b5:'',p6b6:'',p6b7:'',p6b8:'',
+ p6b9:'',p6ba:'',p6bb:'',p6bc:'',p6bd:'',p6be:'',p6bf:'',p6c0:'',
+ p6c1:'',p6c2:'',p6c3:'',p6c4:'',p6c5:'',p6c6:'',p6c7:'',p6c8:'',
+ p6c9:'',p6ca:'',p6cb:'',p6cc:'',p6cd:'',p6ce:'',p6cf:'',p6d0:'',
+ p6d1:'',p6d2:'',p6d3:'',p6d4:'',p6d5:'',p6d6:'',p6d7:'',p6d8:'',
+ p6d9:'',p6da:'',p6db:'',p6dc:'',p6dd:'',p6de:'',p6df:'',p6e0:'',
+ p6e1:'',p6e2:'',p6e3:'',p6e4:'',p6e5:'',p6e6:'',p6e7:'',p6e8:'',
+ p6e9:'',p6ea:'',p6eb:'',p6ec:'',p6ed:'',p6ee:'',p6ef:'',p6f0:'',
+ p6f1:'',p6f2:'',p6f3:'',p6f4:'',p6f5:'',p6f6:'',p6f7:'',p6f8:'',
+ p6f9:'',p6fa:'',p6fb:'',p6fc:'',p6fd:'',p6fe:'',p6ff:'',p700:'',
+ p701:'',p702:'',p703:'',p704:'',p705:'',p706:'',p707:'',p708:'',
+ p709:'',p70a:'',p70b:'',p70c:'',p70d:'',p70e:'',p70f:'',p710:'',
+ p711:'',p712:'',p713:'',p714:'',p715:'',p716:'',p717:'',p718:'',
+ p719:'',p71a:'',p71b:'',p71c:'',p71d:'',p71e:'',p71f:'',p720:'',
+ p721:'',p722:'',p723:'',p724:'',p725:'',p726:'',p727:'',p728:'',
+ p729:'',p72a:'',p72b:'',p72c:'',p72d:'',p72e:'',p72f:'',p730:'',
+ p731:'',p732:'',p733:'',p734:'',p735:'',p736:'',p737:'',p738:'',
+ p739:'',p73a:'',p73b:'',p73c:'',p73d:'',p73e:'',p73f:'',p740:'',
+ p741:'',p742:'',p743:'',p744:'',p745:'',p746:'',p747:'',p748:'',
+ p749:'',p74a:'',p74b:'',p74c:'',p74d:'',p74e:'',p74f:'',p750:'',
+ p751:'',p752:'',p753:'',p754:'',p755:'',p756:'',p757:'',p758:'',
+ p759:'',p75a:'',p75b:'',p75c:'',p75d:'',p75e:'',p75f:'',p760:'',
+ p761:'',p762:'',p763:'',p764:'',p765:'',p766:'',p767:'',p768:'',
+ p769:'',p76a:'',p76b:'',p76c:'',p76d:'',p76e:'',p76f:'',p770:'',
+ p771:'',p772:'',p773:'',p774:'',p775:'',p776:'',p777:'',p778:'',
+ p779:'',p77a:'',p77b:'',p77c:'',p77d:'',p77e:'',p77f:'',p780:'',
+ p781:'',p782:'',p783:'',p784:'',p785:'',p786:'',p787:'',p788:'',
+ p789:'',p78a:'',p78b:'',p78c:'',p78d:'',p78e:'',p78f:'',p790:'',
+ p791:'',p792:'',p793:'',p794:'',p795:'',p796:'',p797:'',p798:'',
+ p799:'',p79a:'',p79b:'',p79c:'',p79d:'',p79e:'',p79f:'',p7a0:'',
+ p7a1:'',p7a2:'',p7a3:'',p7a4:'',p7a5:'',p7a6:'',p7a7:'',p7a8:'',
+ p7a9:'',p7aa:'',p7ab:'',p7ac:'',p7ad:'',p7ae:'',p7af:'',p7b0:'',
+ p7b1:'',p7b2:'',p7b3:'',p7b4:'',p7b5:'',p7b6:'',p7b7:'',p7b8:'',
+ p7b9:'',p7ba:'',p7bb:'',p7bc:'',p7bd:'',p7be:'',p7bf:'',p7c0:'',
+ p7c1:'',p7c2:'',p7c3:'',p7c4:'',p7c5:'',p7c6:'',p7c7:'',p7c8:'',
+ p7c9:'',p7ca:'',p7cb:'',p7cc:'',p7cd:'',p7ce:'',p7cf:'',p7d0:'',
+ p7d1:'',p7d2:'',p7d3:'',p7d4:'',p7d5:'',p7d6:'',p7d7:'',p7d8:'',
+ p7d9:'',p7da:'',p7db:'',p7dc:'',p7dd:'',p7de:'',p7df:'',p7e0:'',
+ p7e1:'',p7e2:'',p7e3:'',p7e4:'',p7e5:'',p7e6:'',p7e7:'',p7e8:'',
+ p7e9:'',p7ea:'',p7eb:'',p7ec:'',p7ed:'',p7ee:'',p7ef:'',p7f0:'',
+ p7f1:'',p7f2:'',p7f3:'',p7f4:'',p7f5:'',p7f6:'',p7f7:'',p7f8:'',
+ p7f9:'',p7fa:'',p7fb:'',p7fc:'',p7fd:'',p7fe:'',p7ff:'',p800:'',
+ p801:'',p802:'',p803:'',p804:'',p805:'',p806:'',p807:'',p808:'',
+ p809:'',p80a:'',p80b:'',p80c:'',p80d:'',p80e:'',p80f:'',p810:'',
+ p811:'',p812:'',p813:'',p814:'',p815:'',p816:'',p817:'',p818:'',
+ p819:'',p81a:'',p81b:'',p81c:'',p81d:'',p81e:'',p81f:'',p820:'',
+ p821:'',p822:'',p823:'',p824:'',p825:'',p826:'',p827:'',p828:'',
+ p829:'',p82a:'',p82b:'',p82c:'',p82d:'',p82e:'',p82f:'',p830:'',
+ p831:'',p832:'',p833:'',p834:'',p835:'',p836:'',p837:'',p838:'',
+ p839:'',p83a:'',p83b:'',p83c:'',p83d:'',p83e:'',p83f:'',p840:'',
+ p841:'',p842:'',p843:'',p844:'',p845:'',p846:'',p847:'',p848:'',
+ p849:'',p84a:'',p84b:'',p84c:'',p84d:'',p84e:'',p84f:'',p850:'',
+ p851:'',p852:'',p853:'',p854:'',p855:'',p856:'',p857:'',p858:'',
+ p859:'',p85a:'',p85b:'',p85c:'',p85d:'',p85e:'',p85f:'',p860:'',
+ p861:'',p862:'',p863:'',p864:'',p865:'',p866:'',p867:'',p868:'',
+ p869:'',p86a:'',p86b:'',p86c:'',p86d:'',p86e:'',p86f:'',p870:'',
+ p871:'',p872:'',p873:'',p874:'',p875:'',p876:'',p877:'',p878:'',
+ p879:'',p87a:'',p87b:'',p87c:'',p87d:'',p87e:'',p87f:'',p880:'',
+ p881:'',p882:'',p883:'',p884:'',p885:'',p886:'',p887:'',p888:'',
+ p889:'',p88a:'',p88b:'',p88c:'',p88d:'',p88e:'',p88f:'',p890:'',
+ p891:'',p892:'',p893:'',p894:'',p895:'',p896:'',p897:'',p898:'',
+ p899:'',p89a:'',p89b:'',p89c:'',p89d:'',p89e:'',p89f:'',p8a0:'',
+ p8a1:'',p8a2:'',p8a3:'',p8a4:'',p8a5:'',p8a6:'',p8a7:'',p8a8:'',
+ p8a9:'',p8aa:'',p8ab:'',p8ac:'',p8ad:'',p8ae:'',p8af:'',p8b0:'',
+ p8b1:'',p8b2:'',p8b3:'',p8b4:'',p8b5:'',p8b6:'',p8b7:'',p8b8:'',
+ p8b9:'',p8ba:'',p8bb:'',p8bc:'',p8bd:'',p8be:'',p8bf:'',p8c0:'',
+ p8c1:'',p8c2:'',p8c3:'',p8c4:'',p8c5:'',p8c6:'',p8c7:'',p8c8:'',
+ p8c9:'',p8ca:'',p8cb:'',p8cc:'',p8cd:'',p8ce:'',p8cf:'',p8d0:'',
+ p8d1:'',p8d2:'',p8d3:'',p8d4:'',p8d5:'',p8d6:'',p8d7:'',p8d8:'',
+ p8d9:'',p8da:'',p8db:'',p8dc:'',p8dd:'',p8de:'',p8df:'',p8e0:'',
+ p8e1:'',p8e2:'',p8e3:'',p8e4:'',p8e5:'',p8e6:'',p8e7:'',p8e8:'',
+ p8e9:'',p8ea:'',p8eb:'',p8ec:'',p8ed:'',p8ee:'',p8ef:'',p8f0:'',
+ p8f1:'',p8f2:'',p8f3:'',p8f4:'',p8f5:'',p8f6:'',p8f7:'',p8f8:'',
+ p8f9:'',p8fa:'',p8fb:'',p8fc:'',p8fd:'',p8fe:'',p8ff:'',p900:'',
+ p901:'',p902:'',p903:'',p904:'',p905:'',p906:'',p907:'',p908:'',
+ p909:'',p90a:'',p90b:'',p90c:'',p90d:'',p90e:'',p90f:'',p910:'',
+ p911:'',p912:'',p913:'',p914:'',p915:'',p916:'',p917:'',p918:'',
+ p919:'',p91a:'',p91b:'',p91c:'',p91d:'',p91e:'',p91f:'',p920:'',
+ p921:'',p922:'',p923:'',p924:'',p925:'',p926:'',p927:'',p928:'',
+ p929:'',p92a:'',p92b:'',p92c:'',p92d:'',p92e:'',p92f:'',p930:'',
+ p931:'',p932:'',p933:'',p934:'',p935:'',p936:'',p937:'',p938:'',
+ p939:'',p93a:'',p93b:'',p93c:'',p93d:'',p93e:'',p93f:'',p940:'',
+ p941:'',p942:'',p943:'',p944:'',p945:'',p946:'',p947:'',p948:'',
+ p949:'',p94a:'',p94b:'',p94c:'',p94d:'',p94e:'',p94f:'',p950:'',
+ p951:'',p952:'',p953:'',p954:'',p955:'',p956:'',p957:'',p958:'',
+ p959:'',p95a:'',p95b:'',p95c:'',p95d:'',p95e:'',p95f:'',p960:'',
+ p961:'',p962:'',p963:'',p964:'',p965:'',p966:'',p967:'',p968:'',
+ p969:'',p96a:'',p96b:'',p96c:'',p96d:'',p96e:'',p96f:'',p970:'',
+ p971:'',p972:'',p973:'',p974:'',p975:'',p976:'',p977:'',p978:'',
+ p979:'',p97a:'',p97b:'',p97c:'',p97d:'',p97e:'',p97f:'',p980:'',
+ p981:'',p982:'',p983:'',p984:'',p985:'',p986:'',p987:'',p988:'',
+ p989:'',p98a:'',p98b:'',p98c:'',p98d:'',p98e:'',p98f:'',p990:'',
+ p991:'',p992:'',p993:'',p994:'',p995:'',p996:'',p997:'',p998:'',
+ p999:'',p99a:'',p99b:'',p99c:'',p99d:'',p99e:'',p99f:'',p9a0:'',
+ p9a1:'',p9a2:'',p9a3:'',p9a4:'',p9a5:'',p9a6:'',p9a7:'',p9a8:'',
+ p9a9:'',p9aa:'',p9ab:'',p9ac:'',p9ad:'',p9ae:'',p9af:'',p9b0:'',
+ p9b1:'',p9b2:'',p9b3:'',p9b4:'',p9b5:'',p9b6:'',p9b7:'',p9b8:'',
+ p9b9:'',p9ba:'',p9bb:'',p9bc:'',p9bd:'',p9be:'',p9bf:'',p9c0:'',
+ p9c1:'',p9c2:'',p9c3:'',p9c4:'',p9c5:'',p9c6:'',p9c7:'',p9c8:'',
+ p9c9:'',p9ca:'',p9cb:'',p9cc:'',p9cd:'',p9ce:'',p9cf:'',p9d0:'',
+ p9d1:'',p9d2:'',p9d3:'',p9d4:'',p9d5:'',p9d6:'',p9d7:'',p9d8:'',
+ p9d9:'',p9da:'',p9db:'',p9dc:'',p9dd:'',p9de:'',p9df:'',p9e0:'',
+ p9e1:'',p9e2:'',p9e3:'',p9e4:'',p9e5:'',p9e6:'',p9e7:'',p9e8:'',
+ p9e9:'',p9ea:'',p9eb:'',p9ec:'',p9ed:'',p9ee:'',p9ef:'',p9f0:'',
+ p9f1:'',p9f2:'',p9f3:'',p9f4:'',p9f5:'',p9f6:'',p9f7:'',p9f8:'',
+ p9f9:'',p9fa:'',p9fb:'',p9fc:'',p9fd:'',p9fe:'',p9ff:'',pa00:'',
+ pa01:'',pa02:'',pa03:'',pa04:'',pa05:'',pa06:'',pa07:'',pa08:'',
+ pa09:'',pa0a:'',pa0b:'',pa0c:'',pa0d:'',pa0e:'',pa0f:'',pa10:'',
+ pa11:'',pa12:'',pa13:'',pa14:'',pa15:'',pa16:'',pa17:'',pa18:'',
+ pa19:'',pa1a:'',pa1b:'',pa1c:'',pa1d:'',pa1e:'',pa1f:'',pa20:'',
+ pa21:'',pa22:'',pa23:'',pa24:'',pa25:'',pa26:'',pa27:'',pa28:'',
+ pa29:'',pa2a:'',pa2b:'',pa2c:'',pa2d:'',pa2e:'',pa2f:'',pa30:'',
+ pa31:'',pa32:'',pa33:'',pa34:'',pa35:'',pa36:'',pa37:'',pa38:'',
+ pa39:'',pa3a:'',pa3b:'',pa3c:'',pa3d:'',pa3e:'',pa3f:'',pa40:'',
+ pa41:'',pa42:'',pa43:'',pa44:'',pa45:'',pa46:'',pa47:'',pa48:'',
+ pa49:'',pa4a:'',pa4b:'',pa4c:'',pa4d:'',pa4e:'',pa4f:'',pa50:'',
+ pa51:'',pa52:'',pa53:'',pa54:'',pa55:'',pa56:'',pa57:'',pa58:'',
+ pa59:'',pa5a:'',pa5b:'',pa5c:'',pa5d:'',pa5e:'',pa5f:'',pa60:'',
+ pa61:'',pa62:'',pa63:'',pa64:'',pa65:'',pa66:'',pa67:'',pa68:'',
+ pa69:'',pa6a:'',pa6b:'',pa6c:'',pa6d:'',pa6e:'',pa6f:'',pa70:'',
+ pa71:'',pa72:'',pa73:'',pa74:'',pa75:'',pa76:'',pa77:'',pa78:'',
+ pa79:'',pa7a:'',pa7b:'',pa7c:'',pa7d:'',pa7e:'',pa7f:'',pa80:'',
+ pa81:'',pa82:'',pa83:'',pa84:'',pa85:'',pa86:'',pa87:'',pa88:'',
+ pa89:'',pa8a:'',pa8b:'',pa8c:'',pa8d:'',pa8e:'',pa8f:'',pa90:'',
+ pa91:'',pa92:'',pa93:'',pa94:'',pa95:'',pa96:'',pa97:'',pa98:'',
+ pa99:'',pa9a:'',pa9b:'',pa9c:'',pa9d:'',pa9e:'',pa9f:'',paa0:'',
+ paa1:'',paa2:'',paa3:'',paa4:'',paa5:'',paa6:'',paa7:'',paa8:'',
+ paa9:'',paaa:'',paab:'',paac:'',paad:'',paae:'',paaf:'',pab0:'',
+ pab1:'',pab2:'',pab3:'',pab4:'',pab5:'',pab6:'',pab7:'',pab8:'',
+ pab9:'',paba:'',pabb:'',pabc:'',pabd:'',pabe:'',pabf:'',pac0:'',
+ pac1:'',pac2:'',pac3:'',pac4:'',pac5:'',pac6:'',pac7:'',pac8:'',
+ pac9:'',paca:'',pacb:'',pacc:'',pacd:'',pace:'',pacf:'',pad0:'',
+ pad1:'',pad2:'',pad3:'',pad4:'',pad5:'',pad6:'',pad7:'',pad8:'',
+ pad9:'',pada:'',padb:'',padc:'',padd:'',pade:'',padf:'',pae0:'',
+ pae1:'',pae2:'',pae3:'',pae4:'',pae5:'',pae6:'',pae7:'',pae8:'',
+ pae9:'',paea:'',paeb:'',paec:'',paed:'',paee:'',paef:'',paf0:'',
+ paf1:'',paf2:'',paf3:'',paf4:'',paf5:'',paf6:'',paf7:'',paf8:'',
+ paf9:'',pafa:'',pafb:'',pafc:'',pafd:'',pafe:'',paff:'',pb00:'',
+ pb01:'',pb02:'',pb03:'',pb04:'',pb05:'',pb06:'',pb07:'',pb08:'',
+ pb09:'',pb0a:'',pb0b:'',pb0c:'',pb0d:'',pb0e:'',pb0f:'',pb10:'',
+ pb11:'',pb12:'',pb13:'',pb14:'',pb15:'',pb16:'',pb17:'',pb18:'',
+ pb19:'',pb1a:'',pb1b:'',pb1c:'',pb1d:'',pb1e:'',pb1f:'',pb20:'',
+ pb21:'',pb22:'',pb23:'',pb24:'',pb25:'',pb26:'',pb27:'',pb28:'',
+ pb29:'',pb2a:'',pb2b:'',pb2c:'',pb2d:'',pb2e:'',pb2f:'',pb30:'',
+ pb31:'',pb32:'',pb33:'',pb34:'',pb35:'',pb36:'',pb37:'',pb38:'',
+ pb39:'',pb3a:'',pb3b:'',pb3c:'',pb3d:'',pb3e:'',pb3f:'',pb40:'',
+ pb41:'',pb42:'',pb43:'',pb44:'',pb45:'',pb46:'',pb47:'',pb48:'',
+ pb49:'',pb4a:'',pb4b:'',pb4c:'',pb4d:'',pb4e:'',pb4f:'',pb50:'',
+ pb51:'',pb52:'',pb53:'',pb54:'',pb55:'',pb56:'',pb57:'',pb58:'',
+ pb59:'',pb5a:'',pb5b:'',pb5c:'',pb5d:'',pb5e:'',pb5f:'',pb60:'',
+ pb61:'',pb62:'',pb63:'',pb64:'',pb65:'',pb66:'',pb67:'',pb68:'',
+ pb69:'',pb6a:'',pb6b:'',pb6c:'',pb6d:'',pb6e:'',pb6f:'',pb70:'',
+ pb71:'',pb72:'',pb73:'',pb74:'',pb75:'',pb76:'',pb77:'',pb78:'',
+ pb79:'',pb7a:'',pb7b:'',pb7c:'',pb7d:'',pb7e:'',pb7f:'',pb80:'',
+ pb81:'',pb82:'',pb83:'',pb84:'',pb85:'',pb86:'',pb87:'',pb88:'',
+ pb89:'',pb8a:'',pb8b:'',pb8c:'',pb8d:'',pb8e:'',pb8f:'',pb90:'',
+ pb91:'',pb92:'',pb93:'',pb94:'',pb95:'',pb96:'',pb97:'',pb98:'',
+ pb99:'',pb9a:'',pb9b:'',pb9c:'',pb9d:'',pb9e:'',pb9f:'',pba0:'',
+ pba1:'',pba2:'',pba3:'',pba4:'',pba5:'',pba6:'',pba7:'',pba8:'',
+ pba9:'',pbaa:'',pbab:'',pbac:'',pbad:'',pbae:'',pbaf:'',pbb0:'',
+ pbb1:'',pbb2:'',pbb3:'',pbb4:'',pbb5:'',pbb6:'',pbb7:'',pbb8:'',
+ pbb9:'',pbba:'',pbbb:'',pbbc:'',pbbd:'',pbbe:'',pbbf:'',pbc0:'',
+ pbc1:'',pbc2:'',pbc3:'',pbc4:'',pbc5:'',pbc6:'',pbc7:'',pbc8:'',
+ pbc9:'',pbca:'',pbcb:'',pbcc:'',pbcd:'',pbce:'',pbcf:'',pbd0:'',
+ pbd1:'',pbd2:'',pbd3:'',pbd4:'',pbd5:'',pbd6:'',pbd7:'',pbd8:'',
+ pbd9:'',pbda:'',pbdb:'',pbdc:'',pbdd:'',pbde:'',pbdf:'',pbe0:'',
+ pbe1:'',pbe2:'',pbe3:'',pbe4:'',pbe5:'',pbe6:'',pbe7:'',pbe8:'',
+ pbe9:'',pbea:'',pbeb:'',pbec:'',pbed:'',pbee:'',pbef:'',pbf0:'',
+ pbf1:'',pbf2:'',pbf3:'',pbf4:'',pbf5:'',pbf6:'',pbf7:'',pbf8:'',
+ pbf9:'',pbfa:'',pbfb:'',pbfc:'',pbfd:'',pbfe:'',pbff:'',pc00:'',
+ pc01:'',pc02:'',pc03:'',pc04:'',pc05:'',pc06:'',pc07:'',pc08:'',
+ pc09:'',pc0a:'',pc0b:'',pc0c:'',pc0d:'',pc0e:'',pc0f:'',pc10:'',
+ pc11:'',pc12:'',pc13:'',pc14:'',pc15:'',pc16:'',pc17:'',pc18:'',
+ pc19:'',pc1a:'',pc1b:'',pc1c:'',pc1d:'',pc1e:'',pc1f:'',pc20:'',
+ pc21:'',pc22:'',pc23:'',pc24:'',pc25:'',pc26:'',pc27:'',pc28:'',
+ pc29:'',pc2a:'',pc2b:'',pc2c:'',pc2d:'',pc2e:'',pc2f:'',pc30:'',
+ pc31:'',pc32:'',pc33:'',pc34:'',pc35:'',pc36:'',pc37:'',pc38:'',
+ pc39:'',pc3a:'',pc3b:'',pc3c:'',pc3d:'',pc3e:'',pc3f:'',pc40:'',
+ pc41:'',pc42:'',pc43:'',pc44:'',pc45:'',pc46:'',pc47:'',pc48:'',
+ pc49:'',pc4a:'',pc4b:'',pc4c:'',pc4d:'',pc4e:'',pc4f:'',pc50:'',
+ pc51:'',pc52:'',pc53:'',pc54:'',pc55:'',pc56:'',pc57:'',pc58:'',
+ pc59:'',pc5a:'',pc5b:'',pc5c:'',pc5d:'',pc5e:'',pc5f:'',pc60:'',
+ pc61:'',pc62:'',pc63:'',pc64:'',pc65:'',pc66:'',pc67:'',pc68:'',
+ pc69:'',pc6a:'',pc6b:'',pc6c:'',pc6d:'',pc6e:'',pc6f:'',pc70:'',
+ pc71:'',pc72:'',pc73:'',pc74:'',pc75:'',pc76:'',pc77:'',pc78:'',
+ pc79:'',pc7a:'',pc7b:'',pc7c:'',pc7d:'',pc7e:'',pc7f:'',pc80:'',
+ pc81:'',pc82:'',pc83:'',pc84:'',pc85:'',pc86:'',pc87:'',pc88:'',
+ pc89:'',pc8a:'',pc8b:'',pc8c:'',pc8d:'',pc8e:'',pc8f:'',pc90:'',
+ pc91:'',pc92:'',pc93:'',pc94:'',pc95:'',pc96:'',pc97:'',pc98:'',
+ pc99:'',pc9a:'',pc9b:'',pc9c:'',pc9d:'',pc9e:'',pc9f:'',pca0:'',
+ pca1:'',pca2:'',pca3:'',pca4:'',pca5:'',pca6:'',pca7:'',pca8:'',
+ pca9:'',pcaa:'',pcab:'',pcac:'',pcad:'',pcae:'',pcaf:'',pcb0:'',
+ pcb1:'',pcb2:'',pcb3:'',pcb4:'',pcb5:'',pcb6:'',pcb7:'',pcb8:'',
+ pcb9:'',pcba:'',pcbb:'',pcbc:'',pcbd:'',pcbe:'',pcbf:'',pcc0:'',
+ pcc1:'',pcc2:'',pcc3:'',pcc4:'',pcc5:'',pcc6:'',pcc7:'',pcc8:'',
+ pcc9:'',pcca:'',pccb:'',pccc:'',pccd:'',pcce:'',pccf:'',pcd0:'',
+ pcd1:'',pcd2:'',pcd3:'',pcd4:'',pcd5:'',pcd6:'',pcd7:'',pcd8:'',
+ pcd9:'',pcda:'',pcdb:'',pcdc:'',pcdd:'',pcde:'',pcdf:'',pce0:'',
+ pce1:'',pce2:'',pce3:'',pce4:'',pce5:'',pce6:'',pce7:'',pce8:'',
+ pce9:'',pcea:'',pceb:'',pcec:'',pced:'',pcee:'',pcef:'',pcf0:'',
+ pcf1:'',pcf2:'',pcf3:'',pcf4:'',pcf5:'',pcf6:'',pcf7:'',pcf8:'',
+ pcf9:'',pcfa:'',pcfb:'',pcfc:'',pcfd:'',pcfe:'',pcff:'',pd00:'',
+ pd01:'',pd02:'',pd03:'',pd04:'',pd05:'',pd06:'',pd07:'',pd08:'',
+ pd09:'',pd0a:'',pd0b:'',pd0c:'',pd0d:'',pd0e:'',pd0f:'',pd10:'',
+ pd11:'',pd12:'',pd13:'',pd14:'',pd15:'',pd16:'',pd17:'',pd18:'',
+ pd19:'',pd1a:'',pd1b:'',pd1c:'',pd1d:'',pd1e:'',pd1f:'',pd20:'',
+ pd21:'',pd22:'',pd23:'',pd24:'',pd25:'',pd26:'',pd27:'',pd28:'',
+ pd29:'',pd2a:'',pd2b:'',pd2c:'',pd2d:'',pd2e:'',pd2f:'',pd30:'',
+ pd31:'',pd32:'',pd33:'',pd34:'',pd35:'',pd36:'',pd37:'',pd38:'',
+ pd39:'',pd3a:'',pd3b:'',pd3c:'',pd3d:'',pd3e:'',pd3f:'',pd40:'',
+ pd41:'',pd42:'',pd43:'',pd44:'',pd45:'',pd46:'',pd47:'',pd48:'',
+ pd49:'',pd4a:'',pd4b:'',pd4c:'',pd4d:'',pd4e:'',pd4f:'',pd50:'',
+ pd51:'',pd52:'',pd53:'',pd54:'',pd55:'',pd56:'',pd57:'',pd58:'',
+ pd59:'',pd5a:'',pd5b:'',pd5c:'',pd5d:'',pd5e:'',pd5f:'',pd60:'',
+ pd61:'',pd62:'',pd63:'',pd64:'',pd65:'',pd66:'',pd67:'',pd68:'',
+ pd69:'',pd6a:'',pd6b:'',pd6c:'',pd6d:'',pd6e:'',pd6f:'',pd70:'',
+ pd71:'',pd72:'',pd73:'',pd74:'',pd75:'',pd76:'',pd77:'',pd78:'',
+ pd79:'',pd7a:'',pd7b:'',pd7c:'',pd7d:'',pd7e:'',pd7f:'',pd80:'',
+ pd81:'',pd82:'',pd83:'',pd84:'',pd85:'',pd86:'',pd87:'',pd88:'',
+ pd89:'',pd8a:'',pd8b:'',pd8c:'',pd8d:'',pd8e:'',pd8f:'',pd90:'',
+ pd91:'',pd92:'',pd93:'',pd94:'',pd95:'',pd96:'',pd97:'',pd98:'',
+ pd99:'',pd9a:'',pd9b:'',pd9c:'',pd9d:'',pd9e:'',pd9f:'',pda0:'',
+ pda1:'',pda2:'',pda3:'',pda4:'',pda5:'',pda6:'',pda7:'',pda8:'',
+ pda9:'',pdaa:'',pdab:'',pdac:'',pdad:'',pdae:'',pdaf:'',pdb0:'',
+ pdb1:'',pdb2:'',pdb3:'',pdb4:'',pdb5:'',pdb6:'',pdb7:'',pdb8:'',
+ pdb9:'',pdba:'',pdbb:'',pdbc:'',pdbd:'',pdbe:'',pdbf:'',pdc0:'',
+ pdc1:'',pdc2:'',pdc3:'',pdc4:'',pdc5:'',pdc6:'',pdc7:'',pdc8:'',
+ pdc9:'',pdca:'',pdcb:'',pdcc:'',pdcd:'',pdce:'',pdcf:'',pdd0:'',
+ pdd1:'',pdd2:'',pdd3:'',pdd4:'',pdd5:'',pdd6:'',pdd7:'',pdd8:'',
+ pdd9:'',pdda:'',pddb:'',pddc:'',pddd:'',pdde:'',pddf:'',pde0:'',
+ pde1:'',pde2:'',pde3:'',pde4:'',pde5:'',pde6:'',pde7:'',pde8:'',
+ pde9:'',pdea:'',pdeb:'',pdec:'',pded:'',pdee:'',pdef:'',pdf0:'',
+ pdf1:'',pdf2:'',pdf3:'',pdf4:'',pdf5:'',pdf6:'',pdf7:'',pdf8:'',
+ pdf9:'',pdfa:'',pdfb:'',pdfc:'',pdfd:'',pdfe:'',pdff:'',pe00:'',
+ pe01:'',pe02:'',pe03:'',pe04:'',pe05:'',pe06:'',pe07:'',pe08:'',
+ pe09:'',pe0a:'',pe0b:'',pe0c:'',pe0d:'',pe0e:'',pe0f:'',pe10:'',
+ pe11:'',pe12:'',pe13:'',pe14:'',pe15:'',pe16:'',pe17:'',pe18:'',
+ pe19:'',pe1a:'',pe1b:'',pe1c:'',pe1d:'',pe1e:'',pe1f:'',pe20:'',
+ pe21:'',pe22:'',pe23:'',pe24:'',pe25:'',pe26:'',pe27:'',pe28:'',
+ pe29:'',pe2a:'',pe2b:'',pe2c:'',pe2d:'',pe2e:'',pe2f:'',pe30:'',
+ pe31:'',pe32:'',pe33:'',pe34:'',pe35:'',pe36:'',pe37:'',pe38:'',
+ pe39:'',pe3a:'',pe3b:'',pe3c:'',pe3d:'',pe3e:'',pe3f:'',pe40:'',
+ pe41:'',pe42:'',pe43:'',pe44:'',pe45:'',pe46:'',pe47:'',pe48:'',
+ pe49:'',pe4a:'',pe4b:'',pe4c:'',pe4d:'',pe4e:'',pe4f:'',pe50:'',
+ pe51:'',pe52:'',pe53:'',pe54:'',pe55:'',pe56:'',pe57:'',pe58:'',
+ pe59:'',pe5a:'',pe5b:'',pe5c:'',pe5d:'',pe5e:'',pe5f:'',pe60:'',
+ pe61:'',pe62:'',pe63:'',pe64:'',pe65:'',pe66:'',pe67:'',pe68:'',
+ pe69:'',pe6a:'',pe6b:'',pe6c:'',pe6d:'',pe6e:'',pe6f:'',pe70:'',
+ pe71:'',pe72:'',pe73:'',pe74:'',pe75:'',pe76:'',pe77:'',pe78:'',
+ pe79:'',pe7a:'',pe7b:'',pe7c:'',pe7d:'',pe7e:'',pe7f:'',pe80:'',
+ pe81:'',pe82:'',pe83:'',pe84:'',pe85:'',pe86:'',pe87:'',pe88:'',
+ pe89:'',pe8a:'',pe8b:'',pe8c:'',pe8d:'',pe8e:'',pe8f:'',pe90:'',
+ pe91:'',pe92:'',pe93:'',pe94:'',pe95:'',pe96:'',pe97:'',pe98:'',
+ pe99:'',pe9a:'',pe9b:'',pe9c:'',pe9d:'',pe9e:'',pe9f:'',pea0:'',
+ pea1:'',pea2:'',pea3:'',pea4:'',pea5:'',pea6:'',pea7:'',pea8:'',
+ pea9:'',peaa:'',peab:'',peac:'',pead:'',peae:'',peaf:'',peb0:'',
+ peb1:'',peb2:'',peb3:'',peb4:'',peb5:'',peb6:'',peb7:'',peb8:'',
+ peb9:'',peba:'',pebb:'',pebc:'',pebd:'',pebe:'',pebf:'',pec0:'',
+ pec1:'',pec2:'',pec3:'',pec4:'',pec5:'',pec6:'',pec7:'',pec8:'',
+ pec9:'',peca:'',pecb:'',pecc:'',pecd:'',pece:'',pecf:'',ped0:'',
+ ped1:'',ped2:'',ped3:'',ped4:'',ped5:'',ped6:'',ped7:'',ped8:'',
+ ped9:'',peda:'',pedb:'',pedc:'',pedd:'',pede:'',pedf:'',pee0:'',
+ pee1:'',pee2:'',pee3:'',pee4:'',pee5:'',pee6:'',pee7:'',pee8:'',
+ pee9:'',peea:'',peeb:'',peec:'',peed:'',peee:'',peef:'',pef0:'',
+ pef1:'',pef2:'',pef3:'',pef4:'',pef5:'',pef6:'',pef7:'',pef8:'',
+ pef9:'',pefa:'',pefb:'',pefc:'',pefd:'',pefe:'',peff:'',pf00:'',
+ pf01:'',pf02:'',pf03:'',pf04:'',pf05:'',pf06:'',pf07:'',pf08:'',
+ pf09:'',pf0a:'',pf0b:'',pf0c:'',pf0d:'',pf0e:'',pf0f:'',pf10:'',
+ pf11:'',pf12:'',pf13:'',pf14:'',pf15:'',pf16:'',pf17:'',pf18:'',
+ pf19:'',pf1a:'',pf1b:'',pf1c:'',pf1d:'',pf1e:'',pf1f:'',pf20:'',
+ pf21:'',pf22:'',pf23:'',pf24:'',pf25:'',pf26:'',pf27:'',pf28:'',
+ pf29:'',pf2a:'',pf2b:'',pf2c:'',pf2d:'',pf2e:'',pf2f:'',pf30:'',
+ pf31:'',pf32:'',pf33:'',pf34:'',pf35:'',pf36:'',pf37:'',pf38:'',
+ pf39:'',pf3a:'',pf3b:'',pf3c:'',pf3d:'',pf3e:'',pf3f:'',pf40:'',
+ pf41:'',pf42:'',pf43:'',pf44:'',pf45:'',pf46:'',pf47:'',pf48:'',
+ pf49:'',pf4a:'',pf4b:'',pf4c:'',pf4d:'',pf4e:'',pf4f:'',pf50:'',
+ pf51:'',pf52:'',pf53:'',pf54:'',pf55:'',pf56:'',pf57:'',pf58:'',
+ pf59:'',pf5a:'',pf5b:'',pf5c:'',pf5d:'',pf5e:'',pf5f:'',pf60:'',
+ pf61:'',pf62:'',pf63:'',pf64:'',pf65:'',pf66:'',pf67:'',pf68:'',
+ pf69:'',pf6a:'',pf6b:'',pf6c:'',pf6d:'',pf6e:'',pf6f:'',pf70:'',
+ pf71:'',pf72:'',pf73:'',pf74:'',pf75:'',pf76:'',pf77:'',pf78:'',
+ pf79:'',pf7a:'',pf7b:'',pf7c:'',pf7d:'',pf7e:'',pf7f:'',pf80:'',
+ pf81:'',pf82:'',pf83:'',pf84:'',pf85:'',pf86:'',pf87:'',pf88:'',
+ pf89:'',pf8a:'',pf8b:'',pf8c:'',pf8d:'',pf8e:'',pf8f:'',pf90:'',
+ pf91:'',pf92:'',pf93:'',pf94:'',pf95:'',pf96:'',pf97:'',pf98:'',
+ pf99:'',pf9a:'',pf9b:'',pf9c:'',pf9d:'',pf9e:'',pf9f:'',pfa0:'',
+ pfa1:'',pfa2:'',pfa3:'',pfa4:'',pfa5:'',pfa6:'',pfa7:'',pfa8:'',
+ pfa9:'',pfaa:'',pfab:'',pfac:'',pfad:'',pfae:'',pfaf:'',pfb0:'',
+ pfb1:'',pfb2:'',pfb3:'',pfb4:'',pfb5:'',pfb6:'',pfb7:'',pfb8:'',
+ pfb9:'',pfba:'',pfbb:'',pfbc:'',pfbd:'',pfbe:'',pfbf:'',pfc0:'',
+ pfc1:'',pfc2:'',pfc3:'',pfc4:'',pfc5:'',pfc6:'',pfc7:'',pfc8:'',
+ pfc9:'',pfca:'',pfcb:'',pfcc:'',pfcd:'',pfce:'',pfcf:'',pfd0:'',
+ pfd1:'',pfd2:'',pfd3:'',pfd4:'',pfd5:'',pfd6:'',pfd7:'',pfd8:'',
+ pfd9:'',pfda:'',pfdb:'',pfdc:'',pfdd:'',pfde:'',pfdf:'',pfe0:'',
+ pfe1:'',pfe2:'',pfe3:'',pfe4:'',pfe5:'',pfe6:'',pfe7:'',pfe8:'',
+ pfe9:'',pfea:'',pfeb:'',pfec:'',pfed:'',pfee:'',pfef:'',pff0:'',
+ pff1:'',pff2:'',pff3:'',pff4:'',pff5:'',pff6:'',pff7:'',pff8:'',
+ pff9:'',pffa:'',pffb:'',pffc:'',pffd:'',pffe:'',pfff:'',p1000:'',
+ p1001:'',p1002:'',p1003:'',p1004:'',p1005:'',p1006:'',p1007:'',p1008:'',
+ p1009:'',p100a:'',p100b:'',p100c:'',p100d:'',p100e:'',p100f:'',p1010:'',
+ p1011:'',p1012:'',p1013:'',p1014:'',p1015:'',p1016:'',p1017:'',p1018:'',
+ p1019:'',p101a:'',p101b:'',p101c:'',p101d:'',p101e:'',p101f:'',p1020:'',
+ p1021:'',p1022:'',p1023:'',p1024:'',p1025:'',p1026:'',p1027:'',p1028:'',
+ p1029:'',p102a:'',p102b:'',p102c:'',p102d:'',p102e:'',p102f:'',p1030:'',
+ p1031:'',p1032:'',p1033:'',p1034:'',p1035:'',p1036:'',p1037:'',p1038:'',
+ p1039:'',p103a:'',p103b:'',p103c:'',p103d:'',p103e:'',p103f:'',p1040:'',
+ p1041:'',p1042:'',p1043:'',p1044:'',p1045:'',p1046:'',p1047:'',p1048:'',
+ p1049:'',p104a:'',p104b:'',p104c:'',p104d:'',p104e:'',p104f:'',p1050:'',
+ p1051:'',p1052:'',p1053:'',p1054:'',p1055:'',p1056:'',p1057:'',p1058:'',
+ p1059:'',p105a:'',p105b:'',p105c:'',p105d:'',p105e:'',p105f:'',p1060:'',
+ p1061:'',p1062:'',p1063:'',p1064:'',p1065:'',p1066:'',p1067:'',p1068:'',
+ p1069:'',p106a:'',p106b:'',p106c:'',p106d:'',p106e:'',p106f:'',p1070:'',
+ p1071:'',p1072:'',p1073:'',p1074:'',p1075:'',p1076:'',p1077:'',p1078:'',
+ p1079:'',p107a:'',p107b:'',p107c:'',p107d:'',p107e:'',p107f:'',p1080:'',
+ p1081:'',p1082:'',p1083:'',p1084:'',p1085:'',p1086:'',p1087:'',p1088:'',
+ p1089:'',p108a:'',p108b:'',p108c:'',p108d:'',p108e:'',p108f:'',p1090:'',
+ p1091:'',p1092:'',p1093:'',p1094:'',p1095:'',p1096:'',p1097:'',p1098:'',
+ p1099:'',p109a:'',p109b:'',p109c:'',p109d:'',p109e:'',p109f:'',p10a0:'',
+ p10a1:'',p10a2:'',p10a3:'',p10a4:'',p10a5:'',p10a6:'',p10a7:'',p10a8:'',
+ p10a9:'',p10aa:'',p10ab:'',p10ac:'',p10ad:'',p10ae:'',p10af:'',p10b0:'',
+ p10b1:'',p10b2:'',p10b3:'',p10b4:'',p10b5:'',p10b6:'',p10b7:'',p10b8:'',
+ p10b9:'',p10ba:'',p10bb:'',p10bc:'',p10bd:'',p10be:'',p10bf:'',p10c0:'',
+ p10c1:'',p10c2:'',p10c3:'',p10c4:'',p10c5:'',p10c6:'',p10c7:'',p10c8:'',
+ p10c9:'',p10ca:'',p10cb:'',p10cc:'',p10cd:'',p10ce:'',p10cf:'',p10d0:'',
+ p10d1:'',p10d2:'',p10d3:'',p10d4:'',p10d5:'',p10d6:'',p10d7:'',p10d8:'',
+ p10d9:'',p10da:'',p10db:'',p10dc:'',p10dd:'',p10de:'',p10df:'',p10e0:'',
+ p10e1:'',p10e2:'',p10e3:'',p10e4:'',p10e5:'',p10e6:'',p10e7:'',p10e8:'',
+ p10e9:'',p10ea:'',p10eb:'',p10ec:'',p10ed:'',p10ee:'',p10ef:'',p10f0:'',
+ p10f1:'',p10f2:'',p10f3:'',p10f4:'',p10f5:'',p10f6:'',p10f7:'',p10f8:'',
+ p10f9:'',p10fa:'',p10fb:'',p10fc:'',p10fd:'',p10fe:'',p10ff:'',p1100:'',
+ p1101:'',p1102:'',p1103:'',p1104:'',p1105:'',p1106:'',p1107:'',p1108:'',
+ p1109:'',p110a:'',p110b:'',p110c:'',p110d:'',p110e:'',p110f:'',p1110:'',
+ p1111:'',p1112:'',p1113:'',p1114:'',p1115:'',p1116:'',p1117:'',p1118:'',
+ p1119:'',p111a:'',p111b:'',p111c:'',p111d:'',p111e:'',p111f:'',p1120:'',
+ p1121:'',p1122:'',p1123:'',p1124:'',p1125:'',p1126:'',p1127:'',p1128:'',
+ p1129:'',p112a:'',p112b:'',p112c:'',p112d:'',p112e:'',p112f:'',p1130:'',
+ p1131:'',p1132:'',p1133:'',p1134:'',p1135:'',p1136:'',p1137:'',p1138:'',
+ p1139:'',p113a:'',p113b:'',p113c:'',p113d:'',p113e:'',p113f:'',p1140:'',
+ p1141:'',p1142:'',p1143:'',p1144:'',p1145:'',p1146:'',p1147:'',p1148:'',
+ p1149:'',p114a:'',p114b:'',p114c:'',p114d:'',p114e:'',p114f:'',p1150:'',
+ p1151:'',p1152:'',p1153:'',p1154:'',p1155:'',p1156:'',p1157:'',p1158:'',
+ p1159:'',p115a:'',p115b:'',p115c:'',p115d:'',p115e:'',p115f:'',p1160:'',
+ p1161:'',p1162:'',p1163:'',p1164:'',p1165:'',p1166:'',p1167:'',p1168:'',
+ p1169:'',p116a:'',p116b:'',p116c:'',p116d:'',p116e:'',p116f:'',p1170:'',
+ p1171:'',p1172:'',p1173:'',p1174:'',p1175:'',p1176:'',p1177:'',p1178:'',
+ p1179:'',p117a:'',p117b:'',p117c:'',p117d:'',p117e:'',p117f:'',p1180:'',
+ p1181:'',p1182:'',p1183:'',p1184:'',p1185:'',p1186:'',p1187:'',p1188:'',
+ p1189:'',p118a:'',p118b:'',p118c:'',p118d:'',p118e:'',p118f:'',p1190:'',
+ p1191:'',p1192:'',p1193:'',p1194:'',p1195:'',p1196:'',p1197:'',p1198:'',
+ p1199:'',p119a:'',p119b:'',p119c:'',p119d:'',p119e:'',p119f:'',p11a0:'',
+ p11a1:'',p11a2:'',p11a3:'',p11a4:'',p11a5:'',p11a6:'',p11a7:'',p11a8:'',
+ p11a9:'',p11aa:'',p11ab:'',p11ac:'',p11ad:'',p11ae:'',p11af:'',p11b0:'',
+ p11b1:'',p11b2:'',p11b3:'',p11b4:'',p11b5:'',p11b6:'',p11b7:'',p11b8:'',
+ p11b9:'',p11ba:'',p11bb:'',p11bc:'',p11bd:'',p11be:'',p11bf:'',p11c0:'',
+ p11c1:'',p11c2:'',p11c3:'',p11c4:'',p11c5:'',p11c6:'',p11c7:'',p11c8:'',
+ p11c9:'',p11ca:'',p11cb:'',p11cc:'',p11cd:'',p11ce:'',p11cf:'',p11d0:'',
+ p11d1:'',p11d2:'',p11d3:'',p11d4:'',p11d5:'',p11d6:'',p11d7:'',p11d8:'',
+ p11d9:'',p11da:'',p11db:'',p11dc:'',p11dd:'',p11de:'',p11df:'',p11e0:'',
+ p11e1:'',p11e2:'',p11e3:'',p11e4:'',p11e5:'',p11e6:'',p11e7:'',p11e8:'',
+ p11e9:'',p11ea:'',p11eb:'',p11ec:'',p11ed:'',p11ee:'',p11ef:'',p11f0:'',
+ p11f1:'',p11f2:'',p11f3:'',p11f4:'',p11f5:'',p11f6:'',p11f7:'',p11f8:'',
+ p11f9:'',p11fa:'',p11fb:'',p11fc:'',p11fd:'',p11fe:'',p11ff:'',p1200:'',
+ p1201:'',p1202:'',p1203:'',p1204:'',p1205:'',p1206:'',p1207:'',p1208:'',
+ p1209:'',p120a:'',p120b:'',p120c:'',p120d:'',p120e:'',p120f:'',p1210:'',
+ p1211:'',p1212:'',p1213:'',p1214:'',p1215:'',p1216:'',p1217:'',p1218:'',
+ p1219:'',p121a:'',p121b:'',p121c:'',p121d:'',p121e:'',p121f:'',p1220:'',
+ p1221:'',p1222:'',p1223:'',p1224:'',p1225:'',p1226:'',p1227:'',p1228:'',
+ p1229:'',p122a:'',p122b:'',p122c:'',p122d:'',p122e:'',p122f:'',p1230:'',
+ p1231:'',p1232:'',p1233:'',p1234:'',p1235:'',p1236:'',p1237:'',p1238:'',
+ p1239:'',p123a:'',p123b:'',p123c:'',p123d:'',p123e:'',p123f:'',p1240:'',
+ p1241:'',p1242:'',p1243:'',p1244:'',p1245:'',p1246:'',p1247:'',p1248:'',
+ p1249:'',p124a:'',p124b:'',p124c:'',p124d:'',p124e:'',p124f:'',p1250:'',
+ p1251:'',p1252:'',p1253:'',p1254:'',p1255:'',p1256:'',p1257:'',p1258:'',
+ p1259:'',p125a:'',p125b:'',p125c:'',p125d:'',p125e:'',p125f:'',p1260:'',
+ p1261:'',p1262:'',p1263:'',p1264:'',p1265:'',p1266:'',p1267:'',p1268:'',
+ p1269:'',p126a:'',p126b:'',p126c:'',p126d:'',p126e:'',p126f:'',p1270:'',
+ p1271:'',p1272:'',p1273:'',p1274:'',p1275:'',p1276:'',p1277:'',p1278:'',
+ p1279:'',p127a:'',p127b:'',p127c:'',p127d:'',p127e:'',p127f:'',p1280:'',
+ p1281:'',p1282:'',p1283:'',p1284:'',p1285:'',p1286:'',p1287:'',p1288:'',
+ p1289:'',p128a:'',p128b:'',p128c:'',p128d:'',p128e:'',p128f:'',p1290:'',
+ p1291:'',p1292:'',p1293:'',p1294:'',p1295:'',p1296:'',p1297:'',p1298:'',
+ p1299:'',p129a:'',p129b:'',p129c:'',p129d:'',p129e:'',p129f:'',p12a0:'',
+ p12a1:'',p12a2:'',p12a3:'',p12a4:'',p12a5:'',p12a6:'',p12a7:'',p12a8:'',
+ p12a9:'',p12aa:'',p12ab:'',p12ac:'',p12ad:'',p12ae:'',p12af:'',p12b0:'',
+ p12b1:'',p12b2:'',p12b3:'',p12b4:'',p12b5:'',p12b6:'',p12b7:'',p12b8:'',
+ p12b9:'',p12ba:'',p12bb:'',p12bc:'',p12bd:'',p12be:'',p12bf:'',p12c0:'',
+ p12c1:'',p12c2:'',p12c3:'',p12c4:'',p12c5:'',p12c6:'',p12c7:'',p12c8:'',
+ p12c9:'',p12ca:'',p12cb:'',p12cc:'',p12cd:'',p12ce:'',p12cf:'',p12d0:'',
+ p12d1:'',p12d2:'',p12d3:'',p12d4:'',p12d5:'',p12d6:'',p12d7:'',p12d8:'',
+ p12d9:'',p12da:'',p12db:'',p12dc:'',p12dd:'',p12de:'',p12df:'',p12e0:'',
+ p12e1:'',p12e2:'',p12e3:'',p12e4:'',p12e5:'',p12e6:'',p12e7:'',p12e8:'',
+ p12e9:'',p12ea:'',p12eb:'',p12ec:'',p12ed:'',p12ee:'',p12ef:'',p12f0:'',
+ p12f1:'',p12f2:'',p12f3:'',p12f4:'',p12f5:'',p12f6:'',p12f7:'',p12f8:'',
+ p12f9:'',p12fa:'',p12fb:'',p12fc:'',p12fd:'',p12fe:'',p12ff:'',p1300:'',
+ p1301:'',p1302:'',p1303:'',p1304:'',p1305:'',p1306:'',p1307:'',p1308:'',
+ p1309:'',p130a:'',p130b:'',p130c:'',p130d:'',p130e:'',p130f:'',p1310:'',
+ p1311:'',p1312:'',p1313:'',p1314:'',p1315:'',p1316:'',p1317:'',p1318:'',
+ p1319:'',p131a:'',p131b:'',p131c:'',p131d:'',p131e:'',p131f:'',p1320:'',
+ p1321:'',p1322:'',p1323:'',p1324:'',p1325:'',p1326:'',p1327:'',p1328:'',
+ p1329:'',p132a:'',p132b:'',p132c:'',p132d:'',p132e:'',p132f:'',p1330:'',
+ p1331:'',p1332:'',p1333:'',p1334:'',p1335:'',p1336:'',p1337:'',p1338:'',
+ p1339:'',p133a:'',p133b:'',p133c:'',p133d:'',p133e:'',p133f:'',p1340:'',
+ p1341:'',p1342:'',p1343:'',p1344:'',p1345:'',p1346:'',p1347:'',p1348:'',
+ p1349:'',p134a:'',p134b:'',p134c:'',p134d:'',p134e:'',p134f:'',p1350:'',
+ p1351:'',p1352:'',p1353:'',p1354:'',p1355:'',p1356:'',p1357:'',p1358:'',
+ p1359:'',p135a:'',p135b:'',p135c:'',p135d:'',p135e:'',p135f:'',p1360:'',
+ p1361:'',p1362:'',p1363:'',p1364:'',p1365:'',p1366:'',p1367:'',p1368:'',
+ p1369:'',p136a:'',p136b:'',p136c:'',p136d:'',p136e:'',p136f:'',p1370:'',
+ p1371:'',p1372:'',p1373:'',p1374:'',p1375:'',p1376:'',p1377:'',p1378:'',
+ p1379:'',p137a:'',p137b:'',p137c:'',p137d:'',p137e:'',p137f:'',p1380:'',
+ p1381:'',p1382:'',p1383:'',p1384:'',p1385:'',p1386:'',p1387:'',p1388:'',
+ p1389:'',p138a:'',p138b:'',p138c:'',p138d:'',p138e:'',p138f:'',p1390:'',
+ p1391:'',p1392:'',p1393:'',p1394:'',p1395:'',p1396:'',p1397:'',p1398:'',
+ p1399:'',p139a:'',p139b:'',p139c:'',p139d:'',p139e:'',p139f:'',p13a0:'',
+ p13a1:'',p13a2:'',p13a3:'',p13a4:'',p13a5:'',p13a6:'',p13a7:'',p13a8:'',
+ p13a9:'',p13aa:'',p13ab:'',p13ac:'',p13ad:'',p13ae:'',p13af:'',p13b0:'',
+ p13b1:'',p13b2:'',p13b3:'',p13b4:'',p13b5:'',p13b6:'',p13b7:'',p13b8:'',
+ p13b9:'',p13ba:'',p13bb:'',p13bc:'',p13bd:'',p13be:'',p13bf:'',p13c0:'',
+ p13c1:'',p13c2:'',p13c3:'',p13c4:'',p13c5:'',p13c6:'',p13c7:'',p13c8:'',
+ p13c9:'',p13ca:'',p13cb:'',p13cc:'',p13cd:'',p13ce:'',p13cf:'',p13d0:'',
+ p13d1:'',p13d2:'',p13d3:'',p13d4:'',p13d5:'',p13d6:'',p13d7:'',p13d8:'',
+ p13d9:'',p13da:'',p13db:'',p13dc:'',p13dd:'',p13de:'',p13df:'',p13e0:'',
+ p13e1:'',p13e2:'',p13e3:'',p13e4:'',p13e5:'',p13e6:'',p13e7:'',p13e8:'',
+ p13e9:'',p13ea:'',p13eb:'',p13ec:'',p13ed:'',p13ee:'',p13ef:'',p13f0:'',
+ p13f1:'',p13f2:'',p13f3:'',p13f4:'',p13f5:'',p13f6:'',p13f7:'',p13f8:'',
+ p13f9:'',p13fa:'',p13fb:'',p13fc:'',p13fd:'',p13fe:'',p13ff:'',p1400:'',
+ p1401:'',p1402:'',p1403:'',p1404:'',p1405:'',p1406:'',p1407:'',p1408:'',
+ p1409:'',p140a:'',p140b:'',p140c:'',p140d:'',p140e:'',p140f:'',p1410:'',
+ p1411:'',p1412:'',p1413:'',p1414:'',p1415:'',p1416:'',p1417:'',p1418:'',
+ p1419:'',p141a:'',p141b:'',p141c:'',p141d:'',p141e:'',p141f:'',p1420:'',
+ p1421:'',p1422:'',p1423:'',p1424:'',p1425:'',p1426:'',p1427:'',p1428:'',
+ p1429:'',p142a:'',p142b:'',p142c:'',p142d:'',p142e:'',p142f:'',p1430:'',
+ p1431:'',p1432:'',p1433:'',p1434:'',p1435:'',p1436:'',p1437:'',p1438:'',
+ p1439:'',p143a:'',p143b:'',p143c:'',p143d:'',p143e:'',p143f:'',p1440:'',
+ p1441:'',p1442:'',p1443:'',p1444:'',p1445:'',p1446:'',p1447:'',p1448:'',
+ p1449:'',p144a:'',p144b:'',p144c:'',p144d:'',p144e:'',p144f:'',p1450:'',
+ p1451:'',p1452:'',p1453:'',p1454:'',p1455:'',p1456:'',p1457:'',p1458:'',
+ p1459:'',p145a:'',p145b:'',p145c:'',p145d:'',p145e:'',p145f:'',p1460:'',
+ p1461:'',p1462:'',p1463:'',p1464:'',p1465:'',p1466:'',p1467:'',p1468:'',
+ p1469:'',p146a:'',p146b:'',p146c:'',p146d:'',p146e:'',p146f:'',p1470:'',
+ p1471:'',p1472:'',p1473:'',p1474:'',p1475:'',p1476:'',p1477:'',p1478:'',
+ p1479:'',p147a:'',p147b:'',p147c:'',p147d:'',p147e:'',p147f:'',p1480:'',
+ p1481:'',p1482:'',p1483:'',p1484:'',p1485:'',p1486:'',p1487:'',p1488:'',
+ p1489:'',p148a:'',p148b:'',p148c:'',p148d:'',p148e:'',p148f:'',p1490:'',
+ p1491:'',p1492:'',p1493:'',p1494:'',p1495:'',p1496:'',p1497:'',p1498:'',
+ p1499:'',p149a:'',p149b:'',p149c:'',p149d:'',p149e:'',p149f:'',p14a0:'',
+ p14a1:'',p14a2:'',p14a3:'',p14a4:'',p14a5:'',p14a6:'',p14a7:'',p14a8:'',
+ p14a9:'',p14aa:'',p14ab:'',p14ac:'',p14ad:'',p14ae:'',p14af:'',p14b0:'',
+ p14b1:'',p14b2:'',p14b3:'',p14b4:'',p14b5:'',p14b6:'',p14b7:'',p14b8:'',
+ p14b9:'',p14ba:'',p14bb:'',p14bc:'',p14bd:'',p14be:'',p14bf:'',p14c0:'',
+ p14c1:'',p14c2:'',p14c3:'',p14c4:'',p14c5:'',p14c6:'',p14c7:'',p14c8:'',
+ p14c9:'',p14ca:'',p14cb:'',p14cc:'',p14cd:'',p14ce:'',p14cf:'',p14d0:'',
+ p14d1:'',p14d2:'',p14d3:'',p14d4:'',p14d5:'',p14d6:'',p14d7:'',p14d8:'',
+ p14d9:'',p14da:'',p14db:'',p14dc:'',p14dd:'',p14de:'',p14df:'',p14e0:'',
+ p14e1:'',p14e2:'',p14e3:'',p14e4:'',p14e5:'',p14e6:'',p14e7:'',p14e8:'',
+ p14e9:'',p14ea:'',p14eb:'',p14ec:'',p14ed:'',p14ee:'',p14ef:'',p14f0:'',
+ p14f1:'',p14f2:'',p14f3:'',p14f4:'',p14f5:'',p14f6:'',p14f7:'',p14f8:'',
+ p14f9:'',p14fa:'',p14fb:'',p14fc:'',p14fd:'',p14fe:'',p14ff:'',p1500:'',
+ p1501:'',p1502:'',p1503:'',p1504:'',p1505:'',p1506:'',p1507:'',p1508:'',
+ p1509:'',p150a:'',p150b:'',p150c:'',p150d:'',p150e:'',p150f:'',p1510:'',
+ p1511:'',p1512:'',p1513:'',p1514:'',p1515:'',p1516:'',p1517:'',p1518:'',
+ p1519:'',p151a:'',p151b:'',p151c:'',p151d:'',p151e:'',p151f:'',p1520:'',
+ p1521:'',p1522:'',p1523:'',p1524:'',p1525:'',p1526:'',p1527:'',p1528:'',
+ p1529:'',p152a:'',p152b:'',p152c:'',p152d:'',p152e:'',p152f:'',p1530:'',
+ p1531:'',p1532:'',p1533:'',p1534:'',p1535:'',p1536:'',p1537:'',p1538:'',
+ p1539:'',p153a:'',p153b:'',p153c:'',p153d:'',p153e:'',p153f:'',p1540:'',
+ p1541:'',p1542:'',p1543:'',p1544:'',p1545:'',p1546:'',p1547:'',p1548:'',
+ p1549:'',p154a:'',p154b:'',p154c:'',p154d:'',p154e:'',p154f:'',p1550:'',
+ p1551:'',p1552:'',p1553:'',p1554:'',p1555:'',p1556:'',p1557:'',p1558:'',
+ p1559:'',p155a:'',p155b:'',p155c:'',p155d:'',p155e:'',p155f:'',p1560:'',
+ p1561:'',p1562:'',p1563:'',p1564:'',p1565:'',p1566:'',p1567:'',p1568:'',
+ p1569:'',p156a:'',p156b:'',p156c:'',p156d:'',p156e:'',p156f:'',p1570:'',
+ p1571:'',p1572:'',p1573:'',p1574:'',p1575:'',p1576:'',p1577:'',p1578:'',
+ p1579:'',p157a:'',p157b:'',p157c:'',p157d:'',p157e:'',p157f:'',p1580:'',
+ p1581:'',p1582:'',p1583:'',p1584:'',p1585:'',p1586:'',p1587:'',p1588:'',
+ p1589:'',p158a:'',p158b:'',p158c:'',p158d:'',p158e:'',p158f:'',p1590:'',
+ p1591:'',p1592:'',p1593:'',p1594:'',p1595:'',p1596:'',p1597:'',p1598:'',
+ p1599:'',p159a:'',p159b:'',p159c:'',p159d:'',p159e:'',p159f:'',p15a0:'',
+ p15a1:'',p15a2:'',p15a3:'',p15a4:'',p15a5:'',p15a6:'',p15a7:'',p15a8:'',
+ p15a9:'',p15aa:'',p15ab:'',p15ac:'',p15ad:'',p15ae:'',p15af:'',p15b0:'',
+ p15b1:'',p15b2:'',p15b3:'',p15b4:'',p15b5:'',p15b6:'',p15b7:'',p15b8:'',
+ p15b9:'',p15ba:'',p15bb:'',p15bc:'',p15bd:'',p15be:'',p15bf:'',p15c0:'',
+ p15c1:'',p15c2:'',p15c3:'',p15c4:'',p15c5:'',p15c6:'',p15c7:'',p15c8:'',
+ p15c9:'',p15ca:'',p15cb:'',p15cc:'',p15cd:'',p15ce:'',p15cf:'',p15d0:'',
+ p15d1:'',p15d2:'',p15d3:'',p15d4:'',p15d5:'',p15d6:'',p15d7:'',p15d8:'',
+ p15d9:'',p15da:'',p15db:'',p15dc:'',p15dd:'',p15de:'',p15df:'',p15e0:'',
+ p15e1:'',p15e2:'',p15e3:'',p15e4:'',p15e5:'',p15e6:'',p15e7:'',p15e8:'',
+ p15e9:'',p15ea:'',p15eb:'',p15ec:'',p15ed:'',p15ee:'',p15ef:'',p15f0:'',
+ p15f1:'',p15f2:'',p15f3:'',p15f4:'',p15f5:'',p15f6:'',p15f7:'',p15f8:'',
+ p15f9:'',p15fa:'',p15fb:'',p15fc:'',p15fd:'',p15fe:'',p15ff:'',p1600:'',
+ p1601:'',p1602:'',p1603:'',p1604:'',p1605:'',p1606:'',p1607:'',p1608:'',
+ p1609:'',p160a:'',p160b:'',p160c:'',p160d:'',p160e:'',p160f:'',p1610:'',
+ p1611:'',p1612:'',p1613:'',p1614:'',p1615:'',p1616:'',p1617:'',p1618:'',
+ p1619:'',p161a:'',p161b:'',p161c:'',p161d:'',p161e:'',p161f:'',p1620:'',
+ p1621:'',p1622:'',p1623:'',p1624:'',p1625:'',p1626:'',p1627:'',p1628:'',
+ p1629:'',p162a:'',p162b:'',p162c:'',p162d:'',p162e:'',p162f:'',p1630:'',
+ p1631:'',p1632:'',p1633:'',p1634:'',p1635:'',p1636:'',p1637:'',p1638:'',
+ p1639:'',p163a:'',p163b:'',p163c:'',p163d:'',p163e:'',p163f:'',p1640:'',
+ p1641:'',p1642:'',p1643:'',p1644:'',p1645:'',p1646:'',p1647:'',p1648:'',
+ p1649:'',p164a:'',p164b:'',p164c:'',p164d:'',p164e:'',p164f:'',p1650:'',
+ p1651:'',p1652:'',p1653:'',p1654:'',p1655:'',p1656:'',p1657:'',p1658:'',
+ p1659:'',p165a:'',p165b:'',p165c:'',p165d:'',p165e:'',p165f:'',p1660:'',
+ p1661:'',p1662:'',p1663:'',p1664:'',p1665:'',p1666:'',p1667:'',p1668:'',
+ p1669:'',p166a:'',p166b:'',p166c:'',p166d:'',p166e:'',p166f:'',p1670:'',
+ p1671:'',p1672:'',p1673:'',p1674:'',p1675:'',p1676:'',p1677:'',p1678:'',
+ p1679:'',p167a:'',p167b:'',p167c:'',p167d:'',p167e:'',p167f:'',p1680:'',
+ p1681:'',p1682:'',p1683:'',p1684:'',p1685:'',p1686:'',p1687:'',p1688:'',
+ p1689:'',p168a:'',p168b:'',p168c:'',p168d:'',p168e:'',p168f:'',p1690:'',
+ p1691:'',p1692:'',p1693:'',p1694:'',p1695:'',p1696:'',p1697:'',p1698:'',
+ p1699:'',p169a:'',p169b:'',p169c:'',p169d:'',p169e:'',p169f:'',p16a0:'',
+ p16a1:'',p16a2:'',p16a3:'',p16a4:'',p16a5:'',p16a6:'',p16a7:'',p16a8:'',
+ p16a9:'',p16aa:'',p16ab:'',p16ac:'',p16ad:'',p16ae:'',p16af:'',p16b0:'',
+ p16b1:'',p16b2:'',p16b3:'',p16b4:'',p16b5:'',p16b6:'',p16b7:'',p16b8:'',
+ p16b9:'',p16ba:'',p16bb:'',p16bc:'',p16bd:'',p16be:'',p16bf:'',p16c0:'',
+ p16c1:'',p16c2:'',p16c3:'',p16c4:'',p16c5:'',p16c6:'',p16c7:'',p16c8:'',
+ p16c9:'',p16ca:'',p16cb:'',p16cc:'',p16cd:'',p16ce:'',p16cf:'',p16d0:'',
+ p16d1:'',p16d2:'',p16d3:'',p16d4:'',p16d5:'',p16d6:'',p16d7:'',p16d8:'',
+ p16d9:'',p16da:'',p16db:'',p16dc:'',p16dd:'',p16de:'',p16df:'',p16e0:'',
+ p16e1:'',p16e2:'',p16e3:'',p16e4:'',p16e5:'',p16e6:'',p16e7:'',p16e8:'',
+ p16e9:'',p16ea:'',p16eb:'',p16ec:'',p16ed:'',p16ee:'',p16ef:'',p16f0:'',
+ p16f1:'',p16f2:'',p16f3:'',p16f4:'',p16f5:'',p16f6:'',p16f7:'',p16f8:'',
+ p16f9:'',p16fa:'',p16fb:'',p16fc:'',p16fd:'',p16fe:'',p16ff:'',p1700:'',
+ p1701:'',p1702:'',p1703:'',p1704:'',p1705:'',p1706:'',p1707:'',p1708:'',
+ p1709:'',p170a:'',p170b:'',p170c:'',p170d:'',p170e:'',p170f:'',p1710:'',
+ p1711:'',p1712:'',p1713:'',p1714:'',p1715:'',p1716:'',p1717:'',p1718:'',
+ p1719:'',p171a:'',p171b:'',p171c:'',p171d:'',p171e:'',p171f:'',p1720:'',
+ p1721:'',p1722:'',p1723:'',p1724:'',p1725:'',p1726:'',p1727:'',p1728:'',
+ p1729:'',p172a:'',p172b:'',p172c:'',p172d:'',p172e:'',p172f:'',p1730:'',
+ p1731:'',p1732:'',p1733:'',p1734:'',p1735:'',p1736:'',p1737:'',p1738:'',
+ p1739:'',p173a:'',p173b:'',p173c:'',p173d:'',p173e:'',p173f:'',p1740:'',
+ p1741:'',p1742:'',p1743:'',p1744:'',p1745:'',p1746:'',p1747:'',p1748:'',
+ p1749:'',p174a:'',p174b:'',p174c:'',p174d:'',p174e:'',p174f:'',p1750:'',
+ p1751:'',p1752:'',p1753:'',p1754:'',p1755:'',p1756:'',p1757:'',p1758:'',
+ p1759:'',p175a:'',p175b:'',p175c:'',p175d:'',p175e:'',p175f:'',p1760:'',
+ p1761:'',p1762:'',p1763:'',p1764:'',p1765:'',p1766:'',p1767:'',p1768:'',
+ p1769:'',p176a:'',p176b:'',p176c:'',p176d:'',p176e:'',p176f:'',p1770:'',
+ p1771:'',p1772:'',p1773:'',p1774:'',p1775:'',p1776:'',p1777:'',p1778:'',
+ p1779:'',p177a:'',p177b:'',p177c:'',p177d:'',p177e:'',p177f:'',p1780:'',
+ p1781:'',p1782:'',p1783:'',p1784:'',p1785:'',p1786:'',p1787:'',p1788:'',
+ p1789:'',p178a:'',p178b:'',p178c:'',p178d:'',p178e:'',p178f:'',p1790:'',
+ p1791:'',p1792:'',p1793:'',p1794:'',p1795:'',p1796:'',p1797:'',p1798:'',
+ p1799:'',p179a:'',p179b:'',p179c:'',p179d:'',p179e:'',p179f:'',p17a0:'',
+ p17a1:'',p17a2:'',p17a3:'',p17a4:'',p17a5:'',p17a6:'',p17a7:'',p17a8:'',
+ p17a9:'',p17aa:'',p17ab:'',p17ac:'',p17ad:'',p17ae:'',p17af:'',p17b0:'',
+ p17b1:'',p17b2:'',p17b3:'',p17b4:'',p17b5:'',p17b6:'',p17b7:'',p17b8:'',
+ p17b9:'',p17ba:'',p17bb:'',p17bc:'',p17bd:'',p17be:'',p17bf:'',p17c0:'',
+ p17c1:'',p17c2:'',p17c3:'',p17c4:'',p17c5:'',p17c6:'',p17c7:'',p17c8:'',
+ p17c9:'',p17ca:'',p17cb:'',p17cc:'',p17cd:'',p17ce:'',p17cf:'',p17d0:'',
+ p17d1:'',p17d2:'',p17d3:'',p17d4:'',p17d5:'',p17d6:'',p17d7:'',p17d8:'',
+ p17d9:'',p17da:'',p17db:'',p17dc:'',p17dd:'',p17de:'',p17df:'',p17e0:'',
+ p17e1:'',p17e2:'',p17e3:'',p17e4:'',p17e5:'',p17e6:'',p17e7:'',p17e8:'',
+ p17e9:'',p17ea:'',p17eb:'',p17ec:'',p17ed:'',p17ee:'',p17ef:'',p17f0:'',
+ p17f1:'',p17f2:'',p17f3:'',p17f4:'',p17f5:'',p17f6:'',p17f7:'',p17f8:'',
+ p17f9:'',p17fa:'',p17fb:'',p17fc:'',p17fd:'',p17fe:'',p17ff:'',p1800:'',
+ p1801:'',p1802:'',p1803:'',p1804:'',p1805:'',p1806:'',p1807:'',p1808:'',
+ p1809:'',p180a:'',p180b:'',p180c:'',p180d:'',p180e:'',p180f:'',p1810:'',
+ p1811:'',p1812:'',p1813:'',p1814:'',p1815:'',p1816:'',p1817:'',p1818:'',
+ p1819:'',p181a:'',p181b:'',p181c:'',p181d:'',p181e:'',p181f:'',p1820:'',
+ p1821:'',p1822:'',p1823:'',p1824:'',p1825:'',p1826:'',p1827:'',p1828:'',
+ p1829:'',p182a:'',p182b:'',p182c:'',p182d:'',p182e:'',p182f:'',p1830:'',
+ p1831:'',p1832:'',p1833:'',p1834:'',p1835:'',p1836:'',p1837:'',p1838:'',
+ p1839:'',p183a:'',p183b:'',p183c:'',p183d:'',p183e:'',p183f:'',p1840:'',
+ p1841:'',p1842:'',p1843:'',p1844:'',p1845:'',p1846:'',p1847:'',p1848:'',
+ p1849:'',p184a:'',p184b:'',p184c:'',p184d:'',p184e:'',p184f:'',p1850:'',
+ p1851:'',p1852:'',p1853:'',p1854:'',p1855:'',p1856:'',p1857:'',p1858:'',
+ p1859:'',p185a:'',p185b:'',p185c:'',p185d:'',p185e:'',p185f:'',p1860:'',
+ p1861:'',p1862:'',p1863:'',p1864:'',p1865:'',p1866:'',p1867:'',p1868:'',
+ p1869:'',p186a:'',p186b:'',p186c:'',p186d:'',p186e:'',p186f:'',p1870:'',
+ p1871:'',p1872:'',p1873:'',p1874:'',p1875:'',p1876:'',p1877:'',p1878:'',
+ p1879:'',p187a:'',p187b:'',p187c:'',p187d:'',p187e:'',p187f:'',p1880:'',
+ p1881:'',p1882:'',p1883:'',p1884:'',p1885:'',p1886:'',p1887:'',p1888:'',
+ p1889:'',p188a:'',p188b:'',p188c:'',p188d:'',p188e:'',p188f:'',p1890:'',
+ p1891:'',p1892:'',p1893:'',p1894:'',p1895:'',p1896:'',p1897:'',p1898:'',
+ p1899:'',p189a:'',p189b:'',p189c:'',p189d:'',p189e:'',p189f:'',p18a0:'',
+ p18a1:'',p18a2:'',p18a3:'',p18a4:'',p18a5:'',p18a6:'',p18a7:'',p18a8:'',
+ p18a9:'',p18aa:'',p18ab:'',p18ac:'',p18ad:'',p18ae:'',p18af:'',p18b0:'',
+ p18b1:'',p18b2:'',p18b3:'',p18b4:'',p18b5:'',p18b6:'',p18b7:'',p18b8:'',
+ p18b9:'',p18ba:'',p18bb:'',p18bc:'',p18bd:'',p18be:'',p18bf:'',p18c0:'',
+ p18c1:'',p18c2:'',p18c3:'',p18c4:'',p18c5:'',p18c6:'',p18c7:'',p18c8:'',
+ p18c9:'',p18ca:'',p18cb:'',p18cc:'',p18cd:'',p18ce:'',p18cf:'',p18d0:'',
+ p18d1:'',p18d2:'',p18d3:'',p18d4:'',p18d5:'',p18d6:'',p18d7:'',p18d8:'',
+ p18d9:'',p18da:'',p18db:'',p18dc:'',p18dd:'',p18de:'',p18df:'',p18e0:'',
+ p18e1:'',p18e2:'',p18e3:'',p18e4:'',p18e5:'',p18e6:'',p18e7:'',p18e8:'',
+ p18e9:'',p18ea:'',p18eb:'',p18ec:'',p18ed:'',p18ee:'',p18ef:'',p18f0:'',
+ p18f1:'',p18f2:'',p18f3:'',p18f4:'',p18f5:'',p18f6:'',p18f7:'',p18f8:'',
+ p18f9:'',p18fa:'',p18fb:'',p18fc:'',p18fd:'',p18fe:'',p18ff:'',p1900:'',
+ p1901:'',p1902:'',p1903:'',p1904:'',p1905:'',p1906:'',p1907:'',p1908:'',
+ p1909:'',p190a:'',p190b:'',p190c:'',p190d:'',p190e:'',p190f:'',p1910:'',
+ p1911:'',p1912:'',p1913:'',p1914:'',p1915:'',p1916:'',p1917:'',p1918:'',
+ p1919:'',p191a:'',p191b:'',p191c:'',p191d:'',p191e:'',p191f:'',p1920:'',
+ p1921:'',p1922:'',p1923:'',p1924:'',p1925:'',p1926:'',p1927:'',p1928:'',
+ p1929:'',p192a:'',p192b:'',p192c:'',p192d:'',p192e:'',p192f:'',p1930:'',
+ p1931:'',p1932:'',p1933:'',p1934:'',p1935:'',p1936:'',p1937:'',p1938:'',
+ p1939:'',p193a:'',p193b:'',p193c:'',p193d:'',p193e:'',p193f:'',p1940:'',
+ p1941:'',p1942:'',p1943:'',p1944:'',p1945:'',p1946:'',p1947:'',p1948:'',
+ p1949:'',p194a:'',p194b:'',p194c:'',p194d:'',p194e:'',p194f:'',p1950:'',
+ p1951:'',p1952:'',p1953:'',p1954:'',p1955:'',p1956:'',p1957:'',p1958:'',
+ p1959:'',p195a:'',p195b:'',p195c:'',p195d:'',p195e:'',p195f:'',p1960:'',
+ p1961:'',p1962:'',p1963:'',p1964:'',p1965:'',p1966:'',p1967:'',p1968:'',
+ p1969:'',p196a:'',p196b:'',p196c:'',p196d:'',p196e:'',p196f:'',p1970:'',
+ p1971:'',p1972:'',p1973:'',p1974:'',p1975:'',p1976:'',p1977:'',p1978:'',
+ p1979:'',p197a:'',p197b:'',p197c:'',p197d:'',p197e:'',p197f:'',p1980:'',
+ p1981:'',p1982:'',p1983:'',p1984:'',p1985:'',p1986:'',p1987:'',p1988:'',
+ p1989:'',p198a:'',p198b:'',p198c:'',p198d:'',p198e:'',p198f:'',p1990:'',
+ p1991:'',p1992:'',p1993:'',p1994:'',p1995:'',p1996:'',p1997:'',p1998:'',
+ p1999:'',p199a:'',p199b:'',p199c:'',p199d:'',p199e:'',p199f:'',p19a0:'',
+ p19a1:'',p19a2:'',p19a3:'',p19a4:'',p19a5:'',p19a6:'',p19a7:'',p19a8:'',
+ p19a9:'',p19aa:'',p19ab:'',p19ac:'',p19ad:'',p19ae:'',p19af:'',p19b0:'',
+ p19b1:'',p19b2:'',p19b3:'',p19b4:'',p19b5:'',p19b6:'',p19b7:'',p19b8:'',
+ p19b9:'',p19ba:'',p19bb:'',p19bc:'',p19bd:'',p19be:'',p19bf:'',p19c0:'',
+ p19c1:'',p19c2:'',p19c3:'',p19c4:'',p19c5:'',p19c6:'',p19c7:'',p19c8:'',
+ p19c9:'',p19ca:'',p19cb:'',p19cc:'',p19cd:'',p19ce:'',p19cf:'',p19d0:'',
+ p19d1:'',p19d2:'',p19d3:'',p19d4:'',p19d5:'',p19d6:'',p19d7:'',p19d8:'',
+ p19d9:'',p19da:'',p19db:'',p19dc:'',p19dd:'',p19de:'',p19df:'',p19e0:'',
+ p19e1:'',p19e2:'',p19e3:'',p19e4:'',p19e5:'',p19e6:'',p19e7:'',p19e8:'',
+ p19e9:'',p19ea:'',p19eb:'',p19ec:'',p19ed:'',p19ee:'',p19ef:'',p19f0:'',
+ p19f1:'',p19f2:'',p19f3:'',p19f4:'',p19f5:'',p19f6:'',p19f7:'',p19f8:'',
+ p19f9:'',p19fa:'',p19fb:'',p19fc:'',p19fd:'',p19fe:'',p19ff:'',p1a00:'',
+ p1a01:'',p1a02:'',p1a03:'',p1a04:'',p1a05:'',p1a06:'',p1a07:'',p1a08:'',
+ p1a09:'',p1a0a:'',p1a0b:'',p1a0c:'',p1a0d:'',p1a0e:'',p1a0f:'',p1a10:'',
+ p1a11:'',p1a12:'',p1a13:'',p1a14:'',p1a15:'',p1a16:'',p1a17:'',p1a18:'',
+ p1a19:'',p1a1a:'',p1a1b:'',p1a1c:'',p1a1d:'',p1a1e:'',p1a1f:'',p1a20:'',
+ p1a21:'',p1a22:'',p1a23:'',p1a24:'',p1a25:'',p1a26:'',p1a27:'',p1a28:'',
+ p1a29:'',p1a2a:'',p1a2b:'',p1a2c:'',p1a2d:'',p1a2e:'',p1a2f:'',p1a30:'',
+ p1a31:'',p1a32:'',p1a33:'',p1a34:'',p1a35:'',p1a36:'',p1a37:'',p1a38:'',
+ p1a39:'',p1a3a:'',p1a3b:'',p1a3c:'',p1a3d:'',p1a3e:'',p1a3f:'',p1a40:'',
+ p1a41:'',p1a42:'',p1a43:'',p1a44:'',p1a45:'',p1a46:'',p1a47:'',p1a48:'',
+ p1a49:'',p1a4a:'',p1a4b:'',p1a4c:'',p1a4d:'',p1a4e:'',p1a4f:'',p1a50:'',
+ p1a51:'',p1a52:'',p1a53:'',p1a54:'',p1a55:'',p1a56:'',p1a57:'',p1a58:'',
+ p1a59:'',p1a5a:'',p1a5b:'',p1a5c:'',p1a5d:'',p1a5e:'',p1a5f:'',p1a60:'',
+ p1a61:'',p1a62:'',p1a63:'',p1a64:'',p1a65:'',p1a66:'',p1a67:'',p1a68:'',
+ p1a69:'',p1a6a:'',p1a6b:'',p1a6c:'',p1a6d:'',p1a6e:'',p1a6f:'',p1a70:'',
+ p1a71:'',p1a72:'',p1a73:'',p1a74:'',p1a75:'',p1a76:'',p1a77:'',p1a78:'',
+ p1a79:'',p1a7a:'',p1a7b:'',p1a7c:'',p1a7d:'',p1a7e:'',p1a7f:'',p1a80:'',
+ p1a81:'',p1a82:'',p1a83:'',p1a84:'',p1a85:'',p1a86:'',p1a87:'',p1a88:'',
+ p1a89:'',p1a8a:'',p1a8b:'',p1a8c:'',p1a8d:'',p1a8e:'',p1a8f:'',p1a90:'',
+ p1a91:'',p1a92:'',p1a93:'',p1a94:'',p1a95:'',p1a96:'',p1a97:'',p1a98:'',
+ p1a99:'',p1a9a:'',p1a9b:'',p1a9c:'',p1a9d:'',p1a9e:'',p1a9f:'',p1aa0:'',
+ p1aa1:'',p1aa2:'',p1aa3:'',p1aa4:'',p1aa5:'',p1aa6:'',p1aa7:'',p1aa8:'',
+ p1aa9:'',p1aaa:'',p1aab:'',p1aac:'',p1aad:'',p1aae:'',p1aaf:'',p1ab0:'',
+ p1ab1:'',p1ab2:'',p1ab3:'',p1ab4:'',p1ab5:'',p1ab6:'',p1ab7:'',p1ab8:'',
+ p1ab9:'',p1aba:'',p1abb:'',p1abc:'',p1abd:'',p1abe:'',p1abf:'',p1ac0:'',
+ p1ac1:'',p1ac2:'',p1ac3:'',p1ac4:'',p1ac5:'',p1ac6:'',p1ac7:'',p1ac8:'',
+ p1ac9:'',p1aca:'',p1acb:'',p1acc:'',p1acd:'',p1ace:'',p1acf:'',p1ad0:'',
+ p1ad1:'',p1ad2:'',p1ad3:'',p1ad4:'',p1ad5:'',p1ad6:'',p1ad7:'',p1ad8:'',
+ p1ad9:'',p1ada:'',p1adb:'',p1adc:'',p1add:'',p1ade:'',p1adf:'',p1ae0:'',
+ p1ae1:'',p1ae2:'',p1ae3:'',p1ae4:'',p1ae5:'',p1ae6:'',p1ae7:'',p1ae8:'',
+ p1ae9:'',p1aea:'',p1aeb:'',p1aec:'',p1aed:'',p1aee:'',p1aef:'',p1af0:'',
+ p1af1:'',p1af2:'',p1af3:'',p1af4:'',p1af5:'',p1af6:'',p1af7:'',p1af8:'',
+ p1af9:'',p1afa:'',p1afb:'',p1afc:'',p1afd:'',p1afe:'',p1aff:'',p1b00:'',
+ p1b01:'',p1b02:'',p1b03:'',p1b04:'',p1b05:'',p1b06:'',p1b07:'',p1b08:'',
+ p1b09:'',p1b0a:'',p1b0b:'',p1b0c:'',p1b0d:'',p1b0e:'',p1b0f:'',p1b10:'',
+ p1b11:'',p1b12:'',p1b13:'',p1b14:'',p1b15:'',p1b16:'',p1b17:'',p1b18:'',
+ p1b19:'',p1b1a:'',p1b1b:'',p1b1c:'',p1b1d:'',p1b1e:'',p1b1f:'',p1b20:'',
+ p1b21:'',p1b22:'',p1b23:'',p1b24:'',p1b25:'',p1b26:'',p1b27:'',p1b28:'',
+ p1b29:'',p1b2a:'',p1b2b:'',p1b2c:'',p1b2d:'',p1b2e:'',p1b2f:'',p1b30:'',
+ p1b31:'',p1b32:'',p1b33:'',p1b34:'',p1b35:'',p1b36:'',p1b37:'',p1b38:'',
+ p1b39:'',p1b3a:'',p1b3b:'',p1b3c:'',p1b3d:'',p1b3e:'',p1b3f:'',p1b40:'',
+ p1b41:'',p1b42:'',p1b43:'',p1b44:'',p1b45:'',p1b46:'',p1b47:'',p1b48:'',
+ p1b49:'',p1b4a:'',p1b4b:'',p1b4c:'',p1b4d:'',p1b4e:'',p1b4f:'',p1b50:'',
+ p1b51:'',p1b52:'',p1b53:'',p1b54:'',p1b55:'',p1b56:'',p1b57:'',p1b58:'',
+ p1b59:'',p1b5a:'',p1b5b:'',p1b5c:'',p1b5d:'',p1b5e:'',p1b5f:'',p1b60:'',
+ p1b61:'',p1b62:'',p1b63:'',p1b64:'',p1b65:'',p1b66:'',p1b67:'',p1b68:'',
+ p1b69:'',p1b6a:'',p1b6b:'',p1b6c:'',p1b6d:'',p1b6e:'',p1b6f:'',p1b70:'',
+ p1b71:'',p1b72:'',p1b73:'',p1b74:'',p1b75:'',p1b76:'',p1b77:'',p1b78:'',
+ p1b79:'',p1b7a:'',p1b7b:'',p1b7c:'',p1b7d:'',p1b7e:'',p1b7f:'',p1b80:'',
+ p1b81:'',p1b82:'',p1b83:'',p1b84:'',p1b85:'',p1b86:'',p1b87:'',p1b88:'',
+ p1b89:'',p1b8a:'',p1b8b:'',p1b8c:'',p1b8d:'',p1b8e:'',p1b8f:'',p1b90:'',
+ p1b91:'',p1b92:'',p1b93:'',p1b94:'',p1b95:'',p1b96:'',p1b97:'',p1b98:'',
+ p1b99:'',p1b9a:'',p1b9b:'',p1b9c:'',p1b9d:'',p1b9e:'',p1b9f:'',p1ba0:'',
+ p1ba1:'',p1ba2:'',p1ba3:'',p1ba4:'',p1ba5:'',p1ba6:'',p1ba7:'',p1ba8:'',
+ p1ba9:'',p1baa:'',p1bab:'',p1bac:'',p1bad:'',p1bae:'',p1baf:'',p1bb0:'',
+ p1bb1:'',p1bb2:'',p1bb3:'',p1bb4:'',p1bb5:'',p1bb6:'',p1bb7:'',p1bb8:'',
+ p1bb9:'',p1bba:'',p1bbb:'',p1bbc:'',p1bbd:'',p1bbe:'',p1bbf:'',p1bc0:'',
+ p1bc1:'',p1bc2:'',p1bc3:'',p1bc4:'',p1bc5:'',p1bc6:'',p1bc7:'',p1bc8:'',
+ p1bc9:'',p1bca:'',p1bcb:'',p1bcc:'',p1bcd:'',p1bce:'',p1bcf:'',p1bd0:'',
+ p1bd1:'',p1bd2:'',p1bd3:'',p1bd4:'',p1bd5:'',p1bd6:'',p1bd7:'',p1bd8:'',
+ p1bd9:'',p1bda:'',p1bdb:'',p1bdc:'',p1bdd:'',p1bde:'',p1bdf:'',p1be0:'',
+ p1be1:'',p1be2:'',p1be3:'',p1be4:'',p1be5:'',p1be6:'',p1be7:'',p1be8:'',
+ p1be9:'',p1bea:'',p1beb:'',p1bec:'',p1bed:'',p1bee:'',p1bef:'',p1bf0:'',
+ p1bf1:'',p1bf2:'',p1bf3:'',p1bf4:'',p1bf5:'',p1bf6:'',p1bf7:'',p1bf8:'',
+ p1bf9:'',p1bfa:'',p1bfb:'',p1bfc:'',p1bfd:'',p1bfe:'',p1bff:'',p1c00:'',
+ p1c01:'',p1c02:'',p1c03:'',p1c04:'',p1c05:'',p1c06:'',p1c07:'',p1c08:'',
+ p1c09:'',p1c0a:'',p1c0b:'',p1c0c:'',p1c0d:'',p1c0e:'',p1c0f:'',p1c10:'',
+ p1c11:'',p1c12:'',p1c13:'',p1c14:'',p1c15:'',p1c16:'',p1c17:'',p1c18:'',
+ p1c19:'',p1c1a:'',p1c1b:'',p1c1c:'',p1c1d:'',p1c1e:'',p1c1f:'',p1c20:'',
+ p1c21:'',p1c22:'',p1c23:'',p1c24:'',p1c25:'',p1c26:'',p1c27:'',p1c28:'',
+ p1c29:'',p1c2a:'',p1c2b:'',p1c2c:'',p1c2d:'',p1c2e:'',p1c2f:'',p1c30:'',
+ p1c31:'',p1c32:'',p1c33:'',p1c34:'',p1c35:'',p1c36:'',p1c37:'',p1c38:'',
+ p1c39:'',p1c3a:'',p1c3b:'',p1c3c:'',p1c3d:'',p1c3e:'',p1c3f:'',p1c40:'',
+ p1c41:'',p1c42:'',p1c43:'',p1c44:'',p1c45:'',p1c46:'',p1c47:'',p1c48:'',
+ p1c49:'',p1c4a:'',p1c4b:'',p1c4c:'',p1c4d:'',p1c4e:'',p1c4f:'',p1c50:'',
+ p1c51:'',p1c52:'',p1c53:'',p1c54:'',p1c55:'',p1c56:'',p1c57:'',p1c58:'',
+ p1c59:'',p1c5a:'',p1c5b:'',p1c5c:'',p1c5d:'',p1c5e:'',p1c5f:'',p1c60:'',
+ p1c61:'',p1c62:'',p1c63:'',p1c64:'',p1c65:'',p1c66:'',p1c67:'',p1c68:'',
+ p1c69:'',p1c6a:'',p1c6b:'',p1c6c:'',p1c6d:'',p1c6e:'',p1c6f:'',p1c70:'',
+ p1c71:'',p1c72:'',p1c73:'',p1c74:'',p1c75:'',p1c76:'',p1c77:'',p1c78:'',
+ p1c79:'',p1c7a:'',p1c7b:'',p1c7c:'',p1c7d:'',p1c7e:'',p1c7f:'',p1c80:'',
+ p1c81:'',p1c82:'',p1c83:'',p1c84:'',p1c85:'',p1c86:'',p1c87:'',p1c88:'',
+ p1c89:'',p1c8a:'',p1c8b:'',p1c8c:'',p1c8d:'',p1c8e:'',p1c8f:'',p1c90:'',
+ p1c91:'',p1c92:'',p1c93:'',p1c94:'',p1c95:'',p1c96:'',p1c97:'',p1c98:'',
+ p1c99:'',p1c9a:'',p1c9b:'',p1c9c:'',p1c9d:'',p1c9e:'',p1c9f:'',p1ca0:'',
+ p1ca1:'',p1ca2:'',p1ca3:'',p1ca4:'',p1ca5:'',p1ca6:'',p1ca7:'',p1ca8:'',
+ p1ca9:'',p1caa:'',p1cab:'',p1cac:'',p1cad:'',p1cae:'',p1caf:'',p1cb0:'',
+ p1cb1:'',p1cb2:'',p1cb3:'',p1cb4:'',p1cb5:'',p1cb6:'',p1cb7:'',p1cb8:'',
+ p1cb9:'',p1cba:'',p1cbb:'',p1cbc:'',p1cbd:'',p1cbe:'',p1cbf:'',p1cc0:'',
+ p1cc1:'',p1cc2:'',p1cc3:'',p1cc4:'',p1cc5:'',p1cc6:'',p1cc7:'',p1cc8:'',
+ p1cc9:'',p1cca:'',p1ccb:'',p1ccc:'',p1ccd:'',p1cce:'',p1ccf:'',p1cd0:'',
+ p1cd1:'',p1cd2:'',p1cd3:'',p1cd4:'',p1cd5:'',p1cd6:'',p1cd7:'',p1cd8:'',
+ p1cd9:'',p1cda:'',p1cdb:'',p1cdc:'',p1cdd:'',p1cde:'',p1cdf:'',p1ce0:'',
+ p1ce1:'',p1ce2:'',p1ce3:'',p1ce4:'',p1ce5:'',p1ce6:'',p1ce7:'',p1ce8:'',
+ p1ce9:'',p1cea:'',p1ceb:'',p1cec:'',p1ced:'',p1cee:'',p1cef:'',p1cf0:'',
+ p1cf1:'',p1cf2:'',p1cf3:'',p1cf4:'',p1cf5:'',p1cf6:'',p1cf7:'',p1cf8:'',
+ p1cf9:'',p1cfa:'',p1cfb:'',p1cfc:'',p1cfd:'',p1cfe:'',p1cff:'',p1d00:'',
+ p1d01:'',p1d02:'',p1d03:'',p1d04:'',p1d05:'',p1d06:'',p1d07:'',p1d08:'',
+ p1d09:'',p1d0a:'',p1d0b:'',p1d0c:'',p1d0d:'',p1d0e:'',p1d0f:'',p1d10:'',
+ p1d11:'',p1d12:'',p1d13:'',p1d14:'',p1d15:'',p1d16:'',p1d17:'',p1d18:'',
+ p1d19:'',p1d1a:'',p1d1b:'',p1d1c:'',p1d1d:'',p1d1e:'',p1d1f:'',p1d20:'',
+ p1d21:'',p1d22:'',p1d23:'',p1d24:'',p1d25:'',p1d26:'',p1d27:'',p1d28:'',
+ p1d29:'',p1d2a:'',p1d2b:'',p1d2c:'',p1d2d:'',p1d2e:'',p1d2f:'',p1d30:'',
+ p1d31:'',p1d32:'',p1d33:'',p1d34:'',p1d35:'',p1d36:'',p1d37:'',p1d38:'',
+ p1d39:'',p1d3a:'',p1d3b:'',p1d3c:'',p1d3d:'',p1d3e:'',p1d3f:'',p1d40:'',
+ p1d41:'',p1d42:'',p1d43:'',p1d44:'',p1d45:'',p1d46:'',p1d47:'',p1d48:'',
+ p1d49:'',p1d4a:'',p1d4b:'',p1d4c:'',p1d4d:'',p1d4e:'',p1d4f:'',p1d50:'',
+ p1d51:'',p1d52:'',p1d53:'',p1d54:'',p1d55:'',p1d56:'',p1d57:'',p1d58:'',
+ p1d59:'',p1d5a:'',p1d5b:'',p1d5c:'',p1d5d:'',p1d5e:'',p1d5f:'',p1d60:'',
+ p1d61:'',p1d62:'',p1d63:'',p1d64:'',p1d65:'',p1d66:'',p1d67:'',p1d68:'',
+ p1d69:'',p1d6a:'',p1d6b:'',p1d6c:'',p1d6d:'',p1d6e:'',p1d6f:'',p1d70:'',
+ p1d71:'',p1d72:'',p1d73:'',p1d74:'',p1d75:'',p1d76:'',p1d77:'',p1d78:'',
+ p1d79:'',p1d7a:'',p1d7b:'',p1d7c:'',p1d7d:'',p1d7e:'',p1d7f:'',p1d80:'',
+ p1d81:'',p1d82:'',p1d83:'',p1d84:'',p1d85:'',p1d86:'',p1d87:'',p1d88:'',
+ p1d89:'',p1d8a:'',p1d8b:'',p1d8c:'',p1d8d:'',p1d8e:'',p1d8f:'',p1d90:'',
+ p1d91:'',p1d92:'',p1d93:'',p1d94:'',p1d95:'',p1d96:'',p1d97:'',p1d98:'',
+ p1d99:'',p1d9a:'',p1d9b:'',p1d9c:'',p1d9d:'',p1d9e:'',p1d9f:'',p1da0:'',
+ p1da1:'',p1da2:'',p1da3:'',p1da4:'',p1da5:'',p1da6:'',p1da7:'',p1da8:'',
+ p1da9:'',p1daa:'',p1dab:'',p1dac:'',p1dad:'',p1dae:'',p1daf:'',p1db0:'',
+ p1db1:'',p1db2:'',p1db3:'',p1db4:'',p1db5:'',p1db6:'',p1db7:'',p1db8:'',
+ p1db9:'',p1dba:'',p1dbb:'',p1dbc:'',p1dbd:'',p1dbe:'',p1dbf:'',p1dc0:'',
+ p1dc1:'',p1dc2:'',p1dc3:'',p1dc4:'',p1dc5:'',p1dc6:'',p1dc7:'',p1dc8:'',
+ p1dc9:'',p1dca:'',p1dcb:'',p1dcc:'',p1dcd:'',p1dce:'',p1dcf:'',p1dd0:'',
+ p1dd1:'',p1dd2:'',p1dd3:'',p1dd4:'',p1dd5:'',p1dd6:'',p1dd7:'',p1dd8:'',
+ p1dd9:'',p1dda:'',p1ddb:'',p1ddc:'',p1ddd:'',p1dde:'',p1ddf:'',p1de0:'',
+ p1de1:'',p1de2:'',p1de3:'',p1de4:'',p1de5:'',p1de6:'',p1de7:'',p1de8:'',
+ p1de9:'',p1dea:'',p1deb:'',p1dec:'',p1ded:'',p1dee:'',p1def:'',p1df0:'',
+ p1df1:'',p1df2:'',p1df3:'',p1df4:'',p1df5:'',p1df6:'',p1df7:'',p1df8:'',
+ p1df9:'',p1dfa:'',p1dfb:'',p1dfc:'',p1dfd:'',p1dfe:'',p1dff:'',p1e00:'',
+ p1e01:'',p1e02:'',p1e03:'',p1e04:'',p1e05:'',p1e06:'',p1e07:'',p1e08:'',
+ p1e09:'',p1e0a:'',p1e0b:'',p1e0c:'',p1e0d:'',p1e0e:'',p1e0f:'',p1e10:'',
+ p1e11:'',p1e12:'',p1e13:'',p1e14:'',p1e15:'',p1e16:'',p1e17:'',p1e18:'',
+ p1e19:'',p1e1a:'',p1e1b:'',p1e1c:'',p1e1d:'',p1e1e:'',p1e1f:'',p1e20:'',
+ p1e21:'',p1e22:'',p1e23:'',p1e24:'',p1e25:'',p1e26:'',p1e27:'',p1e28:'',
+ p1e29:'',p1e2a:'',p1e2b:'',p1e2c:'',p1e2d:'',p1e2e:'',p1e2f:'',p1e30:'',
+ p1e31:'',p1e32:'',p1e33:'',p1e34:'',p1e35:'',p1e36:'',p1e37:'',p1e38:'',
+ p1e39:'',p1e3a:'',p1e3b:'',p1e3c:'',p1e3d:'',p1e3e:'',p1e3f:'',p1e40:'',
+ p1e41:'',p1e42:'',p1e43:'',p1e44:'',p1e45:'',p1e46:'',p1e47:'',p1e48:'',
+ p1e49:'',p1e4a:'',p1e4b:'',p1e4c:'',p1e4d:'',p1e4e:'',p1e4f:'',p1e50:'',
+ p1e51:'',p1e52:'',p1e53:'',p1e54:'',p1e55:'',p1e56:'',p1e57:'',p1e58:'',
+ p1e59:'',p1e5a:'',p1e5b:'',p1e5c:'',p1e5d:'',p1e5e:'',p1e5f:'',p1e60:'',
+ p1e61:'',p1e62:'',p1e63:'',p1e64:'',p1e65:'',p1e66:'',p1e67:'',p1e68:'',
+ p1e69:'',p1e6a:'',p1e6b:'',p1e6c:'',p1e6d:'',p1e6e:'',p1e6f:'',p1e70:'',
+ p1e71:'',p1e72:'',p1e73:'',p1e74:'',p1e75:'',p1e76:'',p1e77:'',p1e78:'',
+ p1e79:'',p1e7a:'',p1e7b:'',p1e7c:'',p1e7d:'',p1e7e:'',p1e7f:'',p1e80:'',
+ p1e81:'',p1e82:'',p1e83:'',p1e84:'',p1e85:'',p1e86:'',p1e87:'',p1e88:'',
+ p1e89:'',p1e8a:'',p1e8b:'',p1e8c:'',p1e8d:'',p1e8e:'',p1e8f:'',p1e90:'',
+ p1e91:'',p1e92:'',p1e93:'',p1e94:'',p1e95:'',p1e96:'',p1e97:'',p1e98:'',
+ p1e99:'',p1e9a:'',p1e9b:'',p1e9c:'',p1e9d:'',p1e9e:'',p1e9f:'',p1ea0:'',
+ p1ea1:'',p1ea2:'',p1ea3:'',p1ea4:'',p1ea5:'',p1ea6:'',p1ea7:'',p1ea8:'',
+ p1ea9:'',p1eaa:'',p1eab:'',p1eac:'',p1ead:'',p1eae:'',p1eaf:'',p1eb0:'',
+ p1eb1:'',p1eb2:'',p1eb3:'',p1eb4:'',p1eb5:'',p1eb6:'',p1eb7:'',p1eb8:'',
+ p1eb9:'',p1eba:'',p1ebb:'',p1ebc:'',p1ebd:'',p1ebe:'',p1ebf:'',p1ec0:'',
+ p1ec1:'',p1ec2:'',p1ec3:'',p1ec4:'',p1ec5:'',p1ec6:'',p1ec7:'',p1ec8:'',
+ p1ec9:'',p1eca:'',p1ecb:'',p1ecc:'',p1ecd:'',p1ece:'',p1ecf:'',p1ed0:'',
+ p1ed1:'',p1ed2:'',p1ed3:'',p1ed4:'',p1ed5:'',p1ed6:'',p1ed7:'',p1ed8:'',
+ p1ed9:'',p1eda:'',p1edb:'',p1edc:'',p1edd:'',p1ede:'',p1edf:'',p1ee0:'',
+ p1ee1:'',p1ee2:'',p1ee3:'',p1ee4:'',p1ee5:'',p1ee6:'',p1ee7:'',p1ee8:'',
+ p1ee9:'',p1eea:'',p1eeb:'',p1eec:'',p1eed:'',p1eee:'',p1eef:'',p1ef0:'',
+ p1ef1:'',p1ef2:'',p1ef3:'',p1ef4:'',p1ef5:'',p1ef6:'',p1ef7:'',p1ef8:'',
+ p1ef9:'',p1efa:'',p1efb:'',p1efc:'',p1efd:'',p1efe:'',p1eff:'',p1f00:''
+ }
+ }
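+  // A literal with this many properties is expected to start out in
+  // dictionary (slow) mode; the checks below also verify that re-running
+  // the factory yields a distinct object with an identical key set.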
+ let object = createObject();
+ assertFalse(%HasFastProperties(object ));
+ assertEquals(Object.getPrototypeOf(object ), null);
+ let keys = Object.keys(object);
+ // modify original object
+ object['new_property'] = {};
+ object[1] = 12;
+
+ let object2 = createObject();
+ assertFalse(object === object2 );
+ assertFalse(%HasFastProperties(object2 ));
+ assertEquals(Object.getPrototypeOf(object2), null);
+ assertEquals(keys, Object.keys(object2));
+})();
+
(function TestPrototypeInObjectLiteral() {
// The prototype chain should not be used if the definition
@@ -282,22 +1500,3 @@ TestNumericNamesSetter(['1.2', '1.3'], {
delete Object.prototype.c;
})();
-
-(function TestProxyWithDefinitionInObjectLiteral() {
- // Trap for set should not be used if the definition
- // happens in the object literal.
- var handler = {
- set: function(target, name, value) {
- }
- };
-
- const prop = 'a';
-
- var p = new Proxy({}, handler);
- p[prop] = 'my value';
- assertEquals(undefined, p[prop]);
-
-
- var l = new Proxy({[prop]: 'my value'}, handler);
- assertEquals('my value', l[prop]);
-})();
diff --git a/deps/v8/test/mjsunit/object-seal.js b/deps/v8/test/mjsunit/object-seal.js
index a901b1f480..f685b41927 100644
--- a/deps/v8/test/mjsunit/object-seal.js
+++ b/deps/v8/test/mjsunit/object-seal.js
@@ -28,7 +28,7 @@
// Tests the Object.seal and Object.isSealed methods - ES 19.1.2.17 and
// ES 19.1.2.13
-// Flags: --allow-natives-syntax --crankshaft --noalways-opt
+// Flags: --allow-natives-syntax --opt --noalways-opt
// Test that we return obj if non-object is passed as argument
var non_objects = new Array(undefined, null, 1, -1, 0, 42.43, Symbol("test"));
diff --git a/deps/v8/test/mjsunit/osr-elements-kind.js b/deps/v8/test/mjsunit/osr-elements-kind.js
index aee7017134..3f27bf2295 100644
--- a/deps/v8/test/mjsunit/osr-elements-kind.js
+++ b/deps/v8/test/mjsunit/osr-elements-kind.js
@@ -116,7 +116,7 @@ function construct_doubles() {
return a;
}
-// Test transition chain SMI->DOUBLE->FAST (crankshafted function will
+// Test transition chain SMI->DOUBLE->FAST (optimized function will
// transition to FAST directly).
function convert_mixed(array, value, kind) {
array[1] = value;
diff --git a/deps/v8/test/mjsunit/parse-tasks.js b/deps/v8/test/mjsunit/parse-tasks.js
new file mode 100644
index 0000000000..11b48ebe0e
--- /dev/null
+++ b/deps/v8/test/mjsunit/parse-tasks.js
@@ -0,0 +1,55 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --compiler-dispatcher --use-parse-tasks --use-external-strings
+
+(function(a) {
+ assertEquals(a, "IIFE");
+})("IIFE");
+
+(function(a, ...rest) {
+ assertEquals(a, 1);
+ assertEquals(rest.length, 2);
+ assertEquals(rest[0], 2);
+ assertEquals(rest[1], 3);
+})(1,2,3);
+
+var outer_var = 42;
+
+function lazy_outer() {
+ return 42;
+}
+
+var eager_outer = (function() { return 42; });
+
+(function() {
+ assertEquals(outer_var, 42);
+ assertEquals(lazy_outer(), 42);
+ assertEquals(eager_outer(), 42);
+})();
+
+var gen = (function*() {
+ yield 1;
+ yield 2;
+})();
+
+assertEquals(gen.next().value, 1);
+assertEquals(gen.next().value, 2);
+
+var result = (function recursive(a=0) {
+ if (a == 1) {
+ return 42;
+ }
+ return recursive(1);
+})();
+
+assertEquals(result, 42);
+
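+// The two `var c = ...` statements below are near-identical but parse
+// differently: the first is a parenthesized comma expression (assigning the
+// outer `b`), the second an arrow function whose parameter list carries a
+// default value.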
+var a = 42;
+var b;
+var c = (a, b = (function z(){ return a+1; })());
+assertEquals(b, 43);
+assertEquals(c, 43);
+var c = (a, b = (function z(){ return a+1; })()) => { return b; };
+assertEquals(c(314), 315);
diff --git a/deps/v8/test/mjsunit/polymorph-arrays.js b/deps/v8/test/mjsunit/polymorph-arrays.js
index 6a05c9f013..7d3221a20c 100644
--- a/deps/v8/test/mjsunit/polymorph-arrays.js
+++ b/deps/v8/test/mjsunit/polymorph-arrays.js
@@ -83,7 +83,7 @@ function testPolymorphicLoads() {
load = make_polymorphic_load_function();
assertEquals(undefined, load(sparse_object_array, new Object()));
- // Try with crankshaft.
+ // Try with optimizing compiler.
load = make_polymorphic_load_function();
%OptimizeFunctionOnNextCall(load);
assertEquals(1, load(object_array, 1));
diff --git a/deps/v8/test/mjsunit/proto-elements-add-during-foreach.js b/deps/v8/test/mjsunit/proto-elements-add-during-foreach.js
index a99e8070d0..8ee4ebc37c 100644
--- a/deps/v8/test/mjsunit/proto-elements-add-during-foreach.js
+++ b/deps/v8/test/mjsunit/proto-elements-add-during-foreach.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --enable-fast-array-builtins
-
var a = [0,1,2,,,,7];
var proto = {}
a.__proto__ = proto;
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index 6fb5660c08..dd4832b567 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -803,3 +803,8 @@ assertTrue(/^[\444]*$/.test("\u{24}4"));
assertTrue(/^[\d-X]*$/.test("234-X-432")); // CharacterRangeOrUnion.
assertTrue(/^[\d-X-Z]*$/.test("234-XZ-432"));
assertFalse(/^[\d-X-Z]*$/.test("234-XYZ-432"));
+
+// Lone leading surrogates. Just here to exercise specific parsing code-paths.
+
+assertFalse(/\uDB88|\uDBEC|aa/.test(""));
+assertFalse(/\uDB88|\uDBEC|aa/u.test(""));
diff --git a/deps/v8/test/mjsunit/regress/regress-105.js b/deps/v8/test/mjsunit/regress/regress-105.js
index 8b8030ffec..877cb82317 100644
--- a/deps/v8/test/mjsunit/regress/regress-105.js
+++ b/deps/v8/test/mjsunit/regress/regress-105.js
@@ -26,12 +26,12 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
var custom_valueOf = function() {
- assertEquals(Number, custom_valueOf.caller);
+ assertEquals(null, custom_valueOf.caller);
return 2;
}
var custom_toString = function() {
- assertEquals(String, custom_toString.caller);
+ assertEquals(null, custom_toString.caller);
return "I used to be an adventurer like you";
}
diff --git a/deps/v8/test/mjsunit/regress/regress-1119.js b/deps/v8/test/mjsunit/regress/regress-1119.js
index 24ab49aa95..1163ca042e 100644
--- a/deps/v8/test/mjsunit/regress/regress-1119.js
+++ b/deps/v8/test/mjsunit/regress/regress-1119.js
@@ -28,8 +28,6 @@
// Test runtime declaration of properties with var which are intercepted
// by JS accessors.
-// Flags: --es52-globals
-
this.__defineSetter__("x", function() { hasBeenInvoked = true; });
this.__defineSetter__("y", function() { throw 'exception'; });
diff --git a/deps/v8/test/mjsunit/regress/regress-115452.js b/deps/v8/test/mjsunit/regress/regress-115452.js
index d95bba893c..f745e1bad3 100644
--- a/deps/v8/test/mjsunit/regress/regress-115452.js
+++ b/deps/v8/test/mjsunit/regress/regress-115452.js
@@ -27,8 +27,6 @@
// Test that a function declaration cannot overwrite a read-only property.
-// Flags: --es52-globals
-
function foobl() {}
assertTrue(typeof this.foobl == "function");
assertTrue(Object.getOwnPropertyDescriptor(this, "foobl").writable);
diff --git a/deps/v8/test/mjsunit/regress/regress-1240.js b/deps/v8/test/mjsunit/regress/regress-1240.js
index 1a0bf2edb6..57d72b0a5a 100644
--- a/deps/v8/test/mjsunit/regress/regress-1240.js
+++ b/deps/v8/test/mjsunit/regress/regress-1240.js
@@ -33,7 +33,9 @@ var a = {};
Object.defineProperty(a, 'b',
{ get: function () { return 42; }, configurable: false });
// Do not allow us to redefine b on a.
-a.__defineGetter__('b', function _b(){ return 'foo'; });
+try {
+ a.__defineGetter__('b', function _b(){ return 'foo'; });
+} catch (e) {}
assertEquals(42, a.b);
var desc = Object.getOwnPropertyDescriptor(a, 'b');
assertFalse(desc.configurable);
diff --git a/deps/v8/test/mjsunit/regress/regress-1493017.js b/deps/v8/test/mjsunit/regress/regress-1493017.js
index 99a1dad2e0..b46397c8a0 100644
--- a/deps/v8/test/mjsunit/regress/regress-1493017.js
+++ b/deps/v8/test/mjsunit/regress/regress-1493017.js
@@ -28,7 +28,7 @@
// Test collection of abandoned maps. Tests that deleted map
// transitions do not show up as properties in for in.
-// Flags: --expose-gc --collect-maps
+// Flags: --expose-gc
function C() {}
diff --git a/deps/v8/test/mjsunit/regress/regress-2132.js b/deps/v8/test/mjsunit/regress/regress-2132.js
index 51938c8027..c2f6c297b4 100644
--- a/deps/v8/test/mjsunit/regress/regress-2132.js
+++ b/deps/v8/test/mjsunit/regress/regress-2132.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
function mul(x, y) {
return (x * y) | 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-2250.js b/deps/v8/test/mjsunit/regress/regress-2250.js
index 013771971e..e2ce546628 100644
--- a/deps/v8/test/mjsunit/regress/regress-2250.js
+++ b/deps/v8/test/mjsunit/regress/regress-2250.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
// The original problem from the bug: In the example below SMI check for b
// generated for inlining of equals invocation (marked with (*)) will be hoisted
diff --git a/deps/v8/test/mjsunit/regress/regress-2315.js b/deps/v8/test/mjsunit/regress/regress-2315.js
index 41211c42e9..9e40d0d3e3 100644
--- a/deps/v8/test/mjsunit/regress/regress-2315.js
+++ b/deps/v8/test/mjsunit/regress/regress-2315.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
var foo = (function() {
return eval("(function bar() { return 1; })");
diff --git a/deps/v8/test/mjsunit/regress/regress-2339.js b/deps/v8/test/mjsunit/regress/regress-2339.js
index 9db2f9c2bf..d7d2bb398a 100644
--- a/deps/v8/test/mjsunit/regress/regress-2339.js
+++ b/deps/v8/test/mjsunit/regress/regress-2339.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --expose-gc --crankshaft
+// Flags: --allow-natives-syntax --expose-gc --opt
function simple() {
return simple_two_args(0, undefined);
diff --git a/deps/v8/test/mjsunit/regress/regress-2451.js b/deps/v8/test/mjsunit/regress/regress-2451.js
index 1a486be8b1..08efda2325 100644
--- a/deps/v8/test/mjsunit/regress/regress-2451.js
+++ b/deps/v8/test/mjsunit/regress/regress-2451.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
function f() {
assertEquals(-1.0, Math.round(-1.5));
diff --git a/deps/v8/test/mjsunit/regress/regress-252797.js b/deps/v8/test/mjsunit/regress/regress-252797.js
index c3bb139965..08b22176af 100644
--- a/deps/v8/test/mjsunit/regress/regress-252797.js
+++ b/deps/v8/test/mjsunit/regress/regress-252797.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
// The type feedback oracle had a bug when retrieving the map from an IC
// starting with a negative lookup.
@@ -45,6 +45,7 @@ assertFalse(%HasFastProperties(holder));
// Create a receiver into dictionary mode.
var receiver = Object.create(holder, {
killMe: {value: 0, configurable: true},
+ keepMe: {value: 0, configurable: true}
});
delete receiver.killMe;
assertFalse(%HasFastProperties(receiver));
diff --git a/deps/v8/test/mjsunit/regress/regress-2618.js b/deps/v8/test/mjsunit/regress/regress-2618.js
index be3168c1cd..10ed81f0be 100644
--- a/deps/v8/test/mjsunit/regress/regress-2618.js
+++ b/deps/v8/test/mjsunit/regress/regress-2618.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --use-osr --allow-natives-syntax --ignition-osr --crankshaft
+// Flags: --use-osr --allow-natives-syntax --ignition-osr --opt
function f() {
do {
diff --git a/deps/v8/test/mjsunit/regress/regress-3176.js b/deps/v8/test/mjsunit/regress/regress-3176.js
index 370065d777..dbfe8218fd 100644
--- a/deps/v8/test/mjsunit/regress/regress-3176.js
+++ b/deps/v8/test/mjsunit/regress/regress-3176.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
function foo(a) {
var sum = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-330046.js b/deps/v8/test/mjsunit/regress/regress-330046.js
index eb0d3f38a2..24557b4cc6 100644
--- a/deps/v8/test/mjsunit/regress/regress-330046.js
+++ b/deps/v8/test/mjsunit/regress/regress-330046.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --use-osr --allow-natives-syntax --crankshaft
+// Flags: --use-osr --allow-natives-syntax --opt
var o1 = {a : 10};
var o2 = { };
diff --git a/deps/v8/test/mjsunit/regress/regress-3408144.js b/deps/v8/test/mjsunit/regress/regress-3408144.js
index 6e292d635a..8bd7b20eb9 100644
--- a/deps/v8/test/mjsunit/regress/regress-3408144.js
+++ b/deps/v8/test/mjsunit/regress/regress-3408144.js
@@ -28,8 +28,6 @@
// Test incorrect code generation for alternations on ARM.
-// Flags: --nofull-compiler
-
function foo() {
return (0 > ("10"||10) - 1);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-347914.js b/deps/v8/test/mjsunit/regress/regress-347914.js
index ec693ee92c..0137397cf2 100644
--- a/deps/v8/test/mjsunit/regress/regress-347914.js
+++ b/deps/v8/test/mjsunit/regress/regress-347914.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --debug-code --gc-interval=201 --verify-heap --max-inlined-source-size=999999 --max-inlined-nodes=999999 --max-inlined-nodes-cumulative=999999
-// Flags: --crankshaft --no-always-opt
+// Flags: --opt --no-always-opt
// Begin stripped down and modified version of mjsunit.js for easy minimization in CF.
function MjsUnitAssertionError(message) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-353004.js b/deps/v8/test/mjsunit/regress/regress-353004.js
index 7e1fb7e939..233a0f11d1 100644
--- a/deps/v8/test/mjsunit/regress/regress-353004.js
+++ b/deps/v8/test/mjsunit/regress/regress-353004.js
@@ -59,18 +59,18 @@ assertThrows(function() {
var buffer9 = new ArrayBuffer(1024);
var array9 = new Uint8Array(buffer9);
-var array10 = array9.subarray({valueOf : function() {
+assertThrows(() =>
+ array9.subarray({valueOf : function() {
%ArrayBufferNeuter(buffer9);
return 0;
- }}, 1024);
+ }}, 1024), TypeError);
assertEquals(0, array9.length);
-assertEquals(0, array10.length);
var buffer11 = new ArrayBuffer(1024);
var array11 = new Uint8Array(buffer11);
-var array12 = array11.subarray(0, {valueOf : function() {
- %ArrayBufferNeuter(buffer11);
- return 1024;
- }});
+assertThrows(() =>
+ array11.subarray(0, {valueOf : function() {
+ %ArrayBufferNeuter(buffer11);
+ return 1024;
+ }}), TypeError);
assertEquals(0, array11.length);
-assertEquals(0, array12.length);
diff --git a/deps/v8/test/mjsunit/regress/regress-3650-3.js b/deps/v8/test/mjsunit/regress/regress-3650-3.js
index f842428262..6195b12441 100644
--- a/deps/v8/test/mjsunit/regress/regress-3650-3.js
+++ b/deps/v8/test/mjsunit/regress/regress-3650-3.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
function foo(a) {
for (var d in a) {
diff --git a/deps/v8/test/mjsunit/regress/regress-3709.js b/deps/v8/test/mjsunit/regress/regress-3709.js
index 0f6f7b71d4..ecb906a908 100644
--- a/deps/v8/test/mjsunit/regress/regress-3709.js
+++ b/deps/v8/test/mjsunit/regress/regress-3709.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
function getobj() {
return { bar : function() { return 0}};
diff --git a/deps/v8/test/mjsunit/regress/regress-385565.js b/deps/v8/test/mjsunit/regress/regress-385565.js
index 0244cdf75c..541e461d96 100644
--- a/deps/v8/test/mjsunit/regress/regress-385565.js
+++ b/deps/v8/test/mjsunit/regress/regress-385565.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
var calls = 0;
diff --git a/deps/v8/test/mjsunit/regress/regress-410912.js b/deps/v8/test/mjsunit/regress/regress-410912.js
index 5691161521..9a2e46d7bf 100644
--- a/deps/v8/test/mjsunit/regress/regress-410912.js
+++ b/deps/v8/test/mjsunit/regress/regress-410912.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-gc --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --expose-gc --opt --no-always-opt
var assertDoesNotThrow;
var assertInstanceof;
diff --git a/deps/v8/test/mjsunit/regress/regress-4380.js b/deps/v8/test/mjsunit/regress/regress-4380.js
index f51241ac7d..06a64790ef 100644
--- a/deps/v8/test/mjsunit/regress/regress-4380.js
+++ b/deps/v8/test/mjsunit/regress/regress-4380.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
function bar(a) {
var x = a[0];
diff --git a/deps/v8/test/mjsunit/regress/regress-4665.js b/deps/v8/test/mjsunit/regress/regress-4665.js
index a75d68f105..9aed4ed0ce 100644
--- a/deps/v8/test/mjsunit/regress/regress-4665.js
+++ b/deps/v8/test/mjsunit/regress/regress-4665.js
@@ -11,12 +11,14 @@ FirstBuffer.__proto__ = Uint8Array
var buf = new Uint8Array(10)
buf.__proto__ = FirstBuffer.prototype
-var buf2 = buf.subarray(2)
-assertEquals(8, buf2.length);
+assertThrows(() => buf.subarray(2), TypeError);
// Second test case
+let seen_args = [];
+
function SecondBuffer (arg) {
+ seen_args.push(arg);
var arr = new Uint8Array(arg)
arr.__proto__ = SecondBuffer.prototype
return arr
@@ -25,7 +27,9 @@ SecondBuffer.prototype.__proto__ = Uint8Array.prototype
SecondBuffer.__proto__ = Uint8Array
var buf3 = new SecondBuffer(10)
+assertEquals([10], seen_args);
var buf4 = buf3.subarray(2)
-assertEquals(8, buf4.length);
+assertEquals(10, buf4.length);
+assertEquals([10, buf3.buffer], seen_args);
diff --git a/deps/v8/test/mjsunit/regress/regress-475705.js b/deps/v8/test/mjsunit/regress/regress-475705.js
index ff96e041b1..ec8416aed4 100644
--- a/deps/v8/test/mjsunit/regress/regress-475705.js
+++ b/deps/v8/test/mjsunit/regress/regress-475705.js
@@ -5,7 +5,7 @@
// Crankshaft changes the stack usage and messes up the binary search for the
// stack depth that causes a stack overflow. The issue only arises without
// regexp optimization, which can happen on pages that create a lot of regexps.
-// Flags: --nocrankshaft --noregexp-optimization
+// Flags: --noopt --noregexp-optimization
// Should not crash with a stack overflow in the regexp compiler, even when the
// JS has used most of the stack.
diff --git a/deps/v8/test/mjsunit/regress/regress-4825.js b/deps/v8/test/mjsunit/regress/regress-4825.js
index 5ad096f3ed..fafd3db73b 100644
--- a/deps/v8/test/mjsunit/regress/regress-4825.js
+++ b/deps/v8/test/mjsunit/regress/regress-4825.js
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Flags: --allow-natives-syntax
+
function enumerate(o) {
var keys = [];
for (var key in o) keys.push(key);
@@ -10,11 +12,13 @@ function enumerate(o) {
(function testSlowSloppyArgumentsElements() {
function slowSloppyArguments(a, b, c) {
+ %HeapObjectVerify(arguments);
arguments[10000] = "last";
arguments[4000] = "first";
arguments[6000] = "second";
arguments[5999] = "x";
arguments[3999] = "y";
+ %HeapObjectVerify(arguments);
return arguments;
}
assertEquals(["0", "1", "2", "3999", "4000", "5999", "6000", "10000"],
@@ -29,10 +33,12 @@ function enumerate(o) {
Object.defineProperty(arguments, 10000, {
enumerable: false, configurable: false, value: "NOPE"
});
+ %HeapObjectVerify(arguments);
arguments[4000] = "first";
arguments[6000] = "second";
arguments[5999] = "x";
arguments[3999] = "y";
+ %HeapObjectVerify(arguments);
return arguments;
}
@@ -43,11 +49,13 @@ function enumerate(o) {
enumerate(slowSloppyArguments(1,2,3)));
})();
+
(function testFastSloppyArgumentsElements() {
function fastSloppyArguments(a, b, c) {
arguments[5] = 1;
arguments[7] = 0;
arguments[3] = 2;
+ %HeapObjectVerify(arguments);
return arguments;
}
assertEquals(["0", "1", "2", "3", "5", "7"],
@@ -58,7 +66,11 @@ function enumerate(o) {
function fastSloppyArguments2(a, b, c) {
delete arguments[0];
+ %DebugPrint(arguments);
+ %HeapObjectVerify(arguments);
arguments[0] = "test";
+ %DebugPrint(arguments);
+ %HeapObjectVerify(arguments);
return arguments;
}
@@ -71,8 +83,10 @@ function enumerate(o) {
Object.defineProperty(arguments, 5, {
enumerable: false, configurable: false, value: "NOPE"
});
+ %HeapObjectVerify(arguments);
arguments[7] = 0;
arguments[3] = 2;
+ %HeapObjectVerify(arguments);
return arguments;
}
assertEquals(
@@ -83,10 +97,12 @@ function enumerate(o) {
function fastSloppyArguments2(a, b, c) {
delete arguments[0];
+ %HeapObjectVerify(arguments);
Object.defineProperty(arguments, 1, {
enumerable: false, configurable: false, value: "NOPE"
});
arguments[0] = "test";
+ %HeapObjectVerify(arguments);
return arguments;
}
diff --git a/deps/v8/test/mjsunit/regress/regress-5404.js b/deps/v8/test/mjsunit/regress/regress-5404.js
index b776a73bd0..72c5d30fe3 100644
--- a/deps/v8/test/mjsunit/regress/regress-5404.js
+++ b/deps/v8/test/mjsunit/regress/regress-5404.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
function foo(a, b) {
return a + "0123456789012";
diff --git a/deps/v8/test/mjsunit/regress/regress-5790.js b/deps/v8/test/mjsunit/regress/regress-5790.js
index 8709cd4aa3..eb405237e1 100644
--- a/deps/v8/test/mjsunit/regress/regress-5790.js
+++ b/deps/v8/test/mjsunit/regress/regress-5790.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
function foo(a) {
"use strict";
diff --git a/deps/v8/test/mjsunit/regress/regress-5802.js b/deps/v8/test/mjsunit/regress/regress-5802.js
index 799d89cada..57c8198c0c 100644
--- a/deps/v8/test/mjsunit/regress/regress-5802.js
+++ b/deps/v8/test/mjsunit/regress/regress-5802.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
(function() {
function eq(a, b) { return a == b; }
diff --git a/deps/v8/test/mjsunit/regress/regress-5902.js b/deps/v8/test/mjsunit/regress/regress-5902.js
index 69dde4387a..034b6a7951 100644
--- a/deps/v8/test/mjsunit/regress/regress-5902.js
+++ b/deps/v8/test/mjsunit/regress/regress-5902.js
@@ -58,5 +58,6 @@ assertEquals(
'Error.prototype',
'EvalError.prototype', 'RangeError.prototype', 'ReferenceError.prototype',
'SyntaxError.prototype', 'TypeError.prototype', 'URIError.prototype',
+ 'Map', 'Map.prototype.constructor', 'Set', 'Set.prototype.constructor'
],
log);
diff --git a/deps/v8/test/mjsunit/regress/regress-618608.js b/deps/v8/test/mjsunit/regress/regress-618608.js
index 742cc6e32e..33c5fbf188 100644
--- a/deps/v8/test/mjsunit/regress/regress-618608.js
+++ b/deps/v8/test/mjsunit/regress/regress-618608.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --validate-asm --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --validate-asm --allow-natives-syntax --opt --no-always-opt
// /v8/test/mjsunit/regress/regress-crbug-431602.js
// /v8/test/mjsunit/lazy-load.js
diff --git a/deps/v8/test/mjsunit/regress/regress-6248.js b/deps/v8/test/mjsunit/regress/regress-6248.js
new file mode 100644
index 0000000000..0631892549
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6248.js
@@ -0,0 +1,24 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var sentinelObject = {};
+var evaluatedArg = false;
+class C extends Object {
+ constructor() {
+ try {
+ super(evaluatedArg = true);
+ } catch (e) {
+ assertInstanceof(e, TypeError);
+ return sentinelObject;
+ }
+ }
+}
+Object.setPrototypeOf(C, parseInt);
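+// parseInt is callable but not a constructor, so GetSuperConstructor throws
+// a TypeError before the super() arguments are evaluated, which is why
+// evaluatedArg must stay false below.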
+assertSame(sentinelObject, new C());
+assertSame(sentinelObject, new C());
+%OptimizeFunctionOnNextCall(C)
+assertSame(sentinelObject, new C());
+assertFalse(evaluatedArg);
diff --git a/deps/v8/test/mjsunit/regress/regress-6280.js b/deps/v8/test/mjsunit/regress/regress-6280.js
new file mode 100644
index 0000000000..e5ccf265f4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6280.js
@@ -0,0 +1,22 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib, imports, buffer) {
+ "use asm";
+ var x = new stdlib.Int8Array(buffer);
+ function f() {
+ return x[0] | 0;
+ }
+ return { f:f };
+}
+
+var b = new ArrayBuffer(1024);
+var m1 = Module({ Int8Array:Int8Array }, {}, b);
+assertEquals(0, m1.f());
+
+var was_called = 0;
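+// Substituting a plain function for Int8Array should make asm->wasm
+// validation fail, so the module body runs as ordinary JS and the
+// constructor call on the stdlib member becomes observable.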
+function observer() { was_called++; return [23]; }
+var m2 = Module({ Int8Array:observer }, {}, b);
+assertEquals(1, was_called);
+assertEquals(23, m2.f());
diff --git a/deps/v8/test/mjsunit/regress/regress-6288.js b/deps/v8/test/mjsunit/regress/regress-6288.js
new file mode 100644
index 0000000000..eb8e735920
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6288.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Environment Variables: LC_ALL=pt-BR.UTF8
+
+// The data files packaged with d8 currently have Brazilian Portuguese
+// DateTimeFormat but not Collation
+
+if (this.Intl) {
+ assertEquals('und', Intl.Collator().resolvedOptions().locale);
+ assertEquals('pt-BR', Intl.DateTimeFormat().resolvedOptions().locale);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-6298.js b/deps/v8/test/mjsunit/regress/regress-6298.js
new file mode 100644
index 0000000000..c3f4de3c2d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6298.js
@@ -0,0 +1,16 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
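+// The 48-bit literal does not fit asm.js's int type, so validation is
+// expected to fail and the function runs under plain JS semantics: the exact
+// product rounds to the double 2^68 - 2^48 - 2^20, whose ToInt32 is -1048576.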
+function Module(stdlib, imports, buffer) {
+ "use asm";
+ function f() {
+ return (281474976710655 * 1048575) | 0;
+ }
+ return { f:f };
+}
+var m = Module(this);
+assertEquals(-1048576, m.f());
+assertFalse(%IsAsmWasmCode(Module));
diff --git a/deps/v8/test/mjsunit/regress/regress-6337.js b/deps/v8/test/mjsunit/regress/regress-6337.js
new file mode 100644
index 0000000000..e80804ee5b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-6337.js
@@ -0,0 +1,5 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
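+// A spread element is not a valid class member, so the eval must throw a
+// SyntaxError.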
+assertThrows(function() { eval(`class C { ...[] }`); } )
diff --git a/deps/v8/test/mjsunit/regress/regress-641091.js b/deps/v8/test/mjsunit/regress/regress-641091.js
new file mode 100644
index 0000000000..33a98ef52c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-641091.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertEquals(["🍤", "🍤"],
+ '🍤🍦🍋ππ🍋🍦🍤'.match(/🍤/ug));
+
+assertEquals(["🍤", "🍦", "🍦", "🍤"],
+ '🍤🍦🍋ππ🍋🍦🍤'.match(/🍤|🍦/ug));
+
+assertEquals(["🍤", "🍦", "🍋", "🍋", "🍦", "🍤"],
+ '🍤🍦🍋ππ🍋🍦🍤'.match(/🍤|🍦|🍋/ug));
+
+assertEquals(["🍤", "🍦", "🍋", "π", "π", "🍋", "🍦", "🍤"],
+ '🍤🍦🍋ππ🍋🍦🍤'.match(/🍤|🍦|π|🍋/ug));
diff --git a/deps/v8/test/mjsunit/regress/regress-645680.js b/deps/v8/test/mjsunit/regress/regress-645680.js
index b244d9c047..de216f07fc 100644
--- a/deps/v8/test/mjsunit/regress/regress-645680.js
+++ b/deps/v8/test/mjsunit/regress/regress-645680.js
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-gc
-//
+// Flags: --expose-gc --allow-natives-syntax
+
function getRandomProperty(v, rand) {
var properties = Object.getOwnPropertyNames(v);
if ("constructor" && v.constructor.hasOwnProperty()) {; }
@@ -11,10 +11,12 @@ function getRandomProperty(v, rand) {
return properties[rand % properties.length];
}
-var __v_18 = (function( b) { return arguments; })("foo", NaN, "bar");
-__v_18.__p_293850326 = "foo";
-__v_18.__defineGetter__(getRandomProperty( 990787501), function() {
+var args = (function( b) { return arguments; })("foo", NaN, "bar");
+args.__p_293850326 = "foo";
+%HeapObjectVerify(args);
+args.__defineGetter__(getRandomProperty( 990787501), function() {
gc();
- return __v_18.__p_293850326;
+ return args.__p_293850326;
});
-Array.prototype.indexOf.call(__v_18)
+%HeapObjectVerify(args);
+Array.prototype.indexOf.call(args)
diff --git a/deps/v8/test/mjsunit/regress/regress-707066.js b/deps/v8/test/mjsunit/regress/regress-707066.js
new file mode 100644
index 0000000000..b33b585ebd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-707066.js
@@ -0,0 +1,25 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-function-tostring
+
+// There was a bug in CreateDynamicFunction where a stack overflow
+// situation caused an assertion failure.
+
+function test(api) {
+ function f() {
+ try {
+ // induce a stack overflow
+ f();
+ } catch(e) {
+ // this might result in even more stack overflows
+ api();
+ }
+ }
+ f();
+}
+
+test(( function (){}).constructor); // Function
+test(( function*(){}).constructor); // GeneratorFunction
+test((async function (){}).constructor); // AsyncFunction
diff --git a/deps/v8/test/mjsunit/regress/regress-709782.js b/deps/v8/test/mjsunit/regress/regress-709782.js
new file mode 100644
index 0000000000..e33f694ec9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-709782.js
@@ -0,0 +1,14 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = [0];
+function bar(x) { return x; }
+function foo() { return a.reduce(bar); }
+
+assertEquals(0, foo());
+assertEquals(0, foo());
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0, foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-711165.js b/deps/v8/test/mjsunit/regress/regress-711165.js
new file mode 100644
index 0000000000..9a42451e25
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-711165.js
@@ -0,0 +1,9 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a regression test for a memory leak.
+
+try {
+ Realm.navigate(0);
+} catch(e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-716044.js b/deps/v8/test/mjsunit/regress/regress-716044.js
new file mode 100644
index 0000000000..264424c811
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-716044.js
@@ -0,0 +1,25 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --verify-heap
+
+class Array1 extends Array {
+ constructor(len) {
+ super(1);
+ }
+};
+
+class MyArray extends Array {
+ static get [Symbol.species]() {
+ return Array1;
+ }
+}
+
+a = new MyArray();
+
+for (var i = 0; i < 100000; i++) {
+ a.push(1);
+}
+
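+// map() builds its result through the species constructor, which ignores the
+// requested length and returns a length-1 array; mapping 100000 elements into
+// it stresses the --verify-heap checks.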
+a.map(function(x) { return 42; });
diff --git a/deps/v8/test/mjsunit/regress/regress-718285.js b/deps/v8/test/mjsunit/regress/regress-718285.js
new file mode 100644
index 0000000000..409f343693
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-718285.js
@@ -0,0 +1,46 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
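+// The *_reference variants copy into a fresh array; foo and bar instead call
+// set() with a source view aliasing the destination's own buffer, covering
+// the overlapping-copy path.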
+function foo_reference(n) {
+ var array = new Int32Array(n + 1);
+ for (var i = 0; i < n; ++i) {
+ array[i] = i;
+ }
+ var array2 = new Int32Array(array);
+ array2.set(new Uint8Array(array.buffer, 0, n), 1);
+ return array2;
+}
+
+function foo(n) {
+ var array = new Int32Array(n + 1);
+ for (var i = 0; i < n; ++i) {
+ array[i] = i;
+ }
+ array.set(new Uint8Array(array.buffer, 0, n), 1);
+ return array;
+}
+
+function bar_reference(n) {
+ var array = new Int32Array(n + 1);
+ for (var i = 0; i < n; ++i) {
+ array[i] = i;
+ }
+ var array2 = new Int32Array(array);
+ array2.set(new Uint8Array(array.buffer, 34), 0);
+ return array2;
+}
+
+function bar(n) {
+ var array = new Int32Array(n + 1);
+ for (var i = 0; i < n; ++i) {
+ array[i] = i;
+ }
+ array.set(new Uint8Array(array.buffer, 34), 0);
+ return array;
+}
+
+foo(10);
+foo_reference(10);
+bar(10);
+bar_reference(10);
diff --git a/deps/v8/test/mjsunit/regress/regress-718891.js b/deps/v8/test/mjsunit/regress/regress-718891.js
new file mode 100644
index 0000000000..60ce380e01
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-718891.js
@@ -0,0 +1,68 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+function Data() {
+}
+Data.prototype = { x: 1 };
+
+function TriggerDeopt() {
+ Data.prototype = { x: 2 };
+}
+
+function TestDontSelfHealWithDeoptedCode(run_unoptimized, ClosureFactory) {
+ // Create some function closures which don't have
+ // optimized code.
+ var unoptimized_closure = ClosureFactory();
+ if (run_unoptimized) {
+ unoptimized_closure();
+ }
+
+ // Run and optimize the code (do this in a separate function
+ // so that the closure doesn't leak in a dead register).
+ (() => {
+ var optimized_closure = ClosureFactory();
+ // Use .call to avoid the CallIC retaining the JSFunction in the
+ // feedback vector via a weak map, which would mean it wouldn't be
+ // collected in the minor gc below.
+ optimized_closure.call(undefined);
+ %OptimizeFunctionOnNextCall(optimized_closure);
+ optimized_closure.call(undefined);
+ })();
+
+ // Optimize a dummy function, just so it gets linked into the
+ // Context's optimized_functions list head, which is in the old
+ // space, and the link to the optimized_closure's JSFunction
+ // moves to the inline link in dummy's JSFunction in the new space,
+ // otherwise optimized_closure's JSFunction will be retained by the
+ // old->new remember set.
+ (() => {
+ var dummy = function() { return 1; };
+ %OptimizeFunctionOnNextCall(dummy);
+ dummy();
+ })();
+
+ // GC the optimized closure with a minor GC - the optimized
+ // code will remain in the feedback vector.
+ gc(true);
+
+ // Trigger deoptimization by changing the prototype of Data. This
+ // will mark the code for deopt, but since no live JSFunction has
+ // optimized code, we won't clear the feedback vector.
+ TriggerDeopt();
+
+ // Call pre-existing functions, these will try to self-heal with the
+ // optimized code in the feedback vector op, but should bail-out
+ // since the code is marked for deoptimization.
+ unoptimized_closure();
+}
+
+// Run with the unoptimized closure both uncompiled and compiled for the
+// interpreter initially, to test self healing on both CompileLazy and
+// the InterpreterEntryTrampoline respectively.
+TestDontSelfHealWithDeoptedCode(false,
+ () => { return () => { return new Data() }});
+TestDontSelfHealWithDeoptedCode(true,
+ () => { return () => { return new Data() }});
diff --git a/deps/v8/test/mjsunit/regress/regress-719380.js b/deps/v8/test/mjsunit/regress/regress-719380.js
new file mode 100644
index 0000000000..18d541a5fe
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-719380.js
@@ -0,0 +1,7 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
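+// Both the logged object's toString and the TypeError "name" getter throw,
+// exercising the error-message formatting paths below.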
+TypeError.prototype.__defineGetter__("name", () => { throw 42; });
+console.log({ toString: () => { throw new TypeError() }});
+try { new WebAssembly.Table({}); } catch (e) {}
diff --git a/deps/v8/test/mjsunit/regress/regress-722978.js b/deps/v8/test/mjsunit/regress/regress-722978.js
new file mode 100644
index 0000000000..082c1f8be3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-722978.js
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt
+
+var __v_3 = {};
+function __f_0() {
+ var __v_30 = -0;
+ __v_30.__defineGetter__("0", function() { return undefined; });
+ __v_30 = 0;
+ __v_3 = 0;
+ assertTrue(Object.is(0, __v_30));
+}
+__f_0();
diff --git a/deps/v8/test/mjsunit/regress/regress-725858.js b/deps/v8/test/mjsunit/regress/regress-725858.js
new file mode 100644
index 0000000000..466673f816
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-725858.js
@@ -0,0 +1,11 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {}
+var src = 'f(' + '0,'.repeat(0x201f) + ')';
+var boom = new Function(src);
+%OptimizeFunctionOnNextCall(boom);
+boom();
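
For scale, 0x201f is 8223, so `boom` is a single call site with 8223 constant arguments, presumably enough to stress the optimizer's argument-count handling. A quick sketch confirming the count (illustrative, not part of the patch):

  // Each "0," contributes one argument; the empty entry after the final
  // comma is dropped by the - 1.
  print(0x201f);                                     // 8223
  print('0,'.repeat(0x201f).split(',').length - 1);  // 8223
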
diff --git a/deps/v8/test/mjsunit/regress/regress-727218.js b/deps/v8/test/mjsunit/regress/regress-727218.js
new file mode 100644
index 0000000000..8b2aa06a20
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-727218.js
@@ -0,0 +1,12 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var f = ({ x } = { x: y }) => {
+ x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;
+ x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;
+ x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;
+ x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;
+ x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;
+ x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;x;
+};
diff --git a/deps/v8/test/mjsunit/regress/regress-conditional-position.js b/deps/v8/test/mjsunit/regress/regress-conditional-position.js
index ae5a3acb58..c9badd6830 100644
--- a/deps/v8/test/mjsunit/regress/regress-conditional-position.js
+++ b/deps/v8/test/mjsunit/regress/regress-conditional-position.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --nocrankshaft
+// Flags: --noopt
var functionToCatch;
var lineNumber;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-157019.js b/deps/v8/test/mjsunit/regress/regress-crbug-157019.js
index 1c54089ff9..66eb41bdd2 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-157019.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-157019.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --nocrankshaft
+// Flags: --allow-natives-syntax --noopt
function makeConstructor() {
return function() {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-157520.js b/deps/v8/test/mjsunit/regress/regress-crbug-157520.js
index 17081dfa52..9570085333 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-157520.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-157520.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --nocrankshaft
+// Flags: --noopt
(function(){
var f = function(arg) {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-244461.js b/deps/v8/test/mjsunit/regress/regress-crbug-244461.js
index 7b465482e0..2afb76ac12 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-244461.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-244461.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --smi-only-arrays
+// Flags: --allow-natives-syntax
function foo(arg) {
var a = arg();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-504787.js b/deps/v8/test/mjsunit/regress/regress-crbug-504787.js
index 66274bc6b9..ac592e08fb 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-504787.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-504787.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --noturbo-osr
-
function f() {
"use asm";
function g() {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-506443.js b/deps/v8/test/mjsunit/regress/regress-crbug-506443.js
index 0ab518f9b9..490edf0d4c 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-506443.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-506443.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
assertSame = function assertSame() {
if (found === expected) {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-513507.js b/deps/v8/test/mjsunit/regress/regress-crbug-513507.js
index 86a0f1b1f9..ae321ba906 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-513507.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-513507.js
@@ -4,7 +4,7 @@
// Flags: --allow-natives-syntax
-// The following triggers a GC in SharedFunctionInfo::AddToOptimizedCodeMap.
+// The following triggers a GC in Context::AddToOSROptimizedCodeCache.
// Flags: --gc-interval=1234 --gc-global
function makeFun() {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-554831.js b/deps/v8/test/mjsunit/regress/regress-crbug-554831.js
index da78fa4c07..3d022b257b 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-554831.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-554831.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
(function() {
var key = "s";
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-587068.js b/deps/v8/test/mjsunit/regress/regress-crbug-587068.js
index 4af8110497..864f8ce7d2 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-587068.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-587068.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
// The Crankshaft fast case for String.fromCharCode used to unconditionally
// deoptimize on non int32 indices.
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-594183.js b/deps/v8/test/mjsunit/regress/regress-crbug-594183.js
index 87f3195917..cb8003404d 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-594183.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-594183.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
var global = {}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-651403-global.js b/deps/v8/test/mjsunit/regress/regress-crbug-651403-global.js
index 776bdcfc87..8682d822a5 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-651403-global.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-651403-global.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --ignition-staging --turbo --always-opt
+// Flags: --turbo --always-opt
x = "";
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-707580.js b/deps/v8/test/mjsunit/regress/regress-crbug-707580.js
new file mode 100644
index 0000000000..37d13d219f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-707580.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var thrower = { [Symbol.toPrimitive] : function() { throw "I was called!" } };
+var heap_number = 4.2;
+var smi_number = 23;
+
+assertThrows(() => heap_number.hasOwnProperty(thrower));
+assertThrows(() => smi_number.hasOwnProperty(thrower));
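
Both asserts rely on hasOwnProperty coercing its argument to a property key: ToPropertyKey runs ToPrimitive, which invokes the throwing Symbol.toPrimitive method before any lookup happens, on heap numbers and Smis alike. A minimal sketch of that coercion (spec behavior, not part of the patch):

  // The key object is converted to a primitive before the property lookup.
  var key = { [Symbol.toPrimitive]: function() { return "length"; } };
  print("x".hasOwnProperty(key));  // true: key coerces to "length"
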
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-711166.js b/deps/v8/test/mjsunit/regress/regress-crbug-711166.js
new file mode 100644
index 0000000000..7f4acb963d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-711166.js
@@ -0,0 +1,22 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+'use strict'
+function g() {
+ var x = 1;
+ try { undefined.x } catch (e) { x = e; }
+ (function() { x });
+ return x;
+}
+function f(a) {
+ var args = arguments;
+ assertInstanceof(g(), TypeError);
+ return args.length;
+}
+assertEquals(1, f(0));
+assertEquals(1, f(0));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1, f(0));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-712802.js b/deps/v8/test/mjsunit/regress/regress-crbug-712802.js
new file mode 100644
index 0000000000..e23519e179
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-712802.js
@@ -0,0 +1,12 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(...args) { return Array.isArray(args); }
+
+assertTrue(foo());
+assertTrue(foo());
+%OptimizeFunctionOnNextCall(foo);
+assertTrue(foo());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-714696.js b/deps/v8/test/mjsunit/regress/regress-crbug-714696.js
new file mode 100644
index 0000000000..16b09604e9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-714696.js
@@ -0,0 +1,10 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.Intl) {
+ new Intl.v8BreakIterator();
+ new Intl.DateTimeFormat();
+ console.log({ toString: function() { throw 1; }});
+ new Intl.v8BreakIterator();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-714872.js b/deps/v8/test/mjsunit/regress/regress-crbug-714872.js
new file mode 100644
index 0000000000..88dee1401e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-714872.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() {}
+f.prototype = 1;
+f.foo = 1;
+f.prototype = {};
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-714971.js b/deps/v8/test/mjsunit/regress/regress-crbug-714971.js
new file mode 100644
index 0000000000..d72c7a0fad
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-714971.js
@@ -0,0 +1,19 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function Module(stdlib, foreign, heap) {
+ "use asm";
+ var a = new stdlib.Int16Array(heap);
+ function f() {
+ return a[23 >> -1];
+ }
+ return { f:f };
+}
+var b = new ArrayBuffer(1024);
+var m = Module(this, {}, b);
+new Int16Array(b)[0] = 42;
+assertEquals(42, m.f());
+assertFalse(%IsAsmWasmCode(Module));
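
The negative shift count is presumably what makes the module fail asm.js validation, hence assertFalse(%IsAsmWasmCode(Module)); the test then exercises the plain-JS fallback semantics, where shift counts are masked to five bits, so 23 >> -1 is 23 >> 31, i.e. 0, and the load reads a[0], which was set to 42 through the aliased Int16Array. A sketch of the masking (standard JS semantics):

  print(-1 & 31);   // 31: only the low five bits of the count are used
  print(23 >> -1);  // 0, same as 23 >> 31
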
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-714981.js b/deps/v8/test/mjsunit/regress/regress-crbug-714981.js
new file mode 100644
index 0000000000..e6a664d422
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-714981.js
@@ -0,0 +1,32 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function addProperties(o)
+{
+ o.p1 = 1;
+ o.p2 = 2;
+ o.p3 = 3;
+ o.p4 = 4;
+ o.p5 = 5;
+ o.p6 = 6;
+ o.p7 = 7;
+ o.p8 = 8;
+}
+function removeProperties(o)
+{
+ delete o.p8;
+ delete o.p7;
+ delete o.p6;
+ delete o.p5;
+}
+function makeO()
+{
+ var o = { };
+ addProperties(o);
+ removeProperties(o);
+ addProperties(o);
+}
+for (var i = 0; i < 3; ++i) {
+ o = makeO();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-715151.js b/deps/v8/test/mjsunit/regress/regress-crbug-715151.js
new file mode 100644
index 0000000000..c0b2c5dba8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-715151.js
@@ -0,0 +1,15 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --verify-heap
+
+function foo() {
+ var a = [0];
+ Object.preventExtensions(a);
+ return a.pop();
+}
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-715404.js b/deps/v8/test/mjsunit/regress/regress-crbug-715404.js
new file mode 100644
index 0000000000..8ff2d00ba0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-715404.js
@@ -0,0 +1,11 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() { Array(-1); }
+assertThrows(foo, RangeError);
+assertThrows(foo, RangeError);
+%OptimizeFunctionOnNextCall(foo);
+assertThrows(foo, RangeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-715455.js b/deps/v8/test/mjsunit/regress/regress-crbug-715455.js
new file mode 100644
index 0000000000..21ec165683
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-715455.js
@@ -0,0 +1,25 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function MODULE() {
+ "use asm";
+ function f() {
+ bogus_function_table[0 & LIMIT]();
+ }
+ return { f:f };
+}
+
+var bogus_function_table = [ Object ];
+var test_set = [ 0x3fffffff, 0x7fffffff, 0xffffffff ];
+for (var i = 0; i < test_set.length; ++i) {
+ bogus_function_table[i] = Object;
+ var src = MODULE.toString();
+ src = src.replace(/MODULE/g, "Module" + i);
+ src = src.replace(/LIMIT/g, test_set[i]);
+ var module = eval("(" + src + ")");
+ assertDoesNotThrow(() => module(this).f());
+ assertFalse(%IsAsmWasmCode(module));
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-715862.js b/deps/v8/test/mjsunit/regress/regress-crbug-715862.js
new file mode 100644
index 0000000000..60e836ddc4
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-715862.js
@@ -0,0 +1,17 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --verify-heap
+
+function f(a) {
+ a.x = 0;
+ a[1] = 0.1;
+ a.x = {};
+}
+
+f(new Array(1));
+f(new Array());
+
+%OptimizeFunctionOnNextCall(f);
+f(new Array(1));
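
The store sequence walks the array through representation changes: a.x = 0 records a Smi field, a[1] = 0.1 switches the elements to doubles, and a.x = {} generalizes the field to tagged, which is where --verify-heap would catch a stale map. A small sketch of representation-dependent hidden classes, assuming --allow-natives-syntax (illustrative, not part of the patch):

  var o1 = {}; o1.x = 0;    // x tracked with Smi representation
  var o2 = {}; o2.x = 0.1;  // x tracked with double representation
  print(%HaveSameMap(o1, o2));  // false: the maps differ by field type
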
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-716520.js b/deps/v8/test/mjsunit/regress/regress-crbug-716520.js
new file mode 100644
index 0000000000..5058c94a6b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-716520.js
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var __v_0 = {};
+var __v_8 = this;
+var __v_11 = -1073741825;
+__v_1 = this;
+try {
+} catch(e) {; }
+ function __f_4() {}
+ __f_4.prototype = __v_0;
+ function __f_9() { return new __f_4().v; }
+ __f_9(); __f_9();
+try {
+(function() {
+})();
+} catch(e) {; }
+ Object.assign(__v_0, __v_1, __v_0);
+(function() {
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-716804.js b/deps/v8/test/mjsunit/regress/regress-crbug-716804.js
new file mode 100644
index 0000000000..181a3d6c68
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-716804.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var v = [];
+v.__proto__ = function() {};
+v.prototype;
+
+var v = [];
+v.__proto__ = new Error();
+v.stack;
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-716912.js b/deps/v8/test/mjsunit/regress/regress-crbug-716912.js
new file mode 100644
index 0000000000..ca1663d61a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-716912.js
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-gc --invoke-weak-callbacks
+
+function __f_6() {
+this.a4 = {};
+}
+__v_6 = new __f_6();
+__v_6.prototype = __v_6;
+__v_6 = new __f_6();
+gc();
+gc();
+
+buf = new ArrayBuffer(8);
+__v_8 = new Int32Array(buf);
+__v_9 = new Float64Array(buf);
+
+__v_8[0] = 1;
+__v_6.a4 = {a: 0};
+delete __v_6.a4;
+__v_6.boom = __v_9[0];
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-718779.js b/deps/v8/test/mjsunit/regress/regress-crbug-718779.js
new file mode 100644
index 0000000000..e62c10729f
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-718779.js
@@ -0,0 +1,21 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function __f_1()
+{
+ __v_1.p2 = 2147483648;
+ __v_1.p3 = 3;
+ __v_1.p4 = 4;
+ __v_1.p5 = 2147483648;
+ __v_1.p6 = 6;
+}
+function __f_2()
+{
+ delete __v_1.p6;
+ delete __v_1.p5;
+}
+var __v_1 = { };
+__f_1(__v_1);
+__f_2(__v_1);
+__f_1(__v_1);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-719479.js b/deps/v8/test/mjsunit/regress/regress-crbug-719479.js
new file mode 100644
index 0000000000..dac49de3b7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-719479.js
@@ -0,0 +1,24 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function baz(a, b) {
+ for (var i = 0; i < a.length; i++) {
+ if (a[i], b[i]) return false;
+ }
+}
+function bar(expected, found) {
+ if (!baz(found, expected)) {
+ }
+};
+bar([{}, 6, NaN], [1.8, , NaN]);
+function foo() {
+ var a = [1,2,3,4];
+ bar(a.length, a.length);
+}
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-721835.js b/deps/v8/test/mjsunit/regress/regress-crbug-721835.js
new file mode 100644
index 0000000000..80f99e6dd5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-721835.js
@@ -0,0 +1,31 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --allow-natives-syntax
+
+(function TestValidationFailureInForStatement() {
+ function Module() {
+ "use asm"
+ function f() {
+ var a = 0;
+ for (a = b; 0; 0) {};
+ return 0;
+ }
+ return { f:f };
+ }
+ assertThrows(() => Module().f(), ReferenceError);
+ assertFalse(%IsAsmWasmCode(Module));
+})();
+
+(function TestForStatementInVoidFunction() {
+ function Module() {
+ "use asm"
+ function f() {
+ for (1; 0; 0) {};
+ }
+ return { f:f };
+ }
+ assertDoesNotThrow(() => Module().f());
+ assertTrue(%IsAsmWasmCode(Module));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-723132.js b/deps/v8/test/mjsunit/regress/regress-crbug-723132.js
new file mode 100644
index 0000000000..99189f6f21
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-723132.js
@@ -0,0 +1,16 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function outer() {
+ function* generator() {
+ let arrow = () => {
+ assertSame(expectedReceiver, this);
+ assertEquals(42, arguments[0]);
+ };
+ arrow();
+ }
+ generator.call(this, 42).next();
+}
+let expectedReceiver = {};
+outer.call(expectedReceiver);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-723455.js b/deps/v8/test/mjsunit/regress/regress-crbug-723455.js
new file mode 100644
index 0000000000..85f5e3c1d5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-723455.js
@@ -0,0 +1,18 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --verify-heap
+
+function f(a) {
+ a.x = 0;
+ a[0] = 0.1;
+ a.x = {};
+}
+
+f(new Array(1));
+f(new Array(1));
+f(new Array());
+
+%OptimizeFunctionOnNextCall(f);
+f(new Array(1));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-736451.js b/deps/v8/test/mjsunit/regress/regress-crbug-736451.js
new file mode 100644
index 0000000000..3f70fe271b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-736451.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-externalize-string --no-stress-opt
+
+!function() {
+ const s0 = "external string turned into two byte";
+ const s1 = s0.substring(1);
+ externalizeString(s0, true);
+
+ s1.toLowerCase();
+}();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-736575.js b/deps/v8/test/mjsunit/regress/regress-crbug-736575.js
new file mode 100644
index 0000000000..3622b09b97
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-736575.js
@@ -0,0 +1,14 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+ return [...[/*hole*/, 2.3]];
+}
+
+assertEquals(undefined, f()[0]);
+assertEquals(undefined, f()[0]);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(undefined, f()[0]);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-738763.js b/deps/v8/test/mjsunit/regress/regress-crbug-738763.js
new file mode 100644
index 0000000000..71cb67b22e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-738763.js
@@ -0,0 +1,25 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --verify-heap --allow-natives-syntax --expose-gc
+
+let constant = { a: 1 };
+
+function update_array(array) {
+ array.x = constant;
+ %HeapObjectVerify(array);
+ array[0] = undefined;
+ %HeapObjectVerify(array);
+ return array;
+}
+
+let ar1 = [1];
+let ar2 = [2];
+let ar3 = [3];
+gc();
+gc();
+
+update_array(ar1);
+constant = update_array(ar2);
+update_array(ar3);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-740803.js b/deps/v8/test/mjsunit/regress/regress-crbug-740803.js
new file mode 100644
index 0000000000..b470ecafbf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-740803.js
@@ -0,0 +1,19 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+({
+ m() {
+ x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x;
+ x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x;
+ x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x;
+ x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x;
+ x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x;
+ x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x;
+ x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x;
+ x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x;
+ x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x;
+ x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x; x;
+ x;
+ }
+})
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-747979.js b/deps/v8/test/mjsunit/regress/regress-crbug-747979.js
new file mode 100644
index 0000000000..bbdea1ddf5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-747979.js
@@ -0,0 +1,32 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(a) {
+ %HeapObjectVerify(a);
+ a[1] = 0;
+ %HeapObjectVerify(a);
+}
+
+function foo() {}
+
+var arr1 = [0];
+var arr2 = [0];
+var arr3 = [0];
+
+arr1.f = foo;
+arr1[0] = 4.2;
+
+arr2.f = foo;
+
+arr3.f = foo;
+arr3[0] = 4.2;
+arr3.f = f;
+
+f(arr1);
+f(arr2);
+f(arr3);
+%OptimizeFunctionOnNextCall(f);
+f(arr3);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-748539.js b/deps/v8/test/mjsunit/regress/regress-crbug-748539.js
new file mode 100644
index 0000000000..bae598710e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-748539.js
@@ -0,0 +1,22 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f1() {}
+function f2() {}
+
+var o1 = [];
+o1.a = 0;
+o1.f = f1;
+%HeapObjectVerify(o1);
+
+var o2 = [];
+o2.a = 4.2;
+o2.f = f2;
+%HeapObjectVerify(o2);
+
+o1.a;
+%HeapObjectVerify(o1);
+%HeapObjectVerify(o2);
diff --git a/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js b/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
index b7c5e14231..08de6dbab1 100644
--- a/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
+++ b/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
@@ -28,7 +28,7 @@
// Flags: --fold-constants --nodead-code-elimination
// Flags: --expose-gc --allow-natives-syntax
// Flags: --concurrent-recompilation --block-concurrent-recompilation
-// Flags: --crankshaft --no-always-opt
+// Flags: --opt --no-always-opt
if (!%IsConcurrentRecompilationSupported()) {
print("Concurrent recompilation is disabled. Skipping this test.");
diff --git a/deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js b/deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js
index f1d2b7703f..ece96b3ff0 100644
--- a/deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js
+++ b/deps/v8/test/mjsunit/regress/regress-map-invalidation-2.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
var c = { x: 2, y: 1 };
diff --git a/deps/v8/test/mjsunit/regress/regress-param-local-type.js b/deps/v8/test/mjsunit/regress/regress-param-local-type.js
index 076a56dd25..0eaca50af5 100644
--- a/deps/v8/test/mjsunit/regress/regress-param-local-type.js
+++ b/deps/v8/test/mjsunit/regress/regress-param-local-type.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
// Test that we do not confuse the first local and the first parameter
// when gathering type information.
diff --git a/deps/v8/test/mjsunit/regress/regress-r4998.js b/deps/v8/test/mjsunit/regress/regress-r4998.js
index 9cf33713b5..a82c266777 100644
--- a/deps/v8/test/mjsunit/regress/regress-r4998.js
+++ b/deps/v8/test/mjsunit/regress/regress-r4998.js
@@ -28,8 +28,6 @@
// Test for a broken fast-smi-loop that does not save the incremented value
// of the loop index. If this test fails, it loops forever, and times out.
-// Flags: --nofull-compiler
-
// Calling foo() spills the virtual frame.
function foo() {
return;
diff --git a/deps/v8/test/mjsunit/regress/regress-store-uncacheable.js b/deps/v8/test/mjsunit/regress/regress-store-uncacheable.js
index fdd200ae3a..4baedbacae 100644
--- a/deps/v8/test/mjsunit/regress/regress-store-uncacheable.js
+++ b/deps/v8/test/mjsunit/regress/regress-store-uncacheable.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
function f() {
var o = {};
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-5697.js b/deps/v8/test/mjsunit/regress/regress-v8-5697.js
index 550bd98017..d7c1679fa1 100644
--- a/deps/v8/test/mjsunit/regress/regress-v8-5697.js
+++ b/deps/v8/test/mjsunit/regress/regress-v8-5697.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
function load(o) { return o.x; }
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-712569.js b/deps/v8/test/mjsunit/regress/wasm/regress-712569.js
new file mode 100644
index 0000000000..ea7e4060a0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-712569.js
@@ -0,0 +1,20 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+
+var v11 = {};
+Object.defineProperty(v11.__proto__, 0, {
+ get: function() {
+ },
+ set: function() {
+ try {
+ WebAssembly.instantiate();
+ v11[0] = 0;
+ } catch (e) {
+ assertTrue(e instanceof RangeError);
+ }
+ }
+});
+v66 = new Array();
+cv = v66; cv[0] = 0.1; cv[2] = 0.2;
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-02256.js b/deps/v8/test/mjsunit/regress/wasm/regression-02256.js
index d0db4ca82a..3b9b76b5a6 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-02256.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-02256.js
@@ -4,7 +4,7 @@
//
// Flags: --random-seed=891196975 --expose-gc --allow-natives-syntax
// Flags: --gc-interval=207 --stress-compaction --validate-asm
-// Flags: --crankshaft --no-always-opt
+// Flags: --opt --no-always-opt
//
// /v8/test/mjsunit/wasm/grow-memory.js
// /v8/test/mjsunit/regress/regress-540.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-02256b.js b/deps/v8/test/mjsunit/regress/wasm/regression-02256b.js
index 6facf0d4e3..120643896d 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-02256b.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-02256b.js
@@ -4,7 +4,7 @@
//
// Flags: --random-seed=891196975 --expose-gc --allow-natives-syntax
// Flags: --gc-interval=207 --stress-compaction --validate-asm
-// Flags: --crankshaft --no-always-opt
+// Flags: --opt --no-always-opt
//
// /v8/test/mjsunit/wasm/grow-memory.js
// /v8/test/mjsunit/regress/regress-540.js
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-647649.js b/deps/v8/test/mjsunit/regress/wasm/regression-647649.js
index fc228d4b10..dc89ebd845 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-647649.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-647649.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --nostress-opt --expose-gc --invoke-weak-callbacks --validate-asm
-// Flags: --noalways-opt --invoke-weak-callbacks
+// Flags: --nostress-opt --expose-gc --noalways-opt --invoke-weak-callbacks
// This test was generated by the fuzzer.
@@ -38,6 +37,6 @@ Array.prototype.__proto__ = {3: __v_13};
Array.prototype.__proto__.__proto__ = {7: __v_11};
__v_9 = [0, 1, , , 4, 5, , , , 9]
__v_12 = __v_9.splice(4, 1)
-__v_9.__defineGetter__(getRandomProperty(__v_9, 1689439720), function() {; return __f_1(); });
+__v_9.__defineGetter__(getRandomProperty(__v_9, 1689439720), function() { return {}; });
__v_9[8]
gc();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-699485.js b/deps/v8/test/mjsunit/regress/wasm/regression-699485.js
index 12477c5d37..7f4560789e 100644
--- a/deps/v8/test/mjsunit/regress/wasm/regression-699485.js
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-699485.js
@@ -1,4 +1,4 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
+// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-710844.js b/deps/v8/test/mjsunit/regress/wasm/regression-710844.js
new file mode 100644
index 0000000000..a45e953574
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-710844.js
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function() {
+ "use asm";
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(0, 5, true);
+ builder.addFunction("regression_710844", kSig_v_v)
+ .addBody([
+ kExprI32Const, 0x03,
+ kExprNop,
+ kExprGrowMemory, 0x00,
+ kExprI32Const, 0x13,
+ kExprNop,
+ kExprI32StoreMem8, 0x00, 0x10
+ ]).exportFunc();
+ let instance = builder.instantiate();
+ instance.exports.regression_710844();
+})();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-711203.js b/deps/v8/test/mjsunit/regress/wasm/regression-711203.js
new file mode 100644
index 0000000000..46f274a8b0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-711203.js
@@ -0,0 +1,30 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+(function() {
+ var builder = new WasmModuleBuilder();
+ builder.addMemory(16, 32, false);
+ builder.addFunction("test", kSig_i_iii)
+ .addBodyWithEnd([
+ // body:
+ kExprI64Const, 0,
+ kExprI64Const, 0x1,
+ kExprI64Clz,
+ kExprI64Sub,
+ kExprI64Const, 0x10,
+ kExprI64Const, 0x1b,
+ kExprI64Shl,
+ kExprI64Sub,
+ kExprI64Popcnt,
+ kExprI32ConvertI64,
+ kExprEnd, // @207
+ ])
+ .exportFunc();
+ var module = builder.instantiate();
+ const result = module.exports.test(1, 2, 3);
+ assertEquals(58, result);
+})();
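
The expected 58 checks out by hand: clz64(1) is 63, so the first subtraction yields -63; 0x10 << 0x1b is 2^31; and the popcount of (-63 - 2^31) taken as an unsigned 64-bit value is 58. The same arithmetic as a BigInt sketch (assumes a BigInt-capable shell, not part of the patch):

  // 0xFFFFFFFF7FFFFFC1 has 58 bits set.
  const v = BigInt.asUintN(64, (0n - 63n) - (0x10n << 0x1bn));
  print([...v.toString(2)].filter(b => b === '1').length);  // 58
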
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-715216-a.js b/deps/v8/test/mjsunit/regress/wasm/regression-715216-a.js
new file mode 100644
index 0000000000..56253414c9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-715216-a.js
@@ -0,0 +1,12 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-interpret-all --validate-asm
+
+function asm() {
+ "use asm";
+ function f() {}
+ return {};
+}
+asm();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-715216-b.js b/deps/v8/test/mjsunit/regress/wasm/regression-715216-b.js
new file mode 100644
index 0000000000..0954f807dd
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-715216-b.js
@@ -0,0 +1,13 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-interpret-all --wasm-lazy-compilation
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+var builder = new WasmModuleBuilder();
+builder.addFunction('f', kSig_v_v).addBody([]);
+builder.addFunction('g', kSig_v_v).addBody([]);
+builder.instantiate();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-717056.js b/deps/v8/test/mjsunit/regress/wasm/regression-717056.js
new file mode 100644
index 0000000000..534cf74eb7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-717056.js
@@ -0,0 +1,16 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check that stack overflow inside asm-wasm translation propagates correctly.
+
+function asm() {
+ 'use asm';
+ return {};
+}
+
+function rec() {
+ asm();
+ rec();
+}
+assertThrows(() => rec(), RangeError);
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-719175.js b/deps/v8/test/mjsunit/regress/wasm/regression-719175.js
new file mode 100644
index 0000000000..c6217b0b01
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-719175.js
@@ -0,0 +1,16 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --wasm-interpret-all
+
+function asm() {
+ 'use asm';
+ function f() {
+ if (1.0 % 2.5 == -0.75) {
+ }
+ return 0;
+ }
+ return {f: f};
+}
+asm().f();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-722445.js b/deps/v8/test/mjsunit/regress/wasm/regression-722445.js
new file mode 100644
index 0000000000..f6a96dc60d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-722445.js
@@ -0,0 +1,16 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-constants.js');
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+builder.addFunction('f', kSig_v_v).addBody([
+ kExprI32Const, 0, kExprBrTable,
+ // 0x80000000 in LEB:
+ 0x80, 0x80, 0x80, 0x80, 0x08,
+ // First break target. Creation of this node triggered the bug.
+ 0
+]);
+assertThrows(() => builder.instantiate(), WebAssembly.CompileError);
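
The five bytes after kExprBrTable are the unsigned LEB128 encoding of 0x80000000: seven payload bits per byte, least-significant group first, with the high bit marking continuation. A sketch of the encoding (assumed helper, not from the module builder):

  function encodeU32(value) {
    const bytes = [];
    do {
      let b = value & 0x7f;        // low seven payload bits
      value >>>= 7;
      if (value !== 0) b |= 0x80;  // continuation bit on all but the last byte
      bytes.push(b);
    } while (value !== 0);
    return bytes;
  }
  print(encodeU32(0x80000000).map(b => b.toString(16)));  // 80,80,80,80,8
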
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-731351.js b/deps/v8/test/mjsunit/regress/wasm/regression-731351.js
new file mode 100644
index 0000000000..238223ac2c
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-731351.js
@@ -0,0 +1,23 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --validate-asm --expose-gc --stress-opt
+
+gc();
+function asm(stdlib, foreign, buffer) {
+ "use asm";
+ var HEAP32 = new stdlib.Uint32Array(buffer);
+ function load(a) {
+ a = a | 0;
+ return +(HEAP32[a >> 2] >>> 0);
+ }
+ return {load: load};
+}
+
+function RunAsmJsTest() {
+ buffer = new ArrayBuffer(65536);
+ var asm_module = asm({Uint32Array: Uint32Array}, {}, buffer);
+ asm_module.load(buffer.byteLength);
+}
+RunAsmJsTest();
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-734108.js b/deps/v8/test/mjsunit/regress/wasm/regression-734108.js
new file mode 100644
index 0000000000..d8774f4a84
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-734108.js
@@ -0,0 +1,16 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-async-compilation
+
+__v_0 = new Uint8Array([
+ 0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00, 0x01, 0x05, 0x01,
+ 0x60, 0x00, 0x01, 0x7f, 0x03, 0x02, 0x01, 0x00, 0x05, 0x03, 0x01,
+ 0x00, 0x01, 0x07, 0x11, 0x02, 0x04, 0x67, 0x72, 0x6f, 0x77, 0x00,
+ 0x00, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x02, 0x00, 0x0a,
+ 0x08, 0x01, 0x06, 0x00, 0x41, 0x01, 0x40, 0x00, 0x0b
+]);
+assertPromiseResult(
+ WebAssembly.compile(__v_0)
+);
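
The first eight bytes of __v_0 are the standard wasm module header, the magic \0asm followed by a little-endian version field of 1; the remaining bytes declare the exported grow function and exported memory. A quick decoding sketch (illustrative, run alongside the test):

  print(String.fromCharCode(...__v_0.subarray(1, 4)));   // "asm"
  print(new DataView(__v_0.buffer).getUint32(4, true));  // 1 (version)
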
diff --git a/deps/v8/test/mjsunit/regress/wasm/regression-737069.js b/deps/v8/test/mjsunit/regress/wasm/regression-737069.js
new file mode 100644
index 0000000000..c68d10f06d
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regression-737069.js
@@ -0,0 +1,35 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-wasm
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+let binary = new Binary;
+
+binary.emit_header();
+binary.emit_section(kTypeSectionCode, section => {
+ section.emit_u32v(1); // number of types
+ section.emit_u8(kWasmFunctionTypeForm);
+ section.emit_u32v(0); // number of parameters
+ section.emit_u32v(0); // number of returns
+});
+binary.emit_section(kFunctionSectionCode, section => {
+ section.emit_u32v(1); // number of functions
+ section.emit_u32v(0); // type index
+});
+
+binary.emit_u8(kCodeSectionCode);
+binary.emit_u8(0x02); // section length
+binary.emit_u8(0x01); // number of functions
+binary.emit_u8(0x40); // function body size
+// Function body is missing here.
+
+let buffer = new ArrayBuffer(binary.length);
+let view = new Uint8Array(buffer);
+for (let i = 0; i < binary.length; i++) {
+ view[i] = binary[i] | 0;
+}
+WebAssembly.validate(buffer);
diff --git a/deps/v8/test/mjsunit/setters-on-elements.js b/deps/v8/test/mjsunit/setters-on-elements.js
index f90c510c22..48fa33b5fe 100644
--- a/deps/v8/test/mjsunit/setters-on-elements.js
+++ b/deps/v8/test/mjsunit/setters-on-elements.js
@@ -25,11 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --max-opt-count=100 --noalways-opt
-// Flags: --crankshaft
-
-// We specify max-opt-count because we opt/deopt the same function many
-// times.
+// Flags: --allow-natives-syntax --noalways-opt --opt
// It's nice to run this in other browsers too.
var standalone = false;
diff --git a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
index 56d07ad62b..95fa8d337c 100644
--- a/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
+++ b/deps/v8/test/mjsunit/shared-function-tier-up-turbo.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
//
// Flags: --mark-shared-functions-for-tier-up --allow-natives-syntax
-// Flags: --ignition --turbo --crankshaft --no-always-opt
+// Flags: --ignition --turbo --opt --no-always-opt
// Flags: --turbo-filter=*
// If we are always or never optimizing it is useless.
diff --git a/deps/v8/test/mjsunit/shift-for-integer-div.js b/deps/v8/test/mjsunit/shift-for-integer-div.js
index 7aadb4d237..9264242879 100644
--- a/deps/v8/test/mjsunit/shift-for-integer-div.js
+++ b/deps/v8/test/mjsunit/shift-for-integer-div.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
function divp4(x) {
return x / 4;
diff --git a/deps/v8/test/mjsunit/shifts.js b/deps/v8/test/mjsunit/shifts.js
index b91b3e8a00..37ba9d17c1 100644
--- a/deps/v8/test/mjsunit/shifts.js
+++ b/deps/v8/test/mjsunit/shifts.js
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --nofull-compiler
-
// test a few corner cases with shifts
// The result of the shift is not a Smi.
diff --git a/deps/v8/test/mjsunit/sin-cos.js b/deps/v8/test/mjsunit/sin-cos.js
index 8c4b80e8c1..7af471d3c0 100644
--- a/deps/v8/test/mjsunit/sin-cos.js
+++ b/deps/v8/test/mjsunit/sin-cos.js
@@ -27,7 +27,7 @@
// Test Math.sin and Math.cos.
-// Flags: --allow-natives-syntax --crankshaft
+// Flags: --allow-natives-syntax --opt
assertEquals("-Infinity", String(1/Math.sin(-0)));
assertEquals(1, Math.cos(-0));
diff --git a/deps/v8/test/mjsunit/skipping-inner-functions.js b/deps/v8/test/mjsunit/skipping-inner-functions.js
new file mode 100644
index 0000000000..1c5538567f
--- /dev/null
+++ b/deps/v8/test/mjsunit/skipping-inner-functions.js
@@ -0,0 +1,37 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-preparser-scope-analysis
+
+(function TestBasicSkipping() {
+ var result = 0;
+
+ function lazy(ctxt_alloc_param) {
+ var ctxt_alloc_var = 10;
+ function skip_me() {
+ result = ctxt_alloc_param + ctxt_alloc_var;
+ }
+ return skip_me;
+ }
+ // Test that parameters and variables of the outer function get context
+ // allocated even if we skip the inner function.
+ lazy(9)();
+ assertEquals(19, result);
+})();
+
+(function TestSkippingFunctionWithEval() {
+ var result = 0;
+
+ function lazy(ctxt_alloc_param) {
+ var ctxt_alloc_var = 10;
+ function skip_me() {
+ eval('result = ctxt_alloc_param + ctxt_alloc_var');
+ }
+ return skip_me;
+ }
+ // Test that parameters and variables of the outer function get context
+ // allocated even if we skip the inner function.
+ lazy(9)();
+ assertEquals(19, result);
+})();
diff --git a/deps/v8/test/mjsunit/smi-mul-const.js b/deps/v8/test/mjsunit/smi-mul-const.js
index ef0685b7a9..e5255014dc 100644
--- a/deps/v8/test/mjsunit/smi-mul-const.js
+++ b/deps/v8/test/mjsunit/smi-mul-const.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft --noalways-opt
+// Flags: --allow-natives-syntax --opt --noalways-opt
function check(func, input, expected) {
func(-1);
diff --git a/deps/v8/test/mjsunit/smi-mul.js b/deps/v8/test/mjsunit/smi-mul.js
index 236563590b..12d206abec 100644
--- a/deps/v8/test/mjsunit/smi-mul.js
+++ b/deps/v8/test/mjsunit/smi-mul.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft --noalways-opt
+// Flags: --allow-natives-syntax --opt --noalways-opt
function mul(a, b) {
return a * b;
diff --git a/deps/v8/test/mjsunit/stack-traces.js b/deps/v8/test/mjsunit/stack-traces.js
index 9522178c0d..33552bbf5c 100644
--- a/deps/v8/test/mjsunit/stack-traces.js
+++ b/deps/v8/test/mjsunit/stack-traces.js
@@ -264,7 +264,7 @@ function testOmittedBuiltin(throwing, omitted) {
}
-testTrace("testArrayNative", testArrayNative, ["Array.map (native)"]);
+testTrace("testArrayNative", testArrayNative, ["Array.map"]);
testTrace("testNested", testNested, ["at one", "at two", "at three"]);
testTrace("testMethodNameInference", testMethodNameInference, ["at Foo.bar"]);
testTrace("testImplicitConversion", testImplicitConversion, ["at Nirk.valueOf"]);
diff --git a/deps/v8/test/mjsunit/string-case.js b/deps/v8/test/mjsunit/string-case.js
index 34c2340d33..b6934eb705 100644
--- a/deps/v8/test/mjsunit/string-case.js
+++ b/deps/v8/test/mjsunit/string-case.js
@@ -59,8 +59,9 @@ function test(length) {
strLower += String.fromCharCode(charCodeToLower(c));
strUpper += String.fromCharCode(charCodeToUpper(c));
}
- %FlattenString(strLower);
- %FlattenString(strUpper);
+ str = %FlattenString(str);
+ strLower = %FlattenString(strLower);
+ strUpper = %FlattenString(strUpper);
// Sequential string.
assertEquals(strLower, str.toLowerCase());
assertEquals(strUpper, str.toUpperCase());
diff --git a/deps/v8/test/mjsunit/strong-rooted-literals.js b/deps/v8/test/mjsunit/strong-rooted-literals.js
index c124a913da..55ce89e4ef 100644
--- a/deps/v8/test/mjsunit/strong-rooted-literals.js
+++ b/deps/v8/test/mjsunit/strong-rooted-literals.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-gc --turbo --crankshaft
+// Flags: --allow-natives-syntax --expose-gc --turbo --opt
// Make sure literals are strongly rooted and safe from weak-code deopts.
diff --git a/deps/v8/test/mjsunit/type-profile/regress-707223.js b/deps/v8/test/mjsunit/type-profile/regress-707223.js
new file mode 100644
index 0000000000..078b687c51
--- /dev/null
+++ b/deps/v8/test/mjsunit/type-profile/regress-707223.js
@@ -0,0 +1,8 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --type-profile
+
+let e;
+eval("e");
diff --git a/deps/v8/test/mjsunit/unary-minus-deopt.js b/deps/v8/test/mjsunit/unary-minus-deopt.js
index cc3bede4d2..07f7e0e497 100644
--- a/deps/v8/test/mjsunit/unary-minus-deopt.js
+++ b/deps/v8/test/mjsunit/unary-minus-deopt.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax --crankshaft --no-always-opt
+// Flags: --allow-natives-syntax --opt --no-always-opt
// This is a boiled-down example happening in the Epic Citadel demo:
// After deopting, the multiplication for unary minus stayed in Smi
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm-expr.js b/deps/v8/test/mjsunit/wasm/asm-wasm-expr.js
index 3b20826fe7..ac42759788 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm-expr.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm-expr.js
@@ -23,7 +23,7 @@ const assign_in_stmt = [
"do { S } while (=)",
];
const assign_in_expr = [
- "i32_func(=)",
+ "i32_func(=) | 0",
"(=) ? E : E",
"E ? (=) : E",
"E ? E : (=)",
@@ -108,9 +108,6 @@ function DoTheTests(expr, assign, stmt) {
e = e.replace(/S/g, stmt);
var str = main.toString().replace("FUNC_BODY", "return (" + e + ") | 0;");
var asm_source = MODULE_TEMPLATE.toString().replace("FUNC_DECL", str);
- // TODO(titzer): a verbosity API for these kinds of tests?
- // print(asm_source);
-
doTest(asm_source, "(" + test + ") " + e);
}
@@ -123,8 +120,6 @@ function DoTheTests(expr, assign, stmt) {
e = e.replace(/S/g, stmt);
var str = main.toString().replace("FUNC_BODY", e + "; return 0;");
var asm_source = MODULE_TEMPLATE.toString().replace("FUNC_DECL", str);
-// print(asm_source);
-
doTest(asm_source, "(" + test + ") " + e);
}
@@ -134,9 +129,8 @@ function DoTheTests(expr, assign, stmt) {
var js_module = eval("(" + nonasm_source + ")")(stdlib, {}, buffer);
expect(js_module);
- var asmfunc = eval("(" + asm_source + ")");
-
print("Testing ASMJS: " + orig);
+ var asmfunc = eval("(" + asm_source + ")");
var asm_module = asmfunc(stdlib, {}, buffer);
assertTrue(%IsAsmWasmCode(asmfunc));
expect(asm_module);
diff --git a/deps/v8/test/mjsunit/wasm/asm-wasm.js b/deps/v8/test/mjsunit/wasm/asm-wasm.js
index a5d04ce4fe..9d7a807bc9 100644
--- a/deps/v8/test/mjsunit/wasm/asm-wasm.js
+++ b/deps/v8/test/mjsunit/wasm/asm-wasm.js
@@ -549,7 +549,6 @@ function TestHeapAccessIntTypes() {
assertValidAsm(module_decl);
assertEquals(7, module.caller());
assertEquals(7, memory_view[2]);
- assertEquals(7, module_decl(stdlib).caller());
assertValidAsm(module_decl);
}
}
@@ -1227,8 +1226,9 @@ TestForeignVariables();
return {load: load, iload: iload, store: store, storeb: storeb};
}
+ var memory = new ArrayBuffer(1024);
var module_decl = eval('(' + TestByteHeapAccessCompat.toString() + ')');
- var m = module_decl(stdlib);
+ var m = module_decl(stdlib, null, memory);
assertValidAsm(module_decl);
m.store(0, 20);
m.store(4, 21);
diff --git a/deps/v8/test/mjsunit/wasm/async-compile.js b/deps/v8/test/mjsunit/wasm/async-compile.js
index 135e39a9d8..b95930aa5a 100644
--- a/deps/v8/test/mjsunit/wasm/async-compile.js
+++ b/deps/v8/test/mjsunit/wasm/async-compile.js
@@ -7,43 +7,66 @@
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-let ok_buffer = (() => {
- var builder = new WasmModuleBuilder();
- builder.addFunction("f", kSig_i_v)
- .addBody([kExprI32Const, 42])
- .exportAs("f");
- return builder.toBuffer();
-})();
-
-// The OK buffer validates and can be made into a module.
-assertTrue(WebAssembly.validate(ok_buffer));
-let ok_module = new WebAssembly.Module(ok_buffer);
-assertTrue(ok_module instanceof WebAssembly.Module);
-
-// The bad buffer does not validate and cannot be made into a module.
-let bad_buffer = new ArrayBuffer(0);
-assertFalse(WebAssembly.validate(bad_buffer));
-assertThrows(() => new WebAssembly.Module(bad_buffer), WebAssembly.CompileError);
-
-function checkModule(module) {
- assertTrue(module instanceof WebAssembly.Module);
+function assertCompiles(buffer) {
+ return assertPromiseResult(
+ WebAssembly.compile(buffer),
+ module => assertTrue(module instanceof WebAssembly.Module),
+ ex => assertUnreachable);
}
-function checkCompileError(ex) {
- assertTrue(ex instanceof WebAssembly.CompileError);
+function assertCompileError(buffer) {
+ return assertPromiseResult(
+ WebAssembly.compile(buffer), module => assertUnreachable,
+ ex => assertTrue(ex instanceof WebAssembly.CompileError));
}
-let kNumCompiles = 3;
+assertPromiseResult(async function basicCompile() {
+ let ok_buffer = (() => {
+ var builder = new WasmModuleBuilder();
+ builder.addFunction('f', kSig_i_v)
+ .addBody([kExprI32Const, 42])
+ .exportAs('f');
+ return builder.toBuffer();
+ })();
-// Three compilations of the OK module should succeed.
-for (var i = 0; i < kNumCompiles; i++) {
- assertPromiseResult(WebAssembly.compile(ok_buffer), checkModule,
- (ex) => assertUnreachable);
-}
+ // The OK buffer validates and can be made into a module.
+ assertTrue(WebAssembly.validate(ok_buffer));
+ let ok_module = new WebAssembly.Module(ok_buffer);
+ assertTrue(ok_module instanceof WebAssembly.Module);
-// Three compilations of the bad module should fail.
-for (var i = 0; i < kNumCompiles; i++) {
- assertPromiseResult(WebAssembly.compile(bad_buffer),
- (module) => assertUnreachable,
- checkCompileError);
-}
+ // The bad buffer does not validate and cannot be made into a module.
+ let bad_buffer = new ArrayBuffer(0);
+ assertFalse(WebAssembly.validate(bad_buffer));
+ assertThrows(
+ () => new WebAssembly.Module(bad_buffer), WebAssembly.CompileError);
+
+ let kNumCompiles = 3;
+
+ // Three compilations of the OK module should succeed.
+ for (var i = 0; i < kNumCompiles; i++) {
+ await assertCompiles(ok_buffer);
+ }
+
+ // Three compilations of the bad module should fail.
+ for (var i = 0; i < kNumCompiles; i++) {
+ await assertCompileError(bad_buffer);
+ }
+}());
+
+assertPromiseResult(async function badFunctionInTheMiddle() {
+  // We had an error where an exception was generated by a background task and
+  // later thrown in a foreground task. The handle to the exception died
+  // in between, since the HandleScope had been left.
+  // This test reproduces that error.
+ let builder = new WasmModuleBuilder();
+ let sig = builder.addType(kSig_i_v);
+ for (var i = 0; i < 10; ++i) {
+ builder.addFunction('a' + i, sig).addBody([kExprI32Const, 42]);
+ }
+ builder.addFunction('bad', sig).addBody([]);
+ for (var i = 0; i < 10; ++i) {
+ builder.addFunction('b' + i, sig).addBody([kExprI32Const, 42]);
+ }
+ let buffer = builder.toBuffer();
+ await assertCompileError(buffer);
+}());
diff --git a/deps/v8/test/mjsunit/wasm/compilation-limits.js b/deps/v8/test/mjsunit/wasm/compilation-limits.js
index 1a4fa0a8ea..2cc38bdfea 100644
--- a/deps/v8/test/mjsunit/wasm/compilation-limits.js
+++ b/deps/v8/test/mjsunit/wasm/compilation-limits.js
@@ -106,8 +106,4 @@ async function TestAll() {
await FailAsyncInstantiate();
}
-%IncrementWaitCount();
-TestAll().then(
- () => { %DecrementWaitCount(); },
- () => { %DecrementWaitCount(); }
-);
+assertPromiseResult(TestAll());
diff --git a/deps/v8/test/mjsunit/wasm/gc-buffer.js b/deps/v8/test/mjsunit/wasm/gc-buffer.js
index 6bcf299d8b..f187d7dabe 100644
--- a/deps/v8/test/mjsunit/wasm/gc-buffer.js
+++ b/deps/v8/test/mjsunit/wasm/gc-buffer.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --stress-gc --expose-gc
+// Flags: --expose-wasm --gc-interval=500 --stress-compaction --expose-gc
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/gc-stress.js b/deps/v8/test/mjsunit/wasm/gc-stress.js
index 2bf2f758d6..a6c408d3db 100644
--- a/deps/v8/test/mjsunit/wasm/gc-stress.js
+++ b/deps/v8/test/mjsunit/wasm/gc-stress.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm --stress-gc
+// Flags: --expose-wasm --gc-interval=500 --stress-compaction
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/huge-memory.js b/deps/v8/test/mjsunit/wasm/huge-memory.js
new file mode 100644
index 0000000000..9719ad4a28
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/huge-memory.js
@@ -0,0 +1,76 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-max-mem-pages=49152
+
+// This test makes sure things don't break once we support >2GB wasm memories.
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function testHugeMemory() {
+ var builder = new WasmModuleBuilder();
+
+ const num_pages = 49152; // 3GB
+
+ builder.addMemory(num_pages, num_pages, true);
+ builder.addFunction("geti", kSig_i_ii)
+ .addBody([
+ kExprGetLocal, 0,
+ kExprGetLocal, 1,
+ kExprI32Mul,
+ kExprI32LoadMem, 0, 0,
+ ])
+ .exportFunc();
+
+ var module = builder.instantiate();
+ const geti = module.exports.geti;
+
+ print("In bounds");
+ assertEquals(0, geti(2500, 1 << 20));
+ print("Out of bounds");
+ assertTraps(kTrapMemOutOfBounds, () => geti(3500, 1 << 20));
+}
+testHugeMemory();
+
+function testHugeMemoryConstInBounds() {
+ var builder = new WasmModuleBuilder();
+
+ const num_pages = 49152; // 3GB
+
+ builder.addMemory(num_pages, num_pages, true);
+ builder.addFunction("geti", kSig_i_v)
+ .addBody([
+ kExprI32Const, 0x80, 0x80, 0x80, 0x80, 0x7A, // 0xA0000000, 2.5GB
+ kExprI32LoadMem, 0, 0,
+ ])
+ .exportFunc();
+
+ var module = builder.instantiate();
+ const geti = module.exports.geti;
+
+ print("In bounds");
+ assertEquals(0, geti());
+}
+testHugeMemoryConstInBounds();
+
+function testHugeMemoryConstOutOfBounds() {
+ var builder = new WasmModuleBuilder();
+
+ const num_pages = 49152; // 3GB
+
+ builder.addMemory(num_pages, num_pages, true);
+ builder.addFunction("geti", kSig_i_v)
+ .addBody([
+ kExprI32Const, 0x80, 0x80, 0x80, 0x80, 0x7E, // 0xE0000000, 3.5GB
+ kExprI32LoadMem, 0, 0,
+ ])
+ .exportFunc();
+
+ var module = builder.instantiate();
+ const geti = module.exports.geti;
+
+ print("Out of bounds");
+ assertTraps(kTrapMemOutOfBounds, geti);
+}
+testHugeMemoryConstOutOfBounds();
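
Note: the byte sequences in the kExprI32Const comments above are the signed
LEB128 encodings of the 32-bit constants (0xA0000000 and 0xE0000000
reinterpreted as negative i32 values). A small standalone sketch to check such
encodings; the helper is illustrative and not part of the test harness:

    function signedLeb128(value) {
      const bytes = [];
      value = value | 0;  // treat as a signed 32-bit integer
      while (true) {
        const byte = value & 0x7f;
        value >>= 7;  // arithmetic shift preserves the sign
        const done = (value === 0 && (byte & 0x40) === 0) ||
                     (value === -1 && (byte & 0x40) !== 0);
        bytes.push(done ? byte : byte | 0x80);
        if (done) return bytes;
      }
    }

    signedLeb128(0xA0000000);  // [0x80, 0x80, 0x80, 0x80, 0x7a]
    signedLeb128(0xE0000000);  // [0x80, 0x80, 0x80, 0x80, 0x7e]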
diff --git a/deps/v8/test/mjsunit/wasm/indirect-tables.js b/deps/v8/test/mjsunit/wasm/indirect-tables.js
index 7583d05968..9bc646e2f0 100644
--- a/deps/v8/test/mjsunit/wasm/indirect-tables.js
+++ b/deps/v8/test/mjsunit/wasm/indirect-tables.js
@@ -523,3 +523,34 @@ function js_div(a, b) { return (a / b) | 0; }
// Try to grow past imported maximum
assertThrows(() => table.grow(21));
})();
+
+(function InitImportedTableSignatureMismatch() {
+ // instance0 exports a function table and a main function which indirectly
+ // calls a function from the table.
+ let builder0 = new WasmModuleBuilder();
+ let sig_index = builder0.addType(kSig_i_v);
+ builder0.addFunction('main', kSig_i_i)
+ .addBody([
+ kExprGetLocal, 0, // -
+ kExprCallIndirect, sig_index, kTableZero
+ ])
+ .exportAs('main');
+ builder0.setFunctionTableLength(3);
+ builder0.addExportOfKind('table', kExternalTable);
+ let module0 = new WebAssembly.Module(builder0.toBuffer());
+ let instance0 = new WebAssembly.Instance(module0);
+
+ // instance1 imports the table and adds a function to it.
+ let builder1 = new WasmModuleBuilder();
+ builder1.addFunction('f', kSig_i_i).addBody([kExprGetLocal, 0]);
+ builder1.addImportedTable('z', 'table');
+ builder1.addFunctionTableInit(0, false, [0], true);
+ let module1 = new WebAssembly.Module(builder1.toBuffer());
+ let instance1 =
+ new WebAssembly.Instance(module1, {z: {table: instance0.exports.table}});
+
+ // Calling the main method on instance0 should fail, because the signature of
+ // the added function does not match.
+ assertThrows(
+ () => instance0.exports.main(0), WebAssembly.RuntimeError);
+})();
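
Note: the table limit behavior referenced in the context above ("Try to grow
past imported maximum") is also observable through the plain JS API, without a
module builder; a minimal sketch:

    let table = new WebAssembly.Table({element: 'anyfunc', initial: 1, maximum: 2});
    table.grow(1);                      // ok: size is now at the maximum of 2
    assertThrows(() => table.grow(1));  // growing past the maximum throws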
diff --git a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
index e876a7997f..d2489f3e89 100644
--- a/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
+++ b/deps/v8/test/mjsunit/wasm/instantiate-module-basic.js
@@ -70,7 +70,7 @@ function CheckInstance(instance) {
print('async instantiate...');
let instance_promise = WebAssembly.instantiate(buffer);
- assertPromiseResult(instance_promise, CheckInstance);
+ assertPromiseResult(instance_promise, pair => CheckInstance(pair.instance));
})();
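
Note: the fix above reflects that WebAssembly.instantiate(BufferSource)
resolves to a {module, instance} pair rather than a bare instance, hence
pair.instance; a minimal illustration:

    WebAssembly.instantiate(buffer).then(pair => {
      assertTrue(pair.module instanceof WebAssembly.Module);
      assertTrue(pair.instance instanceof WebAssembly.Instance);
    });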
// Check that validate works correctly for a module.
diff --git a/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js b/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
index e9e9a9ac48..b0016ec9aa 100644
--- a/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
+++ b/deps/v8/test/mjsunit/wasm/instantiate-run-basic.js
@@ -2,19 +2,33 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --expose-wasm
+// Flags: --allow-natives-syntax
load("test/mjsunit/wasm/wasm-constants.js");
load("test/mjsunit/wasm/wasm-module-builder.js");
-(function BasicTest() {
- var kReturnValue = 15;
+const kReturnValue = 15;
+
+function getBuilder() {
var builder = new WasmModuleBuilder();
builder.addFunction("main", kSig_i_i)
.addBody([kExprI32Const, kReturnValue])
.exportFunc();
+ return builder;
+}
+(function BasicTest() {
+ var builder = getBuilder();
var main = builder.instantiate().exports.main;
assertEquals(kReturnValue, main());
})();
+
+(function AsyncTest() {
+ var builder = getBuilder();
+ var buffer = builder.toBuffer();
+ assertPromiseResult(
+ WebAssembly.instantiate(buffer)
+ .then(pair => pair.instance.exports.main(), assertUnreachable)
+ .then(result => assertEquals(kReturnValue, result), assertUnreachable));
+})();
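
Note: the promise chain in AsyncTest could equivalently be written with
async/await, matching the style used elsewhere in this change; a sketch:

    (function AsyncAwaitTest() {
      assertPromiseResult(async function() {
        let pair = await WebAssembly.instantiate(getBuilder().toBuffer());
        assertEquals(kReturnValue, pair.instance.exports.main());
      }());
    })();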
diff --git a/deps/v8/test/mjsunit/wasm/js-api.js b/deps/v8/test/mjsunit/wasm/js-api.js
index 689a0adbc4..0f6b0816be 100644
--- a/deps/v8/test/mjsunit/wasm/js-api.js
+++ b/deps/v8/test/mjsunit/wasm/js-api.js
@@ -713,7 +713,6 @@ function assertCompileError(args, err, msg) {
var error = null;
assertPromiseResult(compile(...args), unexpectedSuccess, error => {
assertTrue(error instanceof err);
- assertTrue(Boolean(error.stack.match('js-api.js')));
// TODO assertTrue(Boolean(error.message.match(msg)));
});
}
@@ -760,7 +759,6 @@ function assertInstantiateError(args, err, msg) {
var error = null;
assertPromiseResult(instantiate(...args), unexpectedSuccess, error => {
assertTrue(error instanceof err);
- assertTrue(Boolean(error.stack.match('js-api.js')));
// TODO assertTrue(Boolean(error.message.match(msg)));
});
}
diff --git a/deps/v8/test/mjsunit/wasm/wasm-api-overloading.js b/deps/v8/test/mjsunit/wasm/wasm-api-overloading.js
new file mode 100644
index 0000000000..37320e54ce
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/wasm-api-overloading.js
@@ -0,0 +1,53 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+load("test/mjsunit/wasm/wasm-constants.js");
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+%ResetWasmOverloads();
+let buffer = (() => {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction("f", kSig_i_v)
+ .addBody([kExprI32Const, 42])
+ .exportAs("f");
+ return builder.toBuffer();
+})();
+
+var module = new WebAssembly.Module(buffer);
+var wrapper = [module];
+
+assertPromiseResult(
+ WebAssembly.instantiateStreaming(wrapper),
+ assertUnreachable,
+ e => assertTrue(e instanceof TypeError));
+
+assertPromiseResult(
+ WebAssembly.compileStreaming(wrapper),
+ assertUnreachable,
+ e => assertTrue(e instanceof TypeError));
+
+assertPromiseResult(
+ (() => {
+ %SetWasmCompileFromPromiseOverload();
+ return WebAssembly.compileStreaming(wrapper);
+ })(),
+ module => {
+ assertTrue(module instanceof WebAssembly.Module);
+ %ResetWasmOverloads();
+ },
+ assertUnreachable);
+
+assertPromiseResult(
+ (() => {
+ %SetWasmCompileFromPromiseOverload();
+ return WebAssembly.instantiateStreaming(wrapper);
+ })(),
+ pair => {
+ assertTrue(pair.instance instanceof WebAssembly.Instance);
+ assertTrue(pair.module instanceof WebAssembly.Module);
+ %ResetWasmOverloads();
+ },
+ assertUnreachable);
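
Note: outside of d8, the streaming entry points consume a Response (or a
promise of one), which is why passing the module-wrapper array rejects with a
TypeError until the test-only overload is installed. A sketch of typical Web
usage; fetch is not available in d8, and the URL is illustrative:

    async function loadWasm(url, imports) {
      // instantiateStreaming accepts a Response or a Promise<Response>.
      let pair = await WebAssembly.instantiateStreaming(fetch(url), imports);
      return pair.instance;
    }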
diff --git a/deps/v8/test/mkgrokdump/BUILD.gn b/deps/v8/test/mkgrokdump/BUILD.gn
new file mode 100644
index 0000000000..5359f675d0
--- /dev/null
+++ b/deps/v8/test/mkgrokdump/BUILD.gn
@@ -0,0 +1,27 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+
+v8_executable("mkgrokdump") {
+ testonly = true
+
+ # mkgrokdump is used to create tools/v8heapconst.py.
+
+ sources = [
+ "mkgrokdump.cc",
+ ]
+
+ configs = [ "../..:internal_config_base" ]
+
+ defines = []
+
+ deps = [
+ "../..:v8",
+ "../..:v8_libbase",
+ "../..:v8_libplatform",
+ "//build/config:exe_and_shlib_deps",
+ "//build/win:default_exe_manifest",
+ ]
+}
diff --git a/deps/v8/test/mkgrokdump/DEPS b/deps/v8/test/mkgrokdump/DEPS
new file mode 100644
index 0000000000..3e73aa244f
--- /dev/null
+++ b/deps/v8/test/mkgrokdump/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+src",
+]
diff --git a/deps/v8/test/mkgrokdump/README b/deps/v8/test/mkgrokdump/README
new file mode 100644
index 0000000000..2ee5ab099f
--- /dev/null
+++ b/deps/v8/test/mkgrokdump/README
@@ -0,0 +1,3 @@
+If you change the heap layout, expect this test to fail in the CQ. You will
+then need to build the x64 release configuration and run
+`<outdir>/mkgrokdump > tools/v8heapconst.py` to rebaseline.
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
new file mode 100644
index 0000000000..e5a41a13ba
--- /dev/null
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -0,0 +1,133 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+#include "include/libplatform/libplatform.h"
+#include "include/v8.h"
+
+#include "src/frames.h"
+#include "src/heap/heap.h"
+#include "src/heap/spaces.h"
+#include "src/isolate.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+
+static const char* kHeader =
+ "# Copyright 2017 the V8 project authors. All rights reserved.\n"
+ "# Use of this source code is governed by a BSD-style license that can\n"
+ "# be found in the LICENSE file.\n"
+ "\n"
+ "# This file is automatically generated by mkgrokdump and should not\n"
+ "# be modified manually.\n"
+ "\n"
+ "# List of known V8 instance types.\n";
+
+// Non-snapshot builds allocate objects in different places.
+// Debug builds emit debug code, affecting code object sizes.
+#if defined(V8_USE_SNAPSHOT) && !defined(DEBUG)
+static const char* kBuild = "shipping";
+#else
+static const char* kBuild = "non-shipping";
+#endif
+
+class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ void* Allocate(size_t length) override { return nullptr; }
+ void* AllocateUninitialized(size_t length) override { return nullptr; }
+ void Free(void* p, size_t) override {}
+};
+
+static int DumpHeapConstants(const char* argv0) {
+ // Start up V8.
+ v8::Platform* platform = v8::platform::CreateDefaultPlatform();
+ v8::V8::InitializePlatform(platform);
+ v8::V8::Initialize();
+ v8::V8::InitializeExternalStartupData(argv0);
+ Isolate::CreateParams create_params;
+ MockArrayBufferAllocator mock_arraybuffer_allocator;
+ create_params.array_buffer_allocator = &mock_arraybuffer_allocator;
+ Isolate* isolate = Isolate::New(create_params);
+ {
+ Isolate::Scope scope(isolate);
+ i::Heap* heap = reinterpret_cast<i::Isolate*>(isolate)->heap();
+ i::PrintF("%s", kHeader);
+#define DUMP_TYPE(T) i::PrintF(" %d: \"%s\",\n", i::T, #T);
+ i::PrintF("INSTANCE_TYPES = {\n");
+ INSTANCE_TYPE_LIST(DUMP_TYPE)
+ i::PrintF("}\n");
+#undef DUMP_TYPE
+
+ // Dump the KNOWN_MAP table to the console.
+ i::PrintF("\n# List of known V8 maps.\n");
+#define ROOT_LIST_CASE(type, name, camel_name) \
+ if (n == NULL && o == heap->name()) n = #camel_name;
+#define STRUCT_LIST_CASE(upper_name, camel_name, name) \
+ if (n == NULL && o == heap->name##_map()) n = #camel_name "Map";
+ i::HeapObjectIterator it(heap->map_space());
+ i::PrintF("KNOWN_MAPS = {\n");
+ for (i::Object* o = it.Next(); o != NULL; o = it.Next()) {
+ i::Map* m = i::Map::cast(o);
+ const char* n = NULL;
+ intptr_t p = reinterpret_cast<intptr_t>(m) & 0x7ffff;
+ int t = m->instance_type();
+ ROOT_LIST(ROOT_LIST_CASE)
+ STRUCT_LIST(STRUCT_LIST_CASE)
+ if (n == NULL) continue;
+ i::PrintF(" 0x%05" V8PRIxPTR ": (%d, \"%s\"),\n", p, t, n);
+ }
+ i::PrintF("}\n");
+#undef STRUCT_LIST_CASE
+#undef ROOT_LIST_CASE
+
+ // Dump the KNOWN_OBJECTS table to the console.
+ i::PrintF("\n# List of known V8 objects.\n");
+#define ROOT_LIST_CASE(type, name, camel_name) \
+ if (n == NULL && o == heap->name()) { \
+ n = #camel_name; \
+ i = i::Heap::k##camel_name##RootIndex; \
+ }
+ i::OldSpaces spit(heap);
+ i::PrintF("KNOWN_OBJECTS = {\n");
+ for (i::PagedSpace* s = spit.next(); s != NULL; s = spit.next()) {
+ i::HeapObjectIterator it(s);
+ // Code objects are generally platform-dependent.
+ if (s->identity() == i::CODE_SPACE) continue;
+ const char* sname = AllocationSpaceName(s->identity());
+ for (i::Object* o = it.Next(); o != NULL; o = it.Next()) {
+ const char* n = NULL;
+ i::Heap::RootListIndex i = i::Heap::kStrongRootListLength;
+ intptr_t p = reinterpret_cast<intptr_t>(o) & 0x7ffff;
+ ROOT_LIST(ROOT_LIST_CASE)
+ if (n == NULL) continue;
+ if (!i::Heap::RootIsImmortalImmovable(i)) continue;
+ i::PrintF(" (\"%s\", 0x%05" V8PRIxPTR "): \"%s\",\n", sname, p, n);
+ }
+ }
+ i::PrintF("}\n");
+#undef ROOT_LIST_CASE
+
+  // Dump the FRAME_MARKERS table to the console.
+ i::PrintF("\n# List of known V8 Frame Markers.\n");
+#define DUMP_MARKER(T, class) i::PrintF(" \"%s\",\n", #T);
+ i::PrintF("FRAME_MARKERS = (\n");
+ STACK_FRAME_TYPE_LIST(DUMP_MARKER)
+ i::PrintF(")\n");
+#undef DUMP_MARKER
+ }
+
+ i::PrintF("\n# This set of constants is generated from a %s build.\n",
+ kBuild);
+
+ // Teardown.
+ isolate->Dispose();
+ v8::V8::ShutdownPlatform();
+ delete platform;
+ return 0;
+}
+
+} // namespace v8
+
+int main(int argc, char* argv[]) { return v8::DumpHeapConstants(argv[0]); }
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.gyp b/deps/v8/test/mkgrokdump/mkgrokdump.gyp
new file mode 100644
index 0000000000..56f9ad14e0
--- /dev/null
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.gyp
@@ -0,0 +1,46 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'v8_code': 1,
+ },
+ 'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'mkgrokdump',
+ 'type': 'executable',
+ 'dependencies': [
+ '../../src/v8.gyp:v8',
+ '../../src/v8.gyp:v8_libbase',
+ '../../src/v8.gyp:v8_libplatform',
+ ],
+ 'include_dirs': [
+ '../..',
+ ],
+ 'sources': [
+ 'mkgrokdump.cc',
+ ],
+ },
+ ],
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'mkgrokdump_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'mkgrokdump',
+ ],
+ 'includes': [
+ '../../gypfiles/isolate.gypi',
+ ],
+ 'sources': [
+ 'mkgrokdump.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.isolate b/deps/v8/test/mkgrokdump/mkgrokdump.isolate
new file mode 100644
index 0000000000..b2cbc32551
--- /dev/null
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.isolate
@@ -0,0 +1,17 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ './mkgrokdump.status',
+ './testcfg.py',
+ '../../tools/v8heapconst.py',
+ '<(PRODUCT_DIR)/mkgrokdump<(EXECUTABLE_SUFFIX)',
+ ],
+ },
+ 'includes': [
+ '../../src/base.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.status b/deps/v8/test/mkgrokdump/mkgrokdump.status
new file mode 100644
index 0000000000..8fd6a0417a
--- /dev/null
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.status
@@ -0,0 +1,10 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+# Only test the default variant on x64.
+['variant != default or arch != x64', {
+ '*': [SKIP],
+}], # variant != default or arch != x64
+]
diff --git a/deps/v8/test/mkgrokdump/testcfg.py b/deps/v8/test/mkgrokdump/testcfg.py
new file mode 100644
index 0000000000..c47b59de4a
--- /dev/null
+++ b/deps/v8/test/mkgrokdump/testcfg.py
@@ -0,0 +1,49 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import difflib
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+
+class MkGrokdump(testsuite.TestSuite):
+
+ def __init__(self, name, root):
+ super(MkGrokdump, self).__init__(name, root)
+
+ def ListTests(self, context):
+ test = testcase.TestCase(self, self.shell())
+ return [test]
+
+ def GetFlagsForTestCase(self, testcase, context):
+ return []
+
+ def IsFailureOutput(self, testcase):
+ output = testcase.output
+ v8_path = os.path.dirname(os.path.dirname(os.path.abspath(self.root)))
+ expected_path = os.path.join(v8_path, "tools", "v8heapconst.py")
+ with open(expected_path) as f:
+ expected = f.read()
+ expected_lines = expected.splitlines()
+ actual_lines = output.stdout.splitlines()
+    diff = difflib.unified_diff(expected_lines, actual_lines, lineterm="",
+                                fromfile=expected_path)
+ diffstring = '\n'.join(diff)
+    if diffstring != "":
+      if "generated from a non-shipping build" in output.stdout:
+        return False
+      if "generated from a shipping build" not in output.stdout:
+ output.stdout = "Unexpected output:\n\n" + output.stdout
+ return True
+ output.stdout = diffstring
+ return True
+ return False
+
+ def shell(self):
+ return "mkgrokdump"
+
+def GetSuite(name, root):
+ return MkGrokdump(name, root)
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index 01dfa9ae8f..a30172ab58 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -932,14 +932,14 @@
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
}], # 'arch == mipsel or arch == mips64el or arch == mips64'
-['arch == mipsel and simulator_run == True', {
+['arch == mipsel and simulator_run', {
# Crashes due to C stack overflow.
'js1_5/extensions/regress-355497': [SKIP],
-}], # 'arch == mipsel and simulator_run == True'
+}], # 'arch == mipsel and simulator_run'
-['arch == mips64el and simulator_run == True', {
+['arch == mips64el and simulator_run', {
'js1_5/extensions/regress-355497': [FAIL_OK, 'Flags: --sim-stack-size=512'],
-}], # 'arch == mips64el and simulator_run == True'
+}], # 'arch == mips64el and simulator_run'
['arch == mips', {
@@ -960,13 +960,13 @@
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
}], # 'arch == mips'
-['arch == arm and simulator_run == True', {
+['arch == arm and simulator_run', {
#BUG(3837): Crashes due to C stack overflow.
'js1_5/extensions/regress-355497': [SKIP],
-}], # 'arch == arm and simulator_run == True'
+}], # 'arch == arm and simulator_run'
-['arch == arm64 and simulator_run == True', {
+['arch == arm64 and simulator_run', {
'js1_5/GC/regress-203278-2': [SKIP],
@@ -988,7 +988,7 @@
#BUG(3152): Avoid C stack overflow.
'js1_5/extensions/regress-355497': [FAIL_OK, 'Flags: --sim-stack-size=512'],
-}], # 'arch == arm64 and simulator_run == True'
+}], # 'arch == arm64 and simulator_run'
['variant == wasm_traps', {
'*': [SKIP],
diff --git a/deps/v8/test/test262/local-tests/test/language/module-code/comment-single-line-html-close-comment-before-function.js b/deps/v8/test/test262/local-tests/test/language/module-code/comment-single-line-html-close-comment-before-function.js
new file mode 100644
index 0000000000..92d7507cff
--- /dev/null
+++ b/deps/v8/test/test262/local-tests/test/language/module-code/comment-single-line-html-close-comment-before-function.js
@@ -0,0 +1,16 @@
+// Copyright (C) 2017 the V8 project authors. All rights reserved.
+// This code is governed by the BSD license found in the LICENSE file.
+/*---
+description: >
+ HTML-like comments are not available in module code
+ (SingleLineHTMLCloseComment)
+esid: sec-html-like-comments
+es6id: B1.3
+negative:
+ phase: early
+ type: SyntaxError
+flags: [module]
+---*/
+
+-->
+function f(){}
diff --git a/deps/v8/test/test262/local-tests/test/language/module-code/comment-single-line-html-close-comment-newline-before-function.js b/deps/v8/test/test262/local-tests/test/language/module-code/comment-single-line-html-close-comment-newline-before-function.js
new file mode 100644
index 0000000000..19b0c99250
--- /dev/null
+++ b/deps/v8/test/test262/local-tests/test/language/module-code/comment-single-line-html-close-comment-newline-before-function.js
@@ -0,0 +1,15 @@
+// Copyright (C) 2017 the V8 project authors. All rights reserved.
+// This code is governed by the BSD license found in the LICENSE file.
+/*---
+description: >
+ HTML-like comments are not available in module code
+ (SingleLineHTMLCloseComment)
+esid: sec-html-like-comments
+es6id: B1.3
+negative:
+ phase: early
+ type: SyntaxError
+flags: [module]
+---*/
+-->
+function f(){}
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index e9fb30ae9e..920ce3293f 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -117,7 +117,6 @@
'built-ins/TypedArray/prototype/slice/detached-buffer-custom-ctor-other-targettype': [FAIL],
'built-ins/TypedArray/prototype/slice/detached-buffer-custom-ctor-same-targettype': [FAIL],
'built-ins/TypedArray/prototype/slice/detached-buffer-get-ctor': [FAIL],
- 'built-ins/TypedArray/prototype/slice/detached-buffer-speciesctor-get-species-custom-ctor-throws': [FAIL],
'built-ins/TypedArray/prototype/some/callbackfn-detachbuffer': [FAIL],
'built-ins/TypedArray/prototype/sort/detached-buffer-comparefn': [FAIL],
# DataView functions should also throw on detached buffers
@@ -158,27 +157,6 @@
'built-ins/DataView/prototype/setUint8/detached-buffer': [FAIL],
'built-ins/DataView/prototype/setUint8/detached-buffer-before-outofrange-byteoffset': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=4648
- 'built-ins/TypedArray/prototype/copyWithin/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/every/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/fill/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/filter/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/find/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/findIndex/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/forEach/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/includes/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/indexOf/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/join/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/lastIndexOf/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/map/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/reverse/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/slice/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/some/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/sort/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/subarray/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/toLocaleString/detached-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/toString/detached-buffer': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=4034
'built-ins/ThrowTypeError/unique-per-realm-function-proto': [FAIL],
@@ -278,14 +256,14 @@
'built-ins/TypedArrays/internals/Set/conversion-operation-consistent-nan': [PASS, FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=5070
- 'annexB/built-ins/Object/prototype/__defineGetter__/define-non-configurable': [FAIL],
- 'annexB/built-ins/Object/prototype/__defineGetter__/define-non-extensible': [FAIL],
- 'annexB/built-ins/Object/prototype/__defineGetter__/this-non-obj': [FAIL],
- 'annexB/built-ins/Object/prototype/__defineSetter__/define-non-configurable': [FAIL],
- 'annexB/built-ins/Object/prototype/__defineSetter__/define-non-extensible': [FAIL],
- 'annexB/built-ins/Object/prototype/__defineSetter__/this-non-obj': [FAIL],
- 'annexB/built-ins/Object/prototype/__lookupGetter__/this-non-obj': [FAIL],
- 'annexB/built-ins/Object/prototype/__lookupSetter__/this-non-obj': [FAIL],
+ 'annexB/built-ins/Object/prototype/__defineGetter__/define-non-configurable': ['--harmony-strict-legacy-accessor-builtins'],
+ 'annexB/built-ins/Object/prototype/__defineGetter__/define-non-extensible': ['--harmony-strict-legacy-accessor-builtins'],
+ 'annexB/built-ins/Object/prototype/__defineGetter__/this-non-obj': ['--harmony-strict-legacy-accessor-builtins'],
+ 'annexB/built-ins/Object/prototype/__defineSetter__/define-non-configurable': ['--harmony-strict-legacy-accessor-builtins'],
+ 'annexB/built-ins/Object/prototype/__defineSetter__/define-non-extensible': ['--harmony-strict-legacy-accessor-builtins'],
+ 'annexB/built-ins/Object/prototype/__defineSetter__/this-non-obj': ['--harmony-strict-legacy-accessor-builtins'],
+ 'annexB/built-ins/Object/prototype/__lookupGetter__/this-non-obj': ['--harmony-strict-legacy-accessor-builtins'],
+ 'annexB/built-ins/Object/prototype/__lookupSetter__/this-non-obj': ['--harmony-strict-legacy-accessor-builtins'],
# https://bugs.chromium.org/p/v8/issues/detail?id=4451
# https://github.com/tc39/ecma262/issues/753
@@ -391,9 +369,6 @@
'language/statements/async-function/early-errors-declaration-formals-body-duplicate': [FAIL],
# Module-related tests
- # v8:5485
- 'language/module-code/comment-multi-line-html*': [FAIL],
- 'language/module-code/comment-single-line-html*': [FAIL],
# v8:5487
'language/module-code/namespace/internals/get-own-property-str-found-uninit': [FAIL],
# https://github.com/tc39/ecma262/pull/858
@@ -421,36 +396,6 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=5601
'intl402/PluralRules/*': [SKIP],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5855
- 'language/expressions/async-generators/*': ['--harmony-async-iteration'],
- 'language/statements/async-generator/*': ['--harmony-async-iteration'],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=6226
- 'language/expressions/async-generator/named-yield-star-async-next': [FAIL],
- 'language/expressions/async-generator/named-yield-star-async-return': [FAIL],
- 'language/expressions/async-generator/named-yield-star-async-throw': [FAIL],
- 'language/expressions/async-generator/yield-star-async-next': [FAIL],
- 'language/expressions/async-generator/yield-star-async-return': [FAIL],
- 'language/expressions/async-generator/yield-star-async-throw': [FAIL],
- 'language/expressions/class/async-gen-method-static-yield-star-async-next': [FAIL],
- 'language/expressions/class/async-gen-method-static-yield-star-async-return': [FAIL],
- 'language/expressions/class/async-gen-method-static-yield-star-async-throw': [FAIL],
- 'language/expressions/class/async-gen-method-yield-star-async-next': [FAIL],
- 'language/expressions/class/async-gen-method-yield-star-async-return': [FAIL],
- 'language/expressions/class/async-gen-method-yield-star-async-throw': [FAIL],
- 'language/expressions/object/method-definition/async-gen-yield-star-async-next': [FAIL],
- 'language/expressions/object/method-definition/async-gen-yield-star-async-return': [FAIL],
- 'language/expressions/object/method-definition/async-gen-yield-star-async-throw': [FAIL],
- 'language/statements/async-generator/yield-star-async-next': [FAIL],
- 'language/statements/async-generator/yield-star-async-return': [FAIL],
- 'language/statements/async-generator/yield-star-async-throw': [FAIL],
- 'language/statements/class/async-gen-method-static-yield-star-async-next': [FAIL],
- 'language/statements/class/async-gen-method-static-yield-star-async-return': [FAIL],
- 'language/statements/class/async-gen-method-static-yield-star-async-throw': [FAIL],
- 'language/statements/class/async-gen-method-yield-star-async-next': [FAIL],
- 'language/statements/class/async-gen-method-yield-star-async-return': [FAIL],
- 'language/statements/class/async-gen-method-yield-star-async-throw': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=6242
'language/expressions/async-generator/named-yield-star-sync-next': [FAIL],
'language/expressions/async-generator/named-yield-star-sync-return': [FAIL],
@@ -552,18 +497,6 @@
'built-ins/TypedArrays/buffer-arg-use-default-proto-if-custom-proto-is-not-object-sab': ['--harmony-sharedarraybuffer'],
'built-ins/TypedArrays/internals/Get/indexed-value-sab': ['--harmony-sharedarraybuffer'],
- # https://bugs.chromium.org/p/v8/issues/detail?id=5961
- 'built-ins/Atomics/add/good-views': ['--harmony-sharedarraybuffer', FAIL],
- 'built-ins/Atomics/and/good-views': ['--harmony-sharedarraybuffer', FAIL],
- 'built-ins/Atomics/compareExchange/good-views': ['--harmony-sharedarraybuffer', FAIL],
- 'built-ins/Atomics/exchange/good-views': ['--harmony-sharedarraybuffer', FAIL],
- 'built-ins/Atomics/load/good-views': ['--harmony-sharedarraybuffer', FAIL],
- 'built-ins/Atomics/or/good-views': ['--harmony-sharedarraybuffer', FAIL],
- 'built-ins/Atomics/store/good-views': ['--harmony-sharedarraybuffer', FAIL],
- 'built-ins/Atomics/sub/good-views': ['--harmony-sharedarraybuffer', FAIL],
- 'built-ins/Atomics/wake/good-views': ['--harmony-sharedarraybuffer', FAIL],
- 'built-ins/Atomics/xor/good-views': ['--harmony-sharedarraybuffer', FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=6045
'intl402/NumberFormat/prototype/format/11.3.2_TRF': [FAIL],
'intl402/NumberFormat/prototype/format/11.3.2_TRP': [FAIL],
@@ -597,19 +530,6 @@
# https://code.google.com/p/v8/issues/detail?id=4693
'language/block-scope/syntax/redeclaration-in-block/attempt-to-redeclare-function-declaration-with-function-declaration': [PASS, FAIL_SLOPPY],
- # https://bugs.chromium.org/p/v8/issues/detail?id=4953
- 'built-ins/TypedArray/prototype/subarray/speciesctor-get-ctor': [FAIL],
- 'built-ins/TypedArray/prototype/subarray/speciesctor-get-ctor-abrupt': [FAIL],
- 'built-ins/TypedArray/prototype/subarray/speciesctor-get-ctor-inherited': [FAIL],
- 'built-ins/TypedArray/prototype/subarray/speciesctor-get-ctor-returns-throws': [FAIL],
- 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species': [FAIL],
- 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species-abrupt': [FAIL],
- 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species-custom-ctor': [FAIL],
- 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species-custom-ctor-invocation': [FAIL],
- 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species-custom-ctor-returns-another-instance': [FAIL],
- 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species-custom-ctor-throws': [FAIL],
- 'built-ins/TypedArray/prototype/subarray/speciesctor-get-species-returns-throws': [FAIL],
-
# We do not expose Array.prototype.values due to webcompat issues.
# Most recent incompatability: https://crbug.com/615873
# https://code.google.com/p/v8/issues/detail?id=4247
@@ -712,7 +632,7 @@
# compilation of parenthesized function literals. Needs investigation.
'language/statements/function/S13.2.1_A1_T1': [SKIP],
- # BUG(3251225): Tests that timeout with --nocrankshaft.
+ # BUG(3251225): Tests that timeout with --noopt.
'built-ins/decodeURI/S15.1.3.1_A2.4_T1': [SKIP],
'built-ins/decodeURI/S15.1.3.1_A2.5_T1': [SKIP],
'built-ins/decodeURIComponent/S15.1.3.2_A2.4_T1': [SKIP],
@@ -721,16 +641,24 @@
'built-ins/encodeURIComponent/S15.1.3.4_A2.3_T1': [SKIP],
}], # 'arch == arm or arch == mipsel or arch == mips or arch == arm64'
+['byteorder == big', {
+  # Test failures on big-endian platforms due to the way the tests
+  # are written.
+
+ # https://github.com/tc39/test262/issues/757
+ 'built-ins/TypedArray/prototype/set/typedarray-arg-set-values-same-buffer-other-type': [SKIP],
+}],
+
['asan == True', {
# BUG(v8:4653): Test262 tests which rely on quit() are not compatible with
# asan's --omit-quit flag.
'built-ins/Promise/prototype/then/deferred-is-resolved-value': [SKIP],
}], # asan == True
-['variant != default and asan', {
- # BUG(chromium:710428).
+['asan and variant in [noturbofan, noturbofan_stress, fullcode]', {
+ # Demands too many resources to test the old pipeline with asan.
'*': [SKIP],
-}], # variant != default and asan
+}], # asan and variant in [noturbofan, noturbofan_stress, fullcode]
['asan == True or msan == True or tsan == True', {
# https://bugs.chromium.org/p/v8/issues/detail?id=4639
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 6bdc3199d9..ece24ad1c1 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -8,6 +8,7 @@ v8_executable("unittests") {
testonly = true
sources = [
+ "../../test/common/wasm/wasm-macro-gen.h",
"../../testing/gmock-support.h",
"../../testing/gtest-support.h",
"api/access-check-unittest.cc",
@@ -17,6 +18,8 @@ v8_executable("unittests") {
"api/remote-object-unittest.cc",
"api/v8-object-unittest.cc",
"asmjs/asm-scanner-unittest.cc",
+ "asmjs/asm-types-unittest.cc",
+ "asmjs/switch-logic-unittest.cc",
"base/atomic-utils-unittest.cc",
"base/bits-unittest.cc",
"base/cpu-unittest.cc",
@@ -35,8 +38,6 @@ v8_executable("unittests") {
"base/utils/random-number-generator-unittest.cc",
"cancelable-tasks-unittest.cc",
"char-predicates-unittest.cc",
- "compiler-dispatcher/compiler-dispatcher-helper.cc",
- "compiler-dispatcher/compiler-dispatcher-helper.h",
"compiler-dispatcher/compiler-dispatcher-job-unittest.cc",
"compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc",
"compiler-dispatcher/compiler-dispatcher-unittest.cc",
@@ -101,25 +102,29 @@ v8_executable("unittests") {
"eh-frame-iterator-unittest.cc",
"eh-frame-writer-unittest.cc",
"heap/bitmap-unittest.cc",
+ "heap/concurrent-marking-deque-unittest.cc",
"heap/embedder-tracing-unittest.cc",
"heap/gc-idle-time-handler-unittest.cc",
"heap/gc-tracer-unittest.cc",
"heap/heap-unittest.cc",
+ "heap/item-parallel-job-unittest.cc",
"heap/marking-unittest.cc",
"heap/memory-reducer-unittest.cc",
"heap/scavenge-job-unittest.cc",
"heap/slot-set-unittest.cc",
"heap/spaces-unittest.cc",
"heap/unmapper-unittest.cc",
+ "heap/workstealing-marking-deque-unittest.cc",
"interpreter/bytecode-array-builder-unittest.cc",
"interpreter/bytecode-array-iterator-unittest.cc",
"interpreter/bytecode-array-random-iterator-unittest.cc",
"interpreter/bytecode-array-writer-unittest.cc",
"interpreter/bytecode-decoder-unittest.cc",
+ "interpreter/bytecode-node-unittest.cc",
"interpreter/bytecode-operands-unittest.cc",
- "interpreter/bytecode-pipeline-unittest.cc",
"interpreter/bytecode-register-allocator-unittest.cc",
"interpreter/bytecode-register-optimizer-unittest.cc",
+ "interpreter/bytecode-source-info-unittest.cc",
"interpreter/bytecode-utils.h",
"interpreter/bytecodes-unittest.cc",
"interpreter/constant-array-builder-unittest.cc",
@@ -130,21 +135,23 @@ v8_executable("unittests") {
"libplatform/worker-thread-unittest.cc",
"locked-queue-unittest.cc",
"object-unittest.cc",
+ "parser/preparser-unittest.cc",
"register-configuration-unittest.cc",
"run-all-unittests.cc",
"source-position-table-unittest.cc",
+ "test-helpers.cc",
+ "test-helpers.h",
"test-utils.cc",
"test-utils.h",
"unicode-unittest.cc",
"value-serializer-unittest.cc",
- "wasm/asm-types-unittest.cc",
"wasm/control-transfer-unittest.cc",
"wasm/decoder-unittest.cc",
"wasm/function-body-decoder-unittest.cc",
"wasm/leb-helper-unittest.cc",
"wasm/loop-assignment-analysis-unittest.cc",
"wasm/module-decoder-unittest.cc",
- "wasm/switch-logic-unittest.cc",
+ "wasm/streaming-decoder-unittest.cc",
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
"wasm/wasm-opcodes-unittest.cc",
@@ -183,30 +190,15 @@ v8_executable("unittests") {
#}],
deps = [
- "../..:v8_builtins_generators",
+ "../..:v8_for_testing",
"../..:v8_libbase",
"../..:v8_libplatform",
- "//build/config/sanitizers:deps",
+ "//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
"//testing/gmock",
"//testing/gtest",
]
- if (v8_enable_i18n_support) {
- deps += [ "//third_party/icu" ]
- }
-
- defines = []
-
- if (is_component_build) {
- # unittests can't be built against a shared library, so we
- # need to depend on the underlying static target in that case.
- deps += [ "../..:v8_maybe_snapshot" ]
- defines += [ "BUILDING_V8_SHARED" ]
- } else {
- deps += [ "../..:v8" ]
- }
-
if (is_win) {
# This warning is benignly triggered by the U16 and U32 macros in
# bytecode-utils.h.
diff --git a/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc b/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc
index 9924244ec7..c7a68d23ef 100644
--- a/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc
+++ b/deps/v8/test/unittests/asmjs/asm-scanner-unittest.cc
@@ -209,6 +209,26 @@ TEST_F(AsmJsScannerTest, Numbers) {
CheckForEnd();
}
+TEST_F(AsmJsScannerTest, UnsignedNumbers) {
+ SetupSource("0x7fffffff 0x80000000 0xffffffff 0x100000000");
+
+ CHECK(scanner.IsUnsigned());
+ CHECK_EQ(0x7fffffff, scanner.AsUnsigned());
+ scanner.Next();
+
+ CHECK(scanner.IsUnsigned());
+ CHECK_EQ(0x80000000, scanner.AsUnsigned());
+ scanner.Next();
+
+ CHECK(scanner.IsUnsigned());
+ CHECK_EQ(0xffffffff, scanner.AsUnsigned());
+ scanner.Next();
+
+  // Numeric "unsigned" literals with a payload of more than 32 bits are
+  // rejected by asm.js in all contexts; hence `0x100000000` is considered an
+  // error (see the sketch after this test).
+ CheckForParseError();
+}
+
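
Note: to illustrate the rule checked by UnsignedNumbers above, an integer
literal whose payload exceeds 32 bits is an asm.js validation error in every
context. A JavaScript sketch (the module is illustrative; on validation
failure the engine falls back to executing it as ordinary JavaScript):

    function BadLiteralModule() {
      'use asm';
      function f() {
        // 0x100000000 does not fit in 32 bits, so asm.js validation fails
        // here; the function still runs as plain JavaScript and returns 0.
        return 0x100000000 | 0;
      }
      return {f: f};
    }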
TEST_F(AsmJsScannerTest, BadNumber) {
SetupSource(".123fe");
Skip('.');
@@ -254,7 +274,7 @@ TEST_F(AsmJsScannerTest, TrailingCComment) {
TEST_F(AsmJsScannerTest, Seeking) {
SetupSource("var eval do arguments function break\n");
Skip(TOK(var));
- int old_pos = scanner.GetPosition();
+ size_t old_pos = scanner.Position();
Skip(TOK(eval));
Skip(TOK(do));
Skip(TOK(arguments));
@@ -262,6 +282,7 @@ TEST_F(AsmJsScannerTest, Seeking) {
Skip(TOK(arguments));
scanner.Rewind();
scanner.Seek(old_pos);
+ Skip(TOK(eval));
Skip(TOK(do));
Skip(TOK(arguments));
Skip(TOK(function));
diff --git a/deps/v8/test/unittests/wasm/asm-types-unittest.cc b/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
index ebdd74a9f4..3ffe6c0622 100644
--- a/deps/v8/test/unittests/wasm/asm-types-unittest.cc
+++ b/deps/v8/test/unittests/asmjs/asm-types-unittest.cc
@@ -250,7 +250,8 @@ TEST_F(AsmTypeTest, IsExactly) {
Overload(Function(Type::Int)(Type::Double)),
Function(Type::Int)(Type::Int, Type::Int),
Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
- Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Function(Type::Int)(Type::Float),
+ Type::FroundType(zone()),
Type::FFIType(zone()),
Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
};
@@ -285,7 +286,8 @@ TEST_F(AsmTypeTest, IsA) {
Overload(Function(Type::Int)(Type::Double)),
Function(Type::Int)(Type::Int, Type::Int),
Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
- Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Function(Type::Int)(Type::Float),
+ Type::FroundType(zone()),
Type::FFIType(zone()),
Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
};
@@ -470,7 +472,8 @@ TEST_F(AsmTypeTest, ToReturnType) {
Overload(Function(Type::Int)(Type::Double)),
Function(Type::Int)(Type::Int, Type::Int),
Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
- Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Function(Type::Int)(Type::Float),
+ Type::FroundType(zone()),
Type::FFIType(zone()),
Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
};
@@ -496,7 +499,8 @@ TEST_F(AsmTypeTest, IsReturnType) {
Overload(Function(Type::Int)(Type::Double)),
Function(Type::Int)(Type::Int, Type::Int),
Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
- Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Function(Type::Int)(Type::Float),
+ Type::FroundType(zone()),
Type::FFIType(zone()),
Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
};
@@ -529,7 +533,8 @@ TEST_F(AsmTypeTest, ToParameterType) {
Overload(Function(Type::Int)(Type::Double)),
Function(Type::Int)(Type::Int, Type::Int),
Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
- Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Function(Type::Int)(Type::Float),
+ Type::FroundType(zone()),
Type::FFIType(zone()),
Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
};
@@ -556,7 +561,8 @@ TEST_F(AsmTypeTest, IsParameterType) {
Overload(Function(Type::Int)(Type::Double)),
Function(Type::Int)(Type::Int, Type::Int),
Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
- Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Function(Type::Int)(Type::Float),
+ Type::FroundType(zone()),
Type::FFIType(zone()),
Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
};
@@ -584,7 +590,8 @@ TEST_F(AsmTypeTest, IsComparableType) {
Overload(Function(Type::Int)(Type::Double)),
Function(Type::Int)(Type::Int, Type::Int),
Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
- Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Function(Type::Int)(Type::Float),
+ Type::FroundType(zone()),
Type::FFIType(zone()),
Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
};
@@ -612,7 +619,8 @@ TEST_F(AsmTypeTest, ElementSizeInBytes) {
Overload(Function(Type::Int)(Type::Double)),
Function(Type::Int)(Type::Int, Type::Int),
Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
- Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Function(Type::Int)(Type::Float),
+ Type::FroundType(zone()),
Type::FFIType(zone()),
Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
};
@@ -650,7 +658,8 @@ TEST_F(AsmTypeTest, LoadType) {
Overload(Function(Type::Int)(Type::Double)),
Function(Type::Int)(Type::Int, Type::Int),
Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
- Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Function(Type::Int)(Type::Float),
+ Type::FroundType(zone()),
Type::FFIType(zone()),
Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
};
@@ -688,7 +697,8 @@ TEST_F(AsmTypeTest, StoreType) {
Overload(Function(Type::Int)(Type::Double)),
Function(Type::Int)(Type::Int, Type::Int),
Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
- Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Function(Type::Int)(Type::Float),
+ Type::FroundType(zone()),
Type::FFIType(zone()),
Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
};
diff --git a/deps/v8/test/unittests/wasm/switch-logic-unittest.cc b/deps/v8/test/unittests/asmjs/switch-logic-unittest.cc
index cc3fbb05cc..cc3fbb05cc 100644
--- a/deps/v8/test/unittests/wasm/switch-logic-unittest.cc
+++ b/deps/v8/test/unittests/asmjs/switch-logic-unittest.cc
diff --git a/deps/v8/test/unittests/base/bits-unittest.cc b/deps/v8/test/unittests/base/bits-unittest.cc
index 3d17a050db..2bb4956d71 100644
--- a/deps/v8/test/unittests/base/bits-unittest.cc
+++ b/deps/v8/test/unittests/base/bits-unittest.cc
@@ -117,7 +117,8 @@ TEST(Bits, RoundUpToPowerOfTwo32) {
TRACED_FORRANGE(uint32_t, shift, 0, 31) {
EXPECT_EQ(1u << shift, RoundUpToPowerOfTwo32(1u << shift));
}
- EXPECT_EQ(0u, RoundUpToPowerOfTwo32(0));
+ EXPECT_EQ(1u, RoundUpToPowerOfTwo32(0));
+ EXPECT_EQ(1u, RoundUpToPowerOfTwo32(1));
EXPECT_EQ(4u, RoundUpToPowerOfTwo32(3));
EXPECT_EQ(0x80000000u, RoundUpToPowerOfTwo32(0x7fffffffu));
}
@@ -125,7 +126,24 @@ TEST(Bits, RoundUpToPowerOfTwo32) {
TEST(BitsDeathTest, DISABLE_IN_RELEASE(RoundUpToPowerOfTwo32)) {
ASSERT_DEATH_IF_SUPPORTED({ RoundUpToPowerOfTwo32(0x80000001u); },
- "0x80000000");
+ "Check failed:.* << 31");
+}
+
+TEST(Bits, RoundUpToPowerOfTwo64) {
+ TRACED_FORRANGE(uint64_t, shift, 0, 63) {
+ uint64_t value = uint64_t{1} << shift;
+ EXPECT_EQ(value, RoundUpToPowerOfTwo64(value));
+ }
+ EXPECT_EQ(uint64_t{1}, RoundUpToPowerOfTwo64(0));
+ EXPECT_EQ(uint64_t{1}, RoundUpToPowerOfTwo64(1));
+ EXPECT_EQ(uint64_t{4}, RoundUpToPowerOfTwo64(3));
+ EXPECT_EQ(uint64_t{1} << 63, RoundUpToPowerOfTwo64((uint64_t{1} << 63) - 1));
+ EXPECT_EQ(uint64_t{1} << 63, RoundUpToPowerOfTwo64(uint64_t{1} << 63));
+}
+
+TEST(BitsDeathTest, DISABLE_IN_RELEASE(RoundUpToPowerOfTwo64)) {
+ ASSERT_DEATH_IF_SUPPORTED({ RoundUpToPowerOfTwo64((uint64_t{1} << 63) + 1); },
+ "Check failed:.* << 63");
}
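
Note: the updated expectations pin down the edge cases: rounding 0 or 1 now
yields 1, and inputs above 2^31 (resp. 2^63) hit a check. A JavaScript sketch
of the 32-bit semantics; the real implementation is C++ bit twiddling, and the
precondition is enforced there by a DCHECK rather than in the function body:

    function roundUpToPowerOfTwo32(v) {
      v = v >>> 0;   // precondition (checked in C++): v <= 0x80000000
      v--;           // for v == 0 this wraps to 0xffffffff ...
      v |= v >>> 1;
      v |= v >>> 2;
      v |= v >>> 4;
      v |= v >>> 8;
      v |= v >>> 16;
      v = (v + 1) >>> 0;  // ... and the +1 wraps back to 0
      return v === 0 ? 1 : v;
    }

    roundUpToPowerOfTwo32(0);           // 1
    roundUpToPowerOfTwo32(3);           // 4
    roundUpToPowerOfTwo32(0x7fffffff);  // 0x80000000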
diff --git a/deps/v8/test/unittests/base/iterator-unittest.cc b/deps/v8/test/unittests/base/iterator-unittest.cc
index 8da26ce20e..c5fe7bc505 100644
--- a/deps/v8/test/unittests/base/iterator-unittest.cc
+++ b/deps/v8/test/unittests/base/iterator-unittest.cc
@@ -42,7 +42,7 @@ TEST(IteratorTest, IteratorRangeArray) {
TEST(IteratorTest, IteratorRangeDeque) {
- typedef std::deque<unsigned> C;
+ typedef std::deque<int> C;
C c;
c.push_back(1);
c.push_back(2);
diff --git a/deps/v8/test/unittests/base/logging-unittest.cc b/deps/v8/test/unittests/base/logging-unittest.cc
index e59456f4fc..35da8a33db 100644
--- a/deps/v8/test/unittests/base/logging-unittest.cc
+++ b/deps/v8/test/unittests/base/logging-unittest.cc
@@ -63,11 +63,11 @@ TEST(LoggingTest, CompareAgainstStaticConstPointer) {
CHECK_SUCCEED(GT, 0, v8::internal::Smi::kMinValue);
}
-TEST(LoggingTest, CompareWithDifferentSignedness) {
#define CHECK_BOTH(name, lhs, rhs) \
CHECK_##name(lhs, rhs); \
DCHECK_##name(lhs, rhs)
+TEST(LoggingTest, CompareWithDifferentSignedness) {
int32_t i32 = 10;
uint32_t u32 = 20;
int64_t i64 = 30;
@@ -82,5 +82,18 @@ TEST(LoggingTest, CompareWithDifferentSignedness) {
CHECK_BOTH(IMPLIES, !u32, !i64);
}
+TEST(LoggingTest, CompareWithReferenceType) {
+ int32_t i32 = 10;
+ uint32_t u32 = 20;
+ int64_t i64 = 30;
+ uint64_t u64 = 40;
+
+ // All these checks should compile (!) and succeed.
+ CHECK_BOTH(EQ, i32 + 10, *&u32);
+ CHECK_BOTH(LT, *&i32, u64);
+ CHECK_BOTH(IMPLIES, *&i32, i64);
+ CHECK_BOTH(IMPLIES, *&i32, u64);
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/test/unittests/base/platform/platform-unittest.cc b/deps/v8/test/unittests/base/platform/platform-unittest.cc
index 0f0fb375b1..098b989a60 100644
--- a/deps/v8/test/unittests/base/platform/platform-unittest.cc
+++ b/deps/v8/test/unittests/base/platform/platform-unittest.cc
@@ -5,6 +5,8 @@
#include "src/base/platform/platform.h"
#if V8_OS_POSIX
+#include <setjmp.h>
+#include <signal.h>
#include <unistd.h> // NOLINT
#endif
@@ -96,5 +98,106 @@ TEST_F(ThreadLocalStorageTest, DoTest) {
Join();
}
+#if V8_OS_POSIX
+// TODO(eholk): Add a Windows version of these tests.
+
+namespace {
+
+// These tests make sure the routines to allocate memory do so with the correct
+// permissions.
+//
+// Unfortunately, there is no API to find the protection of a memory address,
+// so instead we test permissions by installing a signal handler, probing a
+// memory location and recovering from the fault.
+//
+// We don't test the execution permission because to do so we'd have to
+// dynamically generate code and test if we can execute it.
+
+class MemoryAllocationPermissionsTest : public ::testing::Test {
+ static void SignalHandler(int signal, siginfo_t* info, void*) {
+ siglongjmp(continuation_, 1);
+ }
+ struct sigaction old_action_;
+// On Mac, sometimes we get SIGBUS instead of SIGSEGV.
+#if V8_OS_MACOSX
+ struct sigaction old_bus_action_;
+#endif
+
+ protected:
+ virtual void SetUp() {
+ struct sigaction action;
+ action.sa_sigaction = SignalHandler;
+ sigemptyset(&action.sa_mask);
+ action.sa_flags = SA_SIGINFO;
+ sigaction(SIGSEGV, &action, &old_action_);
+#if V8_OS_MACOSX
+ sigaction(SIGBUS, &action, &old_bus_action_);
+#endif
+ }
+
+ virtual void TearDown() {
+ // be a good citizen and restore the old signal handler.
+ sigaction(SIGSEGV, &old_action_, nullptr);
+#if V8_OS_MACOSX
+ sigaction(SIGBUS, &old_bus_action_, nullptr);
+#endif
+ }
+
+ public:
+ static sigjmp_buf continuation_;
+
+ enum class MemoryAction { kRead, kWrite };
+
+ void ProbeMemory(volatile int* buffer, MemoryAction action,
+ bool should_succeed) {
+ const int save_sigs = 1;
+ if (!sigsetjmp(continuation_, save_sigs)) {
+ switch (action) {
+ case MemoryAction::kRead: {
+ USE(*buffer);
+ break;
+ }
+ case MemoryAction::kWrite: {
+ *buffer = 0;
+ break;
+ }
+ }
+ if (should_succeed) {
+ SUCCEED();
+ } else {
+ FAIL();
+ }
+ return;
+ }
+ if (should_succeed) {
+ FAIL();
+ } else {
+ SUCCEED();
+ }
+ }
+
+ void TestPermissions(OS::MemoryPermission permission, bool can_read,
+ bool can_write) {
+ const size_t allocation_size = OS::CommitPageSize();
+ size_t actual = 0;
+ int* buffer =
+ static_cast<int*>(OS::Allocate(allocation_size, &actual, permission));
+ ProbeMemory(buffer, MemoryAction::kRead, can_read);
+ ProbeMemory(buffer, MemoryAction::kWrite, can_write);
+ OS::Free(buffer, actual);
+ }
+};
+
+sigjmp_buf MemoryAllocationPermissionsTest::continuation_;
+
+TEST_F(MemoryAllocationPermissionsTest, DoTest) {
+ TestPermissions(OS::MemoryPermission::kNoAccess, false, false);
+ TestPermissions(OS::MemoryPermission::kReadWrite, true, true);
+ TestPermissions(OS::MemoryPermission::kReadWriteExecute, true, true);
+}
+
+} // namespace
+#endif // V8_OS_POSIX
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/test/unittests/char-predicates-unittest.cc b/deps/v8/test/unittests/char-predicates-unittest.cc
index d2b71c275f..a34e3a1680 100644
--- a/deps/v8/test/unittests/char-predicates-unittest.cc
+++ b/deps/v8/test/unittests/char-predicates-unittest.cc
@@ -84,8 +84,7 @@ TEST(CharPredicatesTest, IdentifierPart) {
EXPECT_FALSE(IdentifierPart::Is(0x2E2F));
}
-
-#ifdef V8_I18N_SUPPORT
+#ifdef V8_INTL_SUPPORT
TEST(CharPredicatesTest, SupplementaryPlaneIdentifiers) {
// Both ID_Start and ID_Continue.
EXPECT_TRUE(IdentifierStart::Is(0x10403)); // Category Lu
@@ -113,7 +112,7 @@ TEST(CharPredicatesTest, SupplementaryPlaneIdentifiers) {
EXPECT_FALSE(IdentifierStart::Is(0x1F4A9)); // Category So
EXPECT_FALSE(IdentifierPart::Is(0x1F4A9));
}
-#endif // V8_I18N_SUPPORT
+#endif // V8_INTL_SUPPORT
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc
index bcacf04f48..da13758b12 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-job-unittest.cc
@@ -15,7 +15,7 @@
#include "src/isolate-inl.h"
#include "src/parsing/parse-info.h"
#include "src/v8.h"
-#include "test/unittests/compiler-dispatcher/compiler-dispatcher-helper.h"
+#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -72,7 +72,7 @@ TEST_F(CompilerDispatcherJobTest, ConstructWithoutSFI) {
tracer(), FLAG_stack_size,
test::CreateSource(i_isolate(), resource.get()), 0,
static_cast<int>(resource->length()), SLOPPY, 1, false, false, false,
- false, i_isolate()->heap()->HashSeed(), i_isolate()->allocator(),
+ i_isolate()->heap()->HashSeed(), i_isolate()->allocator(),
ScriptCompiler::kNoCompileOptions, i_isolate()->ast_string_constants(),
callback.get()));
}
@@ -109,7 +109,7 @@ TEST_F(CompilerDispatcherJobTest, StateTransitionsParseWithCallback) {
tracer(), FLAG_stack_size,
test::CreateSource(i_isolate(), resource.get()), 0,
static_cast<int>(resource->length()), SLOPPY, 1, false, false, false,
- false, i_isolate()->heap()->HashSeed(), i_isolate()->allocator(),
+ i_isolate()->heap()->HashSeed(), i_isolate()->allocator(),
ScriptCompiler::kNoCompileOptions, i_isolate()->ast_string_constants(),
callback.get()));
ASSERT_TRUE(job->status() == CompileJobStatus::kReadyToParse);
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
index e54e6992b3..143b5d4ad5 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc
@@ -18,7 +18,7 @@
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "src/v8.h"
-#include "test/unittests/compiler-dispatcher/compiler-dispatcher-helper.h"
+#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -891,8 +891,8 @@ TEST_F(CompilerDispatcherTest, EnqueueWithoutSFI) {
ASSERT_TRUE(callback->result() == nullptr);
ASSERT_TRUE(dispatcher.Enqueue(CreateSource(i_isolate(), resource.get()), 0,
static_cast<int>(resource->length()), SLOPPY,
- 1, false, false, false, false, 0,
- callback.get(), nullptr));
+ 1, false, false, false, 0, callback.get(),
+ nullptr));
ASSERT_TRUE(!dispatcher.jobs_.empty());
ASSERT_TRUE(dispatcher.jobs_.begin()->second->status() ==
CompileJobStatus::kReadyToParse);
diff --git a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
index 5bd957af54..df545aa06e 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
+++ b/deps/v8/test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc
@@ -12,7 +12,7 @@
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
-#include "test/unittests/compiler-dispatcher/compiler-dispatcher-helper.h"
+#include "test/unittests/test-helpers.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
index 20d67e687d..b330e81a53 100644
--- a/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
+++ b/deps/v8/test/unittests/compiler/bytecode-analysis-unittest.cc
@@ -90,7 +90,7 @@ class BytecodeAnalysisTest : public TestWithIsolateAndZone {
SaveFlags* BytecodeAnalysisTest::save_flags_ = nullptr;
TEST_F(BytecodeAnalysisTest, EmptyBlock) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -104,7 +104,7 @@ TEST_F(BytecodeAnalysisTest, EmptyBlock) {
}
TEST_F(BytecodeAnalysisTest, SimpleLoad) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -121,7 +121,7 @@ TEST_F(BytecodeAnalysisTest, SimpleLoad) {
}
TEST_F(BytecodeAnalysisTest, StoreThenLoad) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -141,7 +141,7 @@ TEST_F(BytecodeAnalysisTest, StoreThenLoad) {
}
TEST_F(BytecodeAnalysisTest, DiamondLoad) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -178,7 +178,7 @@ TEST_F(BytecodeAnalysisTest, DiamondLoad) {
}
TEST_F(BytecodeAnalysisTest, DiamondLookupsAndBinds) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -225,7 +225,7 @@ TEST_F(BytecodeAnalysisTest, DiamondLookupsAndBinds) {
}
TEST_F(BytecodeAnalysisTest, SimpleLoop) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -235,9 +235,10 @@ TEST_F(BytecodeAnalysisTest, SimpleLoop) {
builder.StoreAccumulatorInRegister(reg_0);
expected_liveness.emplace_back("..LL", "L.LL");
- interpreter::LoopBuilder loop_builder(&builder);
- loop_builder.LoopHeader();
{
+ interpreter::LoopBuilder loop_builder(&builder);
+ loop_builder.LoopHeader();
+
builder.JumpIfTrue(ToBooleanMode::kConvertToBoolean,
loop_builder.break_labels()->New());
expected_liveness.emplace_back("L.LL", "L.L.");
@@ -252,7 +253,6 @@ TEST_F(BytecodeAnalysisTest, SimpleLoop) {
loop_builder.JumpToHeader(0);
expected_liveness.emplace_back("L.LL", "L.LL");
}
- loop_builder.EndLoop();
builder.LoadAccumulatorWithRegister(reg_2);
expected_liveness.emplace_back("..L.", "...L");
@@ -266,7 +266,7 @@ TEST_F(BytecodeAnalysisTest, SimpleLoop) {
}
TEST_F(BytecodeAnalysisTest, TryCatch) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -311,7 +311,7 @@ TEST_F(BytecodeAnalysisTest, TryCatch) {
}
TEST_F(BytecodeAnalysisTest, DiamondInLoop) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -321,9 +321,10 @@ TEST_F(BytecodeAnalysisTest, DiamondInLoop) {
builder.StoreAccumulatorInRegister(reg_0);
expected_liveness.emplace_back("...L", "L..L");
- interpreter::LoopBuilder loop_builder(&builder);
- loop_builder.LoopHeader();
{
+ interpreter::LoopBuilder loop_builder(&builder);
+ loop_builder.LoopHeader();
+
builder.JumpIfTrue(ToBooleanMode::kConvertToBoolean,
loop_builder.break_labels()->New());
expected_liveness.emplace_back("L..L", "L..L");
@@ -350,7 +351,6 @@ TEST_F(BytecodeAnalysisTest, DiamondInLoop) {
loop_builder.JumpToHeader(0);
expected_liveness.emplace_back("L..L", "L..L");
}
- loop_builder.EndLoop();
builder.Return();
expected_liveness.emplace_back("...L", "....");
@@ -361,7 +361,7 @@ TEST_F(BytecodeAnalysisTest, DiamondInLoop) {
}
TEST_F(BytecodeAnalysisTest, KillingLoopInsideLoop) {
- interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 0, 3);
+ interpreter::BytecodeArrayBuilder builder(isolate(), zone(), 3, 3);
std::vector<std::pair<std::string, std::string>> expected_liveness;
interpreter::Register reg_0(0);
@@ -370,9 +370,10 @@ TEST_F(BytecodeAnalysisTest, KillingLoopInsideLoop) {
builder.StoreAccumulatorInRegister(reg_0);
expected_liveness.emplace_back(".L.L", "LL..");
- interpreter::LoopBuilder loop_builder(&builder);
- loop_builder.LoopHeader();
{
+ interpreter::LoopBuilder loop_builder(&builder);
+ loop_builder.LoopHeader();
+
builder.LoadAccumulatorWithRegister(reg_0);
expected_liveness.emplace_back("LL..", ".L..");
@@ -383,9 +384,10 @@ TEST_F(BytecodeAnalysisTest, KillingLoopInsideLoop) {
loop_builder.break_labels()->New());
expected_liveness.emplace_back(".L.L", ".L.L");
- interpreter::LoopBuilder inner_loop_builder(&builder);
- inner_loop_builder.LoopHeader();
{
+ interpreter::LoopBuilder inner_loop_builder(&builder);
+ inner_loop_builder.LoopHeader();
+
builder.StoreAccumulatorInRegister(reg_0);
expected_liveness.emplace_back(".L.L", "LL.L");
@@ -397,13 +399,11 @@ TEST_F(BytecodeAnalysisTest, KillingLoopInsideLoop) {
inner_loop_builder.JumpToHeader(1);
expected_liveness.emplace_back(".L.L", ".L.L");
}
- inner_loop_builder.EndLoop();
loop_builder.BindContinueTarget();
loop_builder.JumpToHeader(0);
expected_liveness.emplace_back("LL..", "LL..");
}
- loop_builder.EndLoop();
builder.Return();
expected_liveness.emplace_back("...L", "....");
diff --git a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
index 2afeed6c00..5604b81ecd 100644
--- a/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/int64-lowering-unittest.cc
@@ -603,7 +603,8 @@ TEST_F(Int64LoweringTest, F64ReinterpretI64) {
MachineRepresentation::kFloat64);
Capture<Node*> stack_slot_capture;
- Matcher<Node*> stack_slot_matcher = IsStackSlot(sizeof(int64_t));
+ Matcher<Node*> stack_slot_matcher =
+ IsStackSlot(StackSlotRepresentation(sizeof(int64_t), 0));
Capture<Node*> store_capture;
Matcher<Node*> store_matcher =
@@ -634,7 +635,8 @@ TEST_F(Int64LoweringTest, I64ReinterpretF64) {
MachineRepresentation::kWord64);
Capture<Node*> stack_slot;
- Matcher<Node*> stack_slot_matcher = IsStackSlot(sizeof(int64_t));
+ Matcher<Node*> stack_slot_matcher =
+ IsStackSlot(StackSlotRepresentation(sizeof(int64_t), 0));
Capture<Node*> store;
Matcher<Node*> store_matcher = IsStore(
diff --git a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index e8bbc33578..34da77dec4 100644
--- a/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -144,6 +144,23 @@ TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiver) {
EXPECT_THAT(r.replacement(), IsObjectIsReceiver(input));
}
+// -----------------------------------------------------------------------------
+// %_CreateJSGeneratorObject
+
+TEST_F(JSIntrinsicLoweringTest, InlineCreateJSGeneratorObject) {
+ Node* const function = Parameter(0);
+ Node* const receiver = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineCreateJSGeneratorObject, 2),
+ function, receiver, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(IrOpcode::kJSCreateGeneratorObject,
+ r.replacement()->op()->opcode());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/mips/OWNERS b/deps/v8/test/unittests/compiler/mips/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/test/unittests/compiler/mips/OWNERS
+++ b/deps/v8/test/unittests/compiler/mips/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/test/unittests/compiler/mips64/OWNERS b/deps/v8/test/unittests/compiler/mips64/OWNERS
index 89455a4fbd..3f8fbfc7c8 100644
--- a/deps/v8/test/unittests/compiler/mips64/OWNERS
+++ b/deps/v8/test/unittests/compiler/mips64/OWNERS
@@ -1,6 +1,3 @@
-paul.lind@imgtec.com
-gergely.kis@imgtec.com
-akos.palfi@imgtec.com
-balazs.kilvady@imgtec.com
-dusan.milosavljevic@imgtec.com
ivica.bogosavljevic@imgtec.com
+Miran.Karic@imgtec.com
+dusan.simicic@imgtec.com
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 46c0a8ffaa..764a4da2d7 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -1339,24 +1339,25 @@ STORE_MATCHER(UnalignedStore)
class IsStackSlotMatcher final : public NodeMatcher {
public:
- explicit IsStackSlotMatcher(const Matcher<int>& size_matcher)
- : NodeMatcher(IrOpcode::kStackSlot), size_matcher_(size_matcher) {}
+ explicit IsStackSlotMatcher(
+ const Matcher<StackSlotRepresentation>& rep_matcher)
+ : NodeMatcher(IrOpcode::kStackSlot), rep_matcher_(rep_matcher) {}
void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
- *os << " whose size (";
- size_matcher_.DescribeTo(os);
+ *os << " whose rep (";
+ rep_matcher_.DescribeTo(os);
*os << ")";
}
bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<int>(node), "size", size_matcher_,
- listener));
+ PrintMatchAndExplain(OpParameter<StackSlotRepresentation>(node),
+ "rep", rep_matcher_, listener));
}
private:
- const Matcher<int> size_matcher_;
+ const Matcher<StackSlotRepresentation> rep_matcher_;
};
class IsToNumberMatcher final : public NodeMatcher {
@@ -2175,8 +2176,9 @@ Matcher<Node*> IsUnalignedStore(
control_matcher));
}
-Matcher<Node*> IsStackSlot(const Matcher<int>& size_matcher) {
- return MakeMatcher(new IsStackSlotMatcher(size_matcher));
+Matcher<Node*> IsStackSlot(
+ const Matcher<StackSlotRepresentation>& rep_matcher) {
+ return MakeMatcher(new IsStackSlotMatcher(rep_matcher));
}
Matcher<Node*> IsToNumber(const Matcher<Node*>& base_matcher,
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 1e0f3d8b15..683ee2c964 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -334,7 +334,7 @@ Matcher<Node*> IsUnalignedStore(
const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
const Matcher<Node*>& value_matcher, const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsStackSlot(const Matcher<int>& size_matcher);
+Matcher<Node*> IsStackSlot(const Matcher<StackSlotRepresentation>& rep_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Or(const Matcher<Node*>& lhs_matcher,
diff --git a/deps/v8/test/unittests/heap/concurrent-marking-deque-unittest.cc b/deps/v8/test/unittests/heap/concurrent-marking-deque-unittest.cc
new file mode 100644
index 0000000000..25369217e3
--- /dev/null
+++ b/deps/v8/test/unittests/heap/concurrent-marking-deque-unittest.cc
@@ -0,0 +1,57 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/globals.h"
+#include "src/heap/concurrent-marking-deque.h"
+#include "src/heap/heap-inl.h"
+#include "src/isolate.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+class ConcurrentMarkingDequeTest : public TestWithIsolate {
+ public:
+ ConcurrentMarkingDequeTest() {
+ marking_deque_ = new ConcurrentMarkingDeque(i_isolate()->heap());
+ object_ = i_isolate()->heap()->undefined_value();
+ }
+
+ ~ConcurrentMarkingDequeTest() { delete marking_deque_; }
+
+ ConcurrentMarkingDeque* marking_deque() { return marking_deque_; }
+
+ HeapObject* object() { return object_; }
+
+ private:
+ ConcurrentMarkingDeque* marking_deque_;
+ HeapObject* object_;
+ DISALLOW_COPY_AND_ASSIGN(ConcurrentMarkingDequeTest);
+};
+
+TEST_F(ConcurrentMarkingDequeTest, Empty) {
+ EXPECT_TRUE(marking_deque()->IsEmpty());
+ EXPECT_EQ(0, marking_deque()->Size());
+}
+
+TEST_F(ConcurrentMarkingDequeTest, SharedDeque) {
+ marking_deque()->Push(object());
+ EXPECT_FALSE(marking_deque()->IsEmpty());
+ EXPECT_EQ(1, marking_deque()->Size());
+ EXPECT_EQ(object(), marking_deque()->Pop(MarkingThread::kConcurrent));
+}
+
+TEST_F(ConcurrentMarkingDequeTest, BailoutDeque) {
+ marking_deque()->Push(object(), MarkingThread::kConcurrent,
+ TargetDeque::kBailout);
+ EXPECT_FALSE(marking_deque()->IsEmpty());
+ EXPECT_EQ(1, marking_deque()->Size());
+ EXPECT_EQ(nullptr, marking_deque()->Pop(MarkingThread::kConcurrent));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
new file mode 100644
index 0000000000..23ff94fae9
--- /dev/null
+++ b/deps/v8/test/unittests/heap/item-parallel-job-unittest.cc
@@ -0,0 +1,211 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/item-parallel-job.h"
+
+#include "src/isolate.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+
+class ItemParallelJobTest : public TestWithIsolate {
+ public:
+ ItemParallelJobTest() : parallel_job_semaphore_(0) {}
+
+ base::Semaphore* parallel_job_semaphore() { return &parallel_job_semaphore_; }
+
+ private:
+ base::Semaphore parallel_job_semaphore_;
+ DISALLOW_COPY_AND_ASSIGN(ItemParallelJobTest);
+};
+
+namespace {
+
+class EmptyTask : public ItemParallelJob::Task {
+ public:
+ explicit EmptyTask(Isolate* isolate, bool* did_run)
+ : ItemParallelJob::Task(isolate), did_run_(did_run) {}
+
+ void RunInParallel() override { *did_run_ = true; }
+
+ private:
+ bool* did_run_;
+};
+
+class SimpleItem : public ItemParallelJob::Item {
+ public:
+ explicit SimpleItem(bool* was_processed)
+ : ItemParallelJob::Item(), was_processed_(was_processed) {}
+ void Process() { *was_processed_ = true; }
+
+ private:
+ bool* was_processed_;
+};
+
+class EagerTask : public ItemParallelJob::Task {
+ public:
+ explicit EagerTask(Isolate* isolate) : ItemParallelJob::Task(isolate) {}
+
+ void RunInParallel() override {
+ SimpleItem* item = nullptr;
+ while ((item = GetItem<SimpleItem>()) != nullptr) {
+ item->Process();
+ item->MarkFinished();
+ }
+ }
+};
+
+class OneShotBarrier {
+ public:
+ explicit OneShotBarrier(size_t counter) : counter_(counter) {
+ DCHECK_GE(counter_, 0);
+ }
+
+ void Wait() {
+ DCHECK_NE(counter_, 0);
+ mutex_.Lock();
+ counter_--;
+ if (counter_ == 0) {
+ condition_.NotifyAll();
+ } else {
+ while (counter_ > 0) {
+ condition_.Wait(&mutex_);
+ }
+ }
+ mutex_.Unlock();
+ }
+
+ private:
+ base::Mutex mutex_;
+ base::ConditionVariable condition_;
+ size_t counter_;
+};
+
+class TaskProcessingOneItem : public ItemParallelJob::Task {
+ public:
+ explicit TaskProcessingOneItem(Isolate* isolate, OneShotBarrier* barrier)
+ : ItemParallelJob::Task(isolate), barrier_(barrier) {}
+
+ void RunInParallel() override {
+ SimpleItem* item = GetItem<SimpleItem>();
+ EXPECT_NE(nullptr, item);
+ item->Process();
+ item->MarkFinished();
+ // Avoid canceling the remaining tasks with a simple barrier.
+ barrier_->Wait();
+ }
+
+ private:
+ OneShotBarrier* barrier_;
+};
+
+class TaskForDifferentItems;
+
+class BaseItem : public ItemParallelJob::Item {
+ public:
+ virtual ~BaseItem() {}
+ virtual void ProcessItem(TaskForDifferentItems* task) = 0;
+};
+
+class TaskForDifferentItems : public ItemParallelJob::Task {
+ public:
+ explicit TaskForDifferentItems(Isolate* isolate, bool* processed_a,
+ bool* processed_b)
+ : ItemParallelJob::Task(isolate),
+ processed_a_(processed_a),
+ processed_b_(processed_b) {}
+ virtual ~TaskForDifferentItems() {}
+
+ void RunInParallel() override {
+ BaseItem* item = nullptr;
+ while ((item = GetItem<BaseItem>()) != nullptr) {
+ item->ProcessItem(this);
+ item->MarkFinished();
+ }
+ }
+
+ void ProcessA() { *processed_a_ = true; }
+ void ProcessB() { *processed_b_ = true; }
+
+ private:
+ bool* processed_a_;
+ bool* processed_b_;
+};
+
+class ItemA : public BaseItem {
+ public:
+ virtual ~ItemA() {}
+ void ProcessItem(TaskForDifferentItems* task) override { task->ProcessA(); }
+};
+
+class ItemB : public BaseItem {
+ public:
+ virtual ~ItemB() {}
+ void ProcessItem(TaskForDifferentItems* task) override { task->ProcessB(); }
+};
+
+} // namespace
+
+TEST_F(ItemParallelJobTest, EmptyTaskRuns) {
+ bool did_run = false;
+ ItemParallelJob job(i_isolate()->cancelable_task_manager(),
+ parallel_job_semaphore());
+ job.AddTask(new EmptyTask(i_isolate(), &did_run));
+ job.Run();
+ EXPECT_TRUE(did_run);
+}
+
+TEST_F(ItemParallelJobTest, FinishAllItems) {
+ const int kItems = 111;
+ bool was_processed[kItems];
+ for (int i = 0; i < kItems; i++) {
+ was_processed[i] = false;
+ }
+ ItemParallelJob job(i_isolate()->cancelable_task_manager(),
+ parallel_job_semaphore());
+ job.AddTask(new EagerTask(i_isolate()));
+ for (int i = 0; i < kItems; i++) {
+ job.AddItem(new SimpleItem(&was_processed[i]));
+ }
+ job.Run();
+ for (int i = 0; i < kItems; i++) {
+ EXPECT_TRUE(was_processed[i]);
+ }
+}
+
+TEST_F(ItemParallelJobTest, DistributeItemsMultipleTasks) {
+ const int kItemsAndTasks = 2; // Main thread + additional task.
+ bool was_processed[kItemsAndTasks];
+ OneShotBarrier barrier(kItemsAndTasks);
+ for (int i = 0; i < kItemsAndTasks; i++) {
+ was_processed[i] = false;
+ }
+ ItemParallelJob job(i_isolate()->cancelable_task_manager(),
+ parallel_job_semaphore());
+ for (int i = 0; i < kItemsAndTasks; i++) {
+ job.AddItem(new SimpleItem(&was_processed[i]));
+ job.AddTask(new TaskProcessingOneItem(i_isolate(), &barrier));
+ }
+ job.Run();
+ for (int i = 0; i < kItemsAndTasks; i++) {
+ EXPECT_TRUE(was_processed[i]);
+ }
+}
+
+TEST_F(ItemParallelJobTest, DifferentItems) {
+ bool item_a = false;
+ bool item_b = false;
+ ItemParallelJob job(i_isolate()->cancelable_task_manager(),
+ parallel_job_semaphore());
+ job.AddItem(new ItemA());
+ job.AddItem(new ItemB());
+ job.AddTask(new TaskForDifferentItems(i_isolate(), &item_a, &item_b));
+ job.Run();
+ EXPECT_TRUE(item_a);
+ EXPECT_TRUE(item_b);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/spaces-unittest.cc b/deps/v8/test/unittests/heap/spaces-unittest.cc
index bdd9933398..9b4b2675e3 100644
--- a/deps/v8/test/unittests/heap/spaces-unittest.cc
+++ b/deps/v8/test/unittests/heap/spaces-unittest.cc
@@ -22,6 +22,12 @@ TEST_F(SpacesTest, CompactionSpaceMerge) {
EXPECT_TRUE(compaction_space != NULL);
EXPECT_TRUE(compaction_space->SetUp());
+ for (Page* p : *old_space) {
+ // Unlink free lists from the main space to avoid reusing the memory for
+ // compaction spaces.
+ old_space->UnlinkFreeListCategories(p);
+ }
+
// Cannot loop until "Available()" since we initially have 0 bytes available
// and would thus neither grow, nor be able to allocate an object.
const int kNumObjects = 10;
diff --git a/deps/v8/test/unittests/heap/workstealing-marking-deque-unittest.cc b/deps/v8/test/unittests/heap/workstealing-marking-deque-unittest.cc
new file mode 100644
index 0000000000..c8165285c7
--- /dev/null
+++ b/deps/v8/test/unittests/heap/workstealing-marking-deque-unittest.cc
@@ -0,0 +1,33 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/workstealing-marking-deque.h"
+
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+
+class HeapObject {};
+
+TEST(WorkStealingMarkingDeque, LocalEmpty) {
+ WorkStealingMarkingDeque marking_deque;
+ LocalWorkStealingMarkingDeque local_marking_deque(&marking_deque, 0);
+ EXPECT_TRUE(local_marking_deque.IsEmpty());
+}
+
+TEST(WorkStealingMarkingDeque, LocalPushPop) {
+ WorkStealingMarkingDeque marking_deque;
+ LocalWorkStealingMarkingDeque local_marking_deque(&marking_deque, 0);
+ HeapObject* object1 = new HeapObject();
+ HeapObject* object2 = nullptr;
+ EXPECT_TRUE(local_marking_deque.Push(object1));
+ EXPECT_FALSE(local_marking_deque.IsEmpty());
+ EXPECT_TRUE(local_marking_deque.Pop(&object2));
+ EXPECT_EQ(object1, object2);
+ delete object1;
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 46270241fe..8f0821fd81 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -7,6 +7,7 @@
#include "src/ast/scopes.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-jump-table.h"
#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "src/objects-inl.h"
@@ -25,16 +26,14 @@ class BytecodeArrayBuilderTest : public TestWithIsolateAndZone {
using ToBooleanMode = BytecodeArrayBuilder::ToBooleanMode;
TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
- CanonicalHandleScope canonical(isolate());
- BytecodeArrayBuilder builder(isolate(), zone(), 0, 1, 131);
+ BytecodeArrayBuilder builder(isolate(), zone(), 1, 131);
Factory* factory = isolate()->factory();
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
DeclarationScope scope(zone(), &ast_factory);
CHECK_EQ(builder.locals_count(), 131);
- CHECK_EQ(builder.context_count(), 1);
- CHECK_EQ(builder.fixed_register_count(), 132);
+ CHECK_EQ(builder.fixed_register_count(), 131);
Register reg(0);
Register other(reg.index() + 1);
@@ -278,6 +277,10 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.JumpIfJSReceiver(&end[10]);
}
+ // Emit Smi table switch bytecode.
+ BytecodeJumpTable* jump_table = builder.AllocateJumpTable(1, 0);
+ builder.SwitchOnSmiNoFeedback(jump_table).Bind(jump_table, 0);
+
// Emit set pending message bytecode.
builder.SetPendingMessage();
@@ -429,37 +432,33 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
- CanonicalHandleScope canonical(isolate());
for (int locals = 0; locals < 5; locals++) {
- for (int contexts = 0; contexts < 4; contexts++) {
- for (int temps = 0; temps < 3; temps++) {
- BytecodeArrayBuilder builder(isolate(), zone(), 0, contexts, locals);
- BytecodeRegisterAllocator* allocator(builder.register_allocator());
- for (int i = 0; i < locals + contexts; i++) {
- builder.LoadLiteral(Smi::kZero);
- builder.StoreAccumulatorInRegister(Register(i));
- }
- for (int i = 0; i < temps; i++) {
- Register temp = allocator->NewRegister();
- builder.LoadLiteral(Smi::kZero);
- builder.StoreAccumulatorInRegister(temp);
- // Ensure temporaries are used so not optimized away by the
- // register optimizer.
- builder.ConvertAccumulatorToName(temp);
- }
- builder.Return();
-
- Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
- int total_registers = locals + contexts + temps;
- CHECK_EQ(the_array->frame_size(), total_registers * kPointerSize);
+ for (int temps = 0; temps < 3; temps++) {
+ BytecodeArrayBuilder builder(isolate(), zone(), 1, locals);
+ BytecodeRegisterAllocator* allocator(builder.register_allocator());
+ for (int i = 0; i < locals; i++) {
+ builder.LoadLiteral(Smi::kZero);
+ builder.StoreAccumulatorInRegister(Register(i));
+ }
+ for (int i = 0; i < temps; i++) {
+ Register temp = allocator->NewRegister();
+ builder.LoadLiteral(Smi::kZero);
+ builder.StoreAccumulatorInRegister(temp);
+ // Ensure temporaries are used so not optimized away by the
+ // register optimizer.
+ builder.ConvertAccumulatorToName(temp);
}
+ builder.Return();
+
+ Handle<BytecodeArray> the_array = builder.ToBytecodeArray(isolate());
+ int total_registers = locals + temps;
+ CHECK_EQ(the_array->frame_size(), total_registers * kPointerSize);
}
}
}
TEST_F(BytecodeArrayBuilderTest, RegisterValues) {
- CanonicalHandleScope canonical(isolate());
int index = 1;
Register the_register(index);
@@ -472,8 +471,7 @@ TEST_F(BytecodeArrayBuilderTest, RegisterValues) {
TEST_F(BytecodeArrayBuilderTest, Parameters) {
- CanonicalHandleScope canonical(isolate());
- BytecodeArrayBuilder builder(isolate(), zone(), 10, 0, 0);
+ BytecodeArrayBuilder builder(isolate(), zone(), 10, 0);
Register receiver(builder.Receiver());
Register param8(builder.Parameter(8));
@@ -482,8 +480,7 @@ TEST_F(BytecodeArrayBuilderTest, Parameters) {
TEST_F(BytecodeArrayBuilderTest, Constants) {
- CanonicalHandleScope canonical(isolate());
- BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
+ BytecodeArrayBuilder builder(isolate(), zone(), 1, 0);
AstValueFactory ast_factory(zone(), isolate()->ast_string_constants(),
isolate()->heap()->HashSeed());
@@ -509,10 +506,9 @@ TEST_F(BytecodeArrayBuilderTest, Constants) {
}
TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
- CanonicalHandleScope canonical(isolate());
static const int kFarJumpDistance = 256 + 20;
- BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 1);
+ BytecodeArrayBuilder builder(isolate(), zone(), 1, 1);
Register reg(0);
BytecodeLabel far0, far1, far2, far3, far4;
@@ -627,8 +623,7 @@ TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
- CanonicalHandleScope canonical(isolate());
- BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 1);
+ BytecodeArrayBuilder builder(isolate(), zone(), 1, 1);
Register reg(0);
@@ -675,10 +670,104 @@ TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
CHECK(iterator.done());
}
+TEST_F(BytecodeArrayBuilderTest, SmallSwitch) {
+ BytecodeArrayBuilder builder(isolate(), zone(), 1, 1);
+
+ // Small jump table that fits into the single-size constant pool
+ int small_jump_table_size = 5;
+ int small_jump_table_base = -2;
+ BytecodeJumpTable* small_jump_table =
+ builder.AllocateJumpTable(small_jump_table_size, small_jump_table_base);
+
+ builder.LoadLiteral(Smi::FromInt(7)).SwitchOnSmiNoFeedback(small_jump_table);
+ for (int i = 0; i < small_jump_table_size; i++) {
+ builder.Bind(small_jump_table, small_jump_table_base + i).Debugger();
+ }
+ builder.Return();
+
+ Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
+ BytecodeArrayIterator iterator(array);
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kSwitchOnSmiNoFeedback);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ {
+ int i = 0;
+ int switch_end =
+ iterator.current_offset() + iterator.current_bytecode_size();
+
+ for (const auto& entry : iterator.GetJumpTableTargetOffsets()) {
+ CHECK_EQ(entry.case_value, small_jump_table_base + i);
+ CHECK_EQ(entry.target_offset, switch_end + i);
+
+ i++;
+ }
+ CHECK_EQ(i, small_jump_table_size);
+ }
+ iterator.Advance();
+
+ for (int i = 0; i < small_jump_table_size; i++) {
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
+ iterator.Advance();
+ }
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
+ iterator.Advance();
+ CHECK(iterator.done());
+}
+
+TEST_F(BytecodeArrayBuilderTest, WideSwitch) {
+ BytecodeArrayBuilder builder(isolate(), zone(), 1, 1);
+
+ // Large jump table that requires a wide Switch bytecode.
+ int large_jump_table_size = 256;
+ int large_jump_table_base = -10;
+ BytecodeJumpTable* large_jump_table =
+ builder.AllocateJumpTable(large_jump_table_size, large_jump_table_base);
+
+ builder.LoadLiteral(Smi::FromInt(7)).SwitchOnSmiNoFeedback(large_jump_table);
+ for (int i = 0; i < large_jump_table_size; i++) {
+ builder.Bind(large_jump_table, large_jump_table_base + i).Debugger();
+ }
+ builder.Return();
+
+ Handle<BytecodeArray> array = builder.ToBytecodeArray(isolate());
+ BytecodeArrayIterator iterator(array);
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kSwitchOnSmiNoFeedback);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
+ {
+ int i = 0;
+ int switch_end =
+ iterator.current_offset() + iterator.current_bytecode_size();
+
+ for (const auto& entry : iterator.GetJumpTableTargetOffsets()) {
+ CHECK_EQ(entry.case_value, large_jump_table_base + i);
+ CHECK_EQ(entry.target_offset, switch_end + i);
+
+ i++;
+ }
+ CHECK_EQ(i, large_jump_table_size);
+ }
+ iterator.Advance();
+
+ for (int i = 0; i < large_jump_table_size; i++) {
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kDebugger);
+ iterator.Advance();
+ }
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
+ iterator.Advance();
+ CHECK(iterator.done());
+}
TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
- CanonicalHandleScope canonical(isolate());
- BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
+ BytecodeArrayBuilder builder(isolate(), zone(), 1, 0);
// Labels can only have 1 forward reference, but
// can be referred to multiple times once bound.
@@ -710,10 +799,9 @@ TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
- CanonicalHandleScope canonical(isolate());
static const int kRepeats = 3;
- BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
+ BytecodeArrayBuilder builder(isolate(), zone(), 1, 0);
for (int i = 0; i < kRepeats; i++) {
BytecodeLabel label, after_jump0, after_jump1;
builder.Jump(&label)
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index 915521f3d7..680d8197b2 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -8,6 +8,9 @@
#include "src/factory.h"
#include "src/interpreter/bytecode-array-writer.h"
#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-node.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/interpreter/bytecode-source-info.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
@@ -145,7 +148,7 @@ TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
PositionTableEntry expected_positions[] = {
{0, 10, false}, {1, 55, true}, {9, 70, true}};
SourcePositionTableIterator source_iterator(
- bytecode_array->source_position_table());
+ bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
@@ -230,7 +233,7 @@ TEST_F(BytecodeArrayWriterUnittest, ComplexExample) {
Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
isolate(), 0, 0, factory()->empty_fixed_array());
SourcePositionTableIterator source_iterator(
- bytecode_array->source_position_table());
+ bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
@@ -282,7 +285,7 @@ TEST_F(BytecodeArrayWriterUnittest, ElideNoneffectfulBytecodes) {
Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
isolate(), 0, 0, factory()->empty_fixed_array());
SourcePositionTableIterator source_iterator(
- bytecode_array->source_position_table());
+ bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
@@ -348,7 +351,7 @@ TEST_F(BytecodeArrayWriterUnittest, DeadcodeElimination) {
Handle<BytecodeArray> bytecode_array = writer()->ToBytecodeArray(
isolate(), 0, 0, factory()->empty_fixed_array());
SourcePositionTableIterator source_iterator(
- bytecode_array->source_position_table());
+ bytecode_array->SourcePositionTable());
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
CHECK_EQ(source_iterator.code_offset(), expected.code_offset);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc
index 45366196f4..af793ebcfe 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-pipeline-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-node-unittest.cc
@@ -4,9 +4,7 @@
#include "src/v8.h"
-#include "src/interpreter/bytecode-pipeline.h"
-#include "src/interpreter/bytecode-register-allocator.h"
-#include "src/isolate.h"
+#include "src/interpreter/bytecode-node.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -15,42 +13,6 @@ namespace interpreter {
using BytecodeNodeTest = TestWithIsolateAndZone;
-TEST(BytecodeSourceInfo, Operations) {
- BytecodeSourceInfo x(0, true);
- CHECK_EQ(x.source_position(), 0);
- CHECK_EQ(x.is_statement(), true);
- CHECK_EQ(x.is_valid(), true);
- x.set_invalid();
- CHECK_EQ(x.is_statement(), false);
- CHECK_EQ(x.is_valid(), false);
-
- x.MakeStatementPosition(1);
- BytecodeSourceInfo y(1, true);
- CHECK(x == y);
- CHECK(!(x != y));
-
- x.set_invalid();
- CHECK(!(x == y));
- CHECK(x != y);
-
- y.MakeStatementPosition(1);
- CHECK_EQ(y.source_position(), 1);
- CHECK_EQ(y.is_statement(), true);
-
- y.MakeStatementPosition(2);
- CHECK_EQ(y.source_position(), 2);
- CHECK_EQ(y.is_statement(), true);
-
- y.set_invalid();
- y.MakeExpressionPosition(3);
- CHECK_EQ(y.source_position(), 3);
- CHECK_EQ(y.is_statement(), false);
-
- y.MakeStatementPosition(3);
- CHECK_EQ(y.source_position(), 3);
- CHECK_EQ(y.is_statement(), true);
-}
-
TEST_F(BytecodeNodeTest, Constructor1) {
BytecodeNode node(Bytecode::kLdaZero);
CHECK_EQ(node.bytecode(), Bytecode::kLdaZero);
diff --git a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
index cb5e1f927e..8c7b363ebf 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
@@ -4,12 +4,8 @@
#include "src/v8.h"
-#include "src/factory.h"
#include "src/interpreter/bytecode-label.h"
-#include "src/interpreter/bytecode-pipeline.h"
#include "src/interpreter/bytecode-register-optimizer.h"
-#include "src/objects-inl.h"
-#include "src/objects.h"
#include "test/unittests/test-utils.h"
namespace v8 {
@@ -99,7 +95,6 @@ TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
CHECK_EQ(write_count(), 0u);
Register temp = NewTemporary();
optimizer()->DoStar(temp);
- BytecodeNode node1(Bytecode::kStar, NewTemporary().ToOperand());
ReleaseTemporaries(temp);
CHECK_EQ(write_count(), 0u);
optimizer()->PrepareForBytecode<Bytecode::kReturn, AccumulatorUse::kRead>();
diff --git a/deps/v8/test/unittests/interpreter/bytecode-source-info-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-source-info-unittest.cc
new file mode 100644
index 0000000000..f08bfe307f
--- /dev/null
+++ b/deps/v8/test/unittests/interpreter/bytecode-source-info-unittest.cc
@@ -0,0 +1,52 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-source-info.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+TEST(BytecodeSourceInfo, Operations) {
+ BytecodeSourceInfo x(0, true);
+ CHECK_EQ(x.source_position(), 0);
+ CHECK_EQ(x.is_statement(), true);
+ CHECK_EQ(x.is_valid(), true);
+ x.set_invalid();
+ CHECK_EQ(x.is_statement(), false);
+ CHECK_EQ(x.is_valid(), false);
+
+ x.MakeStatementPosition(1);
+ BytecodeSourceInfo y(1, true);
+ CHECK(x == y);
+ CHECK(!(x != y));
+
+ x.set_invalid();
+ CHECK(!(x == y));
+ CHECK(x != y);
+
+ y.MakeStatementPosition(1);
+ CHECK_EQ(y.source_position(), 1);
+ CHECK_EQ(y.is_statement(), true);
+
+ y.MakeStatementPosition(2);
+ CHECK_EQ(y.source_position(), 2);
+ CHECK_EQ(y.is_statement(), true);
+
+ y.set_invalid();
+ y.MakeExpressionPosition(3);
+ CHECK_EQ(y.source_position(), 3);
+ CHECK_EQ(y.is_statement(), false);
+
+ y.MakeStatementPosition(3);
+ CHECK_EQ(y.source_position(), 3);
+ CHECK_EQ(y.is_statement(), true);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/object-unittest.cc b/deps/v8/test/unittests/object-unittest.cc
index b09b97dea6..e0a65f2ac8 100644
--- a/deps/v8/test/unittests/object-unittest.cc
+++ b/deps/v8/test/unittests/object-unittest.cc
@@ -8,6 +8,7 @@
#include "src/objects-inl.h"
#include "src/objects.h"
+#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -53,5 +54,64 @@ TEST(Object, StructListOrder) {
#undef TEST_STRUCT
}
+typedef TestWithIsolate ObjectWithIsolate;
+
+TEST_F(ObjectWithIsolate, DictionaryGrowth) {
+ Handle<SeededNumberDictionary> dict =
+ SeededNumberDictionary::New(isolate(), 1);
+ Handle<Object> value = isolate()->factory()->null_value();
+ PropertyDetails details = PropertyDetails::Empty();
+
+ // This test documents the expected growth behavior of a dictionary getting
+ // elements added to it one by one.
+ STATIC_ASSERT(HashTableBase::kMinCapacity == 4);
+ uint32_t i = 1;
+ // 3 elements fit into the initial capacity.
+ for (; i <= 3; i++) {
+ dict = SeededNumberDictionary::Add(dict, i, value, details);
+ CHECK_EQ(4, dict->Capacity());
+ }
+ // 4th element triggers growth.
+ DCHECK_EQ(4, i);
+ for (; i <= 5; i++) {
+ dict = SeededNumberDictionary::Add(dict, i, value, details);
+ CHECK_EQ(8, dict->Capacity());
+ }
+ // 6th element triggers growth.
+ DCHECK_EQ(6, i);
+ for (; i <= 11; i++) {
+ dict = SeededNumberDictionary::Add(dict, i, value, details);
+ CHECK_EQ(16, dict->Capacity());
+ }
+ // 12th element triggers growth.
+ DCHECK_EQ(12, i);
+ for (; i <= 21; i++) {
+ dict = SeededNumberDictionary::Add(dict, i, value, details);
+ CHECK_EQ(32, dict->Capacity());
+ }
+ // 22nd element triggers growth.
+ DCHECK_EQ(22, i);
+ for (; i <= 43; i++) {
+ dict = SeededNumberDictionary::Add(dict, i, value, details);
+ CHECK_EQ(64, dict->Capacity());
+ }
+ // 44th element triggers growth.
+ DCHECK_EQ(44, i);
+ for (; i <= 50; i++) {
+ dict = SeededNumberDictionary::Add(dict, i, value, details);
+ CHECK_EQ(128, dict->Capacity());
+ }
+
+ // If we grow by larger chunks, the next (sufficiently big) power of 2 is
+ // chosen as the capacity.
+ dict = SeededNumberDictionary::New(isolate(), 1);
+ dict = SeededNumberDictionary::EnsureCapacity(dict, 65, 1);
+ CHECK_EQ(128, dict->Capacity());
+
+ dict = SeededNumberDictionary::New(isolate(), 1);
+ dict = SeededNumberDictionary::EnsureCapacity(dict, 30, 1);
+ CHECK_EQ(64, dict->Capacity());
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/parser/preparser-unittest.cc b/deps/v8/test/unittests/parser/preparser-unittest.cc
new file mode 100644
index 0000000000..26f5cd5ab3
--- /dev/null
+++ b/deps/v8/test/unittests/parser/preparser-unittest.cc
@@ -0,0 +1,41 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/api.h"
+#include "src/objects-inl.h"
+#include "test/unittests/test-helpers.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+class PreParserTest : public TestWithContext {
+ public:
+ PreParserTest() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(PreParserTest);
+};
+
+TEST_F(PreParserTest, LazyFunctionLength) {
+ const char* script_source = "function lazy(a, b, c) { } lazy";
+
+ Handle<Object> lazy_object = test::RunJS(isolate(), script_source);
+
+ Handle<SharedFunctionInfo> shared(
+ Handle<JSFunction>::cast(lazy_object)->shared(), i_isolate());
+ CHECK_EQ(shared->length(), SharedFunctionInfo::kInvalidLength);
+
+ const char* get_length_source = "lazy.length";
+
+ Handle<Object> length = test::RunJS(isolate(), get_length_source);
+ CHECK(length->IsSmi());
+ int32_t value;
+ CHECK(length->ToInt32(&value));
+ CHECK_EQ(3, value);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-helper.cc b/deps/v8/test/unittests/test-helpers.cc
index 047e1c1688..eb3dd267f5 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-helper.cc
+++ b/deps/v8/test/unittests/test-helpers.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "test/unittests/compiler-dispatcher/compiler-dispatcher-helper.h"
+#include "test/unittests/test-helpers.h"
#include "include/v8.h"
#include "src/api.h"
diff --git a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-helper.h b/deps/v8/test/unittests/test-helpers.h
index ad90a1114d..e5e003be24 100644
--- a/deps/v8/test/unittests/compiler-dispatcher/compiler-dispatcher-helper.h
+++ b/deps/v8/test/unittests/test-helpers.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef V8_UNITTESTS_COMPILER_DISPATCHER_COMPILER_DISPATCHER_HELPER_H_
-#define V8_UNITTESTS_COMPILER_DISPATCHER_COMPILER_DISPATCHER_HELPER_H_
+#ifndef V8_UNITTESTS_TEST_HELPERS_H_
+#define V8_UNITTESTS_TEST_HELPERS_H_
#include <memory>
@@ -63,4 +63,4 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
} // namespace internal
} // namespace v8
-#endif // V8_UNITTESTS_COMPILER_DISPATCHER_COMPILER_DISPATCHER_HELPER_H_
+#endif // V8_UNITTESTS_TEST_HELPERS_H_
diff --git a/deps/v8/test/unittests/unittests.gyp b/deps/v8/test/unittests/unittests.gyp
index 6add56471f..01cf0c5bfe 100644
--- a/deps/v8/test/unittests/unittests.gyp
+++ b/deps/v8/test/unittests/unittests.gyp
@@ -15,6 +15,8 @@
'api/remote-object-unittest.cc',
'api/v8-object-unittest.cc',
'asmjs/asm-scanner-unittest.cc',
+ 'asmjs/asm-types-unittest.cc',
+ 'asmjs/switch-logic-unittest.cc',
'base/atomic-utils-unittest.cc',
'base/bits-unittest.cc',
'base/cpu-unittest.cc',
@@ -89,8 +91,6 @@
'compiler/typer-unittest.cc',
'compiler/value-numbering-reducer-unittest.cc',
'compiler/zone-stats-unittest.cc',
- 'compiler-dispatcher/compiler-dispatcher-helper.cc',
- 'compiler-dispatcher/compiler-dispatcher-helper.h',
'compiler-dispatcher/compiler-dispatcher-job-unittest.cc',
'compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc',
'compiler-dispatcher/compiler-dispatcher-unittest.cc',
@@ -98,16 +98,31 @@
'counters-unittest.cc',
'eh-frame-iterator-unittest.cc',
'eh-frame-writer-unittest.cc',
+ 'heap/bitmap-unittest.cc',
+ 'heap/concurrent-marking-deque-unittest.cc',
+ 'heap/embedder-tracing-unittest.cc',
+ 'heap/gc-idle-time-handler-unittest.cc',
+ 'heap/gc-tracer-unittest.cc',
+ 'heap/item-parallel-job-unittest.cc',
+ 'heap/marking-unittest.cc',
+ 'heap/memory-reducer-unittest.cc',
+ 'heap/heap-unittest.cc',
+ 'heap/scavenge-job-unittest.cc',
+ 'heap/slot-set-unittest.cc',
+ 'heap/spaces-unittest.cc',
+ 'heap/unmapper-unittest.cc',
+ 'heap/workstealing-marking-deque-unittest.cc',
'interpreter/bytecodes-unittest.cc',
'interpreter/bytecode-array-builder-unittest.cc',
'interpreter/bytecode-array-iterator-unittest.cc',
'interpreter/bytecode-array-random-iterator-unittest.cc',
'interpreter/bytecode-array-writer-unittest.cc',
'interpreter/bytecode-decoder-unittest.cc',
+ 'interpreter/bytecode-node-unittest.cc',
'interpreter/bytecode-operands-unittest.cc',
- 'interpreter/bytecode-pipeline-unittest.cc',
'interpreter/bytecode-register-allocator-unittest.cc',
'interpreter/bytecode-register-optimizer-unittest.cc',
+ 'interpreter/bytecode-source-info-unittest.cc',
'interpreter/bytecode-utils.h',
'interpreter/constant-array-builder-unittest.cc',
'interpreter/interpreter-assembler-unittest.cc',
@@ -115,22 +130,14 @@
'libplatform/default-platform-unittest.cc',
'libplatform/task-queue-unittest.cc',
'libplatform/worker-thread-unittest.cc',
- 'heap/bitmap-unittest.cc',
- 'heap/embedder-tracing-unittest.cc',
- 'heap/gc-idle-time-handler-unittest.cc',
- 'heap/gc-tracer-unittest.cc',
- 'heap/marking-unittest.cc',
- 'heap/memory-reducer-unittest.cc',
- 'heap/heap-unittest.cc',
- 'heap/scavenge-job-unittest.cc',
- 'heap/slot-set-unittest.cc',
- 'heap/spaces-unittest.cc',
- 'heap/unmapper-unittest.cc',
'locked-queue-unittest.cc',
'object-unittest.cc',
+ 'parser/preparser-unittest.cc',
'register-configuration-unittest.cc',
'run-all-unittests.cc',
'source-position-table-unittest.cc',
+ 'test-helpers.cc',
+ 'test-helpers.h',
'test-utils.h',
'test-utils.cc',
'unicode-unittest.cc',
@@ -139,14 +146,13 @@
'zone/zone-allocator-unittest.cc',
'zone/zone-chunk-list-unittest.cc',
'zone/zone-unittest.cc',
- 'wasm/asm-types-unittest.cc',
'wasm/control-transfer-unittest.cc',
'wasm/decoder-unittest.cc',
'wasm/function-body-decoder-unittest.cc',
'wasm/leb-helper-unittest.cc',
'wasm/loop-assignment-analysis-unittest.cc',
'wasm/module-decoder-unittest.cc',
- 'wasm/switch-logic-unittest.cc',
+ 'wasm/streaming-decoder-unittest.cc',
'wasm/wasm-macro-gen-unittest.cc',
'wasm/wasm-module-builder-unittest.cc',
'wasm/wasm-opcodes-unittest.cc',
@@ -190,6 +196,7 @@
'../../src/v8.gyp:v8',
'../../src/v8.gyp:v8_libbase',
'../../src/v8.gyp:v8_libplatform',
+ '../../src/v8.gyp:v8_maybe_snapshot',
],
'include_dirs': [
'../..',
@@ -257,6 +264,9 @@
'<(icu_gyp_path):icuuc',
],
}],
+ ['v8_use_snapshot=="true"', {
+ 'dependencies': ['../../src/v8.gyp:v8_builtins_generators'],
+ }],
],
},
],
diff --git a/deps/v8/test/unittests/value-serializer-unittest.cc b/deps/v8/test/unittests/value-serializer-unittest.cc
index 9f49c6498a..7ca7433eeb 100644
--- a/deps/v8/test/unittests/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/value-serializer-unittest.cc
@@ -1603,6 +1603,44 @@ TEST_F(ValueSerializerTest, DecodeRegExp) {
});
}
+// Tests that invalid flags are not accepted by the deserializer. In particular,
+// the dotAll flag ('s') is only valid when the corresponding flag is enabled.
+TEST_F(ValueSerializerTest, DecodeRegExpDotAll) {
+ i::FLAG_harmony_regexp_dotall = false;
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x1f},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === RegExp.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '/foo/gimuy'"));
+ });
+ InvalidDecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x3f});
+ InvalidDecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x7f});
+
+ i::FLAG_harmony_regexp_dotall = true;
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x1f},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === RegExp.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '/foo/gimuy'"));
+ });
+ DecodeTest({0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x3f},
+ [this](Local<Value> value) {
+ ASSERT_TRUE(value->IsRegExp());
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "Object.getPrototypeOf(result) === RegExp.prototype"));
+ EXPECT_TRUE(EvaluateScriptForResultBool(
+ "result.toString() === '/foo/gimsuy'"));
+ });
+ InvalidDecodeTest(
+ {0xff, 0x09, 0x3f, 0x00, 0x52, 0x03, 0x66, 0x6f, 0x6f, 0x7f});
+}
+
TEST_F(ValueSerializerTest, RoundTripMap) {
RoundTripTest(
"(() => { var m = new Map(); m.set(42, 'foo'); return m; })()",
@@ -1866,21 +1904,24 @@ TEST_F(ValueSerializerTest, DecodeArrayBufferOOM) {
Isolate::CreateParams params;
params.array_buffer_allocator = &allocator;
Isolate* isolate = Isolate::New(params);
- Isolate::Scope isolate_scope(isolate);
- HandleScope handle_scope(isolate);
- Local<Context> context = Context::New(isolate);
- Context::Scope context_scope(context);
- TryCatch try_catch(isolate);
-
- const std::vector<uint8_t> data = {0xff, 0x09, 0x3f, 0x00, 0x42,
- 0x03, 0x00, 0x80, 0xff, 0x00};
- ValueDeserializer deserializer(isolate, &data[0],
- static_cast<int>(data.size()), nullptr);
- deserializer.SetSupportsLegacyWireFormat(true);
- ASSERT_TRUE(deserializer.ReadHeader(context).FromMaybe(false));
- ASSERT_FALSE(try_catch.HasCaught());
- EXPECT_TRUE(deserializer.ReadValue(context).IsEmpty());
- EXPECT_TRUE(try_catch.HasCaught());
+ {
+ Isolate::Scope isolate_scope(isolate);
+ HandleScope handle_scope(isolate);
+ Local<Context> context = Context::New(isolate);
+ Context::Scope context_scope(context);
+ TryCatch try_catch(isolate);
+
+ const std::vector<uint8_t> data = {0xff, 0x09, 0x3f, 0x00, 0x42,
+ 0x03, 0x00, 0x80, 0xff, 0x00};
+ ValueDeserializer deserializer(isolate, &data[0],
+ static_cast<int>(data.size()), nullptr);
+ deserializer.SetSupportsLegacyWireFormat(true);
+ ASSERT_TRUE(deserializer.ReadHeader(context).FromMaybe(false));
+ ASSERT_FALSE(try_catch.HasCaught());
+ EXPECT_TRUE(deserializer.ReadValue(context).IsEmpty());
+ EXPECT_TRUE(try_catch.HasCaught());
+ }
+ isolate->Dispose();
}
// Includes an ArrayBuffer wrapper marked for transfer from the serialization
diff --git a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
index 12712683c1..f49ba9c862 100644
--- a/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
+++ b/deps/v8/test/unittests/wasm/control-transfer-unittest.cc
@@ -6,9 +6,9 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "src/v8.h"
-
#include "src/wasm/wasm-interpreter.h"
-#include "src/wasm/wasm-macro-gen.h"
+
+#include "test/common/wasm/wasm-macro-gen.h"
using testing::MakeMatcher;
using testing::Matcher;
@@ -27,88 +27,90 @@ namespace wasm {
#define TRANSFER_VOID 0
#define TRANSFER_ONE 1
-struct ExpectedPcDelta {
+struct ExpectedControlTransfer {
pc_t pc;
- pcdiff_t expected;
+ pcdiff_t pc_diff;
+ uint32_t sp_diff;
+ uint32_t target_arity;
};
// For nicer error messages.
-class ControlTransferMatcher : public MatcherInterface<const pcdiff_t&> {
+class ControlTransferMatcher
+ : public MatcherInterface<const ControlTransferEntry&> {
public:
- explicit ControlTransferMatcher(pc_t pc, const pcdiff_t& expected)
+ explicit ControlTransferMatcher(pc_t pc,
+ const ExpectedControlTransfer& expected)
: pc_(pc), expected_(expected) {}
void DescribeTo(std::ostream* os) const override {
- *os << "@" << pc_ << " pcdiff = " << expected_;
+ *os << "@" << pc_ << ": pcdiff = " << expected_.pc_diff
+ << ", spdiff = " << expected_.sp_diff
+ << ", target arity = " << expected_.target_arity;
}
- bool MatchAndExplain(const pcdiff_t& input,
+ bool MatchAndExplain(const ControlTransferEntry& input,
MatchResultListener* listener) const override {
- if (input != expected_) {
- *listener << "@" << pc_ << " pcdiff = " << input;
- return false;
+ if (input.pc_diff == expected_.pc_diff &&
+ input.sp_diff == expected_.sp_diff &&
+ input.target_arity == expected_.target_arity) {
+ return true;
}
- return true;
+ *listener << "@" << pc_ << ": pcdiff = " << input.pc_diff
+ << ", spdiff = " << input.sp_diff
+ << ", target arity = " << input.target_arity;
+ return false;
}
private:
pc_t pc_;
- const pcdiff_t& expected_;
+ const ExpectedControlTransfer& expected_;
};
class ControlTransferTest : public TestWithZone {
public:
- void CheckPcDeltas(const byte* start, const byte* end,
- ExpectedPcDelta* expected_deltas, size_t num_targets) {
- ControlTransferMap map =
- WasmInterpreter::ComputeControlTransfersForTesting(zone(), start, end);
+ template <int code_len>
+ void CheckTransfers(
+ const byte (&code)[code_len],
+ std::initializer_list<ExpectedControlTransfer> expected_transfers) {
+ byte code_with_end[code_len + 1]; // NOLINT: code_len is a constant here
+ memcpy(code_with_end, code, code_len);
+ code_with_end[code_len] = kExprEnd;
+
+ ControlTransferMap map = WasmInterpreter::ComputeControlTransfersForTesting(
+ zone(), nullptr, code_with_end, code_with_end + code_len + 1);
// Check all control targets in the map.
- for (size_t i = 0; i < num_targets; i++) {
- pc_t pc = expected_deltas[i].pc;
- auto it = map.find(pc);
- if (it == map.end()) {
- EXPECT_TRUE(false) << "expected control target @ " << pc;
- } else {
- pcdiff_t expected = expected_deltas[i].expected;
- pcdiff_t& target = it->second;
- EXPECT_THAT(target,
- MakeMatcher(new ControlTransferMatcher(pc, expected)));
- }
+ for (auto& expected_transfer : expected_transfers) {
+ pc_t pc = expected_transfer.pc;
+ EXPECT_TRUE(map.count(pc) > 0) << "expected control target @" << pc;
+ if (!map.count(pc)) continue;
+ auto& entry = map[pc];
+ EXPECT_THAT(entry, MakeMatcher(new ControlTransferMatcher(
+ pc, expected_transfer)));
}
// Check there are no other control targets.
- CheckNoOtherTargets<ExpectedPcDelta>(start, end, map, expected_deltas,
- num_targets);
+ CheckNoOtherTargets(code_with_end, code_with_end + code_len + 1, map,
+ expected_transfers);
}
- template <typename T>
- void CheckNoOtherTargets(const byte* start, const byte* end,
- ControlTransferMap& map, T* targets,
- size_t num_targets) {
+ void CheckNoOtherTargets(
+ const byte* start, const byte* end, ControlTransferMap& map,
+ std::initializer_list<ExpectedControlTransfer> targets) {
// Check there are no other control targets.
for (pc_t pc = 0; start + pc < end; pc++) {
bool found = false;
- for (size_t i = 0; i < num_targets; i++) {
- if (targets[i].pc == pc) {
+ for (auto& target : targets) {
+ if (target.pc == pc) {
found = true;
break;
}
}
if (found) continue;
- if (map.find(pc) != map.end()) {
- printf("expected no control @ +%zu\n", pc);
- EXPECT_TRUE(false);
- }
+ EXPECT_TRUE(map.count(pc) == 0) << "expected no control @ +" << pc;
}
}
};
-#define EXPECT_PC_DELTAS(...) \
- do { \
- ExpectedPcDelta pairs[] = {__VA_ARGS__}; \
- CheckPcDeltas(code, code + sizeof(code), pairs, arraysize(pairs)); \
- } while (false)
-
TEST_F(ControlTransferTest, SimpleIf) {
byte code[] = {
kExprI32Const, // @0
@@ -117,7 +119,7 @@ TEST_F(ControlTransferTest, SimpleIf) {
kLocalVoid, // @3
kExprEnd // @4
};
- EXPECT_PC_DELTAS({2, 2});
+ CheckTransfers(code, {{2, 2, 0, 0}});
}
TEST_F(ControlTransferTest, SimpleIf1) {
@@ -129,7 +131,7 @@ TEST_F(ControlTransferTest, SimpleIf1) {
kExprNop, // @4
kExprEnd // @5
};
- EXPECT_PC_DELTAS({2, 3});
+ CheckTransfers(code, {{2, 3, 0, 0}});
}
TEST_F(ControlTransferTest, SimpleIf2) {
@@ -142,7 +144,7 @@ TEST_F(ControlTransferTest, SimpleIf2) {
kExprNop, // @5
kExprEnd // @6
};
- EXPECT_PC_DELTAS({2, 4});
+ CheckTransfers(code, {{2, 4, 0, 0}});
}
TEST_F(ControlTransferTest, SimpleIfElse) {
@@ -154,7 +156,7 @@ TEST_F(ControlTransferTest, SimpleIfElse) {
kExprElse, // @4
kExprEnd // @5
};
- EXPECT_PC_DELTAS({2, 3}, {4, 2});
+ CheckTransfers(code, {{2, 3, 0, 0}, {4, 2, 0, 0}});
}
TEST_F(ControlTransferTest, SimpleIfElse_v1) {
@@ -170,7 +172,7 @@ TEST_F(ControlTransferTest, SimpleIfElse_v1) {
0, // @8
kExprEnd // @9
};
- EXPECT_PC_DELTAS({2, 5}, {6, 4});
+ CheckTransfers(code, {{2, 5, 0, 0}, {6, 4, 1, 0}});
}
TEST_F(ControlTransferTest, SimpleIfElse1) {
@@ -183,7 +185,7 @@ TEST_F(ControlTransferTest, SimpleIfElse1) {
kExprNop, // @5
kExprEnd // @6
};
- EXPECT_PC_DELTAS({2, 3}, {4, 3});
+ CheckTransfers(code, {{2, 3, 0, 0}, {4, 3, 0, 0}});
}
TEST_F(ControlTransferTest, IfBr) {
@@ -196,7 +198,7 @@ TEST_F(ControlTransferTest, IfBr) {
0, // @5
kExprEnd // @6
};
- EXPECT_PC_DELTAS({2, 4}, {4, 3});
+ CheckTransfers(code, {{2, 4, 0, 0}, {4, 3, 0, 0}});
}
TEST_F(ControlTransferTest, IfBrElse) {
@@ -210,7 +212,7 @@ TEST_F(ControlTransferTest, IfBrElse) {
kExprElse, // @6
kExprEnd // @7
};
- EXPECT_PC_DELTAS({2, 5}, {4, 4}, {6, 2});
+ CheckTransfers(code, {{2, 5, 0, 0}, {4, 4, 0, 0}, {6, 2, 0, 0}});
}
TEST_F(ControlTransferTest, IfElseBr) {
@@ -224,15 +226,16 @@ TEST_F(ControlTransferTest, IfElseBr) {
0, // @6
kExprEnd // @7
};
- EXPECT_PC_DELTAS({2, 3}, {4, 4}, {5, 3});
+ CheckTransfers(code, {{2, 3, 0, 0}, {4, 4, 0, 0}, {5, 3, 0, 0}});
}
TEST_F(ControlTransferTest, BlockEmpty) {
byte code[] = {
kExprBlock, // @0
- kExprEnd // @1
+ kLocalVoid, // @1
+ kExprEnd // @2
};
- CheckPcDeltas(code, code + sizeof(code), nullptr, 0);
+ CheckTransfers(code, {});
}
TEST_F(ControlTransferTest, Br0) {
@@ -243,7 +246,7 @@ TEST_F(ControlTransferTest, Br0) {
0, // @3
kExprEnd // @4
};
- EXPECT_PC_DELTAS({2, 3});
+ CheckTransfers(code, {{2, 3, 0, 0}});
}
TEST_F(ControlTransferTest, Br1) {
@@ -255,7 +258,7 @@ TEST_F(ControlTransferTest, Br1) {
0, // @4
kExprEnd // @5
};
- EXPECT_PC_DELTAS({3, 3});
+ CheckTransfers(code, {{3, 3, 0, 0}});
}
TEST_F(ControlTransferTest, Br_v1a) {
@@ -268,7 +271,7 @@ TEST_F(ControlTransferTest, Br_v1a) {
0, // @5
kExprEnd // @6
};
- EXPECT_PC_DELTAS({4, 3});
+ CheckTransfers(code, {{4, 3, 1, 0}});
}
TEST_F(ControlTransferTest, Br_v1b) {
@@ -281,7 +284,7 @@ TEST_F(ControlTransferTest, Br_v1b) {
0, // @5
kExprEnd // @6
};
- EXPECT_PC_DELTAS({4, 3});
+ CheckTransfers(code, {{4, 3, 1, 0}});
}
TEST_F(ControlTransferTest, Br_v1c) {
@@ -294,7 +297,20 @@ TEST_F(ControlTransferTest, Br_v1c) {
0, // @5
kExprEnd // @6
};
- EXPECT_PC_DELTAS({4, 3});
+ CheckTransfers(code, {{4, 3, 0, 0}});
+}
+
+TEST_F(ControlTransferTest, Br_v1d) {
+ byte code[] = {
+ kExprBlock, // @0
+ kLocalI32, // @1
+ kExprI32Const, // @2
+ 0, // @3
+ kExprBr, // @4
+ 0, // @5
+ kExprEnd // @6
+ };
+ CheckTransfers(code, {{4, 3, 1, 1}});
}
TEST_F(ControlTransferTest, Br2) {
@@ -307,7 +323,7 @@ TEST_F(ControlTransferTest, Br2) {
0, // @5
kExprEnd // @6
};
- EXPECT_PC_DELTAS({4, 3});
+ CheckTransfers(code, {{4, 3, 0, 0}});
}
TEST_F(ControlTransferTest, Br0b) {
@@ -319,7 +335,7 @@ TEST_F(ControlTransferTest, Br0b) {
kExprNop, // @4
kExprEnd // @5
};
- EXPECT_PC_DELTAS({2, 4});
+ CheckTransfers(code, {{2, 4, 0, 0}});
}
TEST_F(ControlTransferTest, Br0c) {
@@ -332,7 +348,7 @@ TEST_F(ControlTransferTest, Br0c) {
kExprNop, // @5
kExprEnd // @6
};
- EXPECT_PC_DELTAS({2, 5});
+ CheckTransfers(code, {{2, 5, 0, 0}});
}
TEST_F(ControlTransferTest, SimpleLoop1) {
@@ -343,7 +359,7 @@ TEST_F(ControlTransferTest, SimpleLoop1) {
0, // @3
kExprEnd // @4
};
- EXPECT_PC_DELTAS({2, -2});
+ CheckTransfers(code, {{2, -2, 0, 0}});
}
TEST_F(ControlTransferTest, SimpleLoop2) {
@@ -355,7 +371,7 @@ TEST_F(ControlTransferTest, SimpleLoop2) {
0, // @4
kExprEnd // @5
};
- EXPECT_PC_DELTAS({3, -3});
+ CheckTransfers(code, {{3, -3, 0, 0}});
}
TEST_F(ControlTransferTest, SimpleLoopExit1) {
@@ -366,7 +382,7 @@ TEST_F(ControlTransferTest, SimpleLoopExit1) {
1, // @3
kExprEnd // @4
};
- EXPECT_PC_DELTAS({2, 3});
+ CheckTransfers(code, {{2, 4, 0, 0}});
}
TEST_F(ControlTransferTest, SimpleLoopExit2) {
@@ -378,7 +394,7 @@ TEST_F(ControlTransferTest, SimpleLoopExit2) {
1, // @4
kExprEnd // @5
};
- EXPECT_PC_DELTAS({3, 3});
+ CheckTransfers(code, {{3, 4, 0, 0}});
}
TEST_F(ControlTransferTest, BrTable0) {
@@ -392,7 +408,7 @@ TEST_F(ControlTransferTest, BrTable0) {
U32V_1(0), // @6
kExprEnd // @7
};
- EXPECT_PC_DELTAS({4, 4});
+ CheckTransfers(code, {{4, 4, 0, 0}});
}
TEST_F(ControlTransferTest, BrTable0_v1a) {
@@ -408,7 +424,7 @@ TEST_F(ControlTransferTest, BrTable0_v1a) {
U32V_1(0), // @8
kExprEnd // @9
};
- EXPECT_PC_DELTAS({6, 4});
+ CheckTransfers(code, {{6, 4, 1, 0}});
}
TEST_F(ControlTransferTest, BrTable0_v1b) {
@@ -424,7 +440,7 @@ TEST_F(ControlTransferTest, BrTable0_v1b) {
U32V_1(0), // @8
kExprEnd // @9
};
- EXPECT_PC_DELTAS({6, 4});
+ CheckTransfers(code, {{6, 4, 1, 0}});
}
TEST_F(ControlTransferTest, BrTable1) {
@@ -439,7 +455,7 @@ TEST_F(ControlTransferTest, BrTable1) {
U32V_1(0), // @7
kExprEnd // @8
};
- EXPECT_PC_DELTAS({4, 5}, {5, 4});
+ CheckTransfers(code, {{4, 5, 0, 0}, {5, 4, 0, 0}});
}
TEST_F(ControlTransferTest, BrTable2) {
@@ -458,7 +474,56 @@ TEST_F(ControlTransferTest, BrTable2) {
kExprEnd, // @11
kExprEnd // @12
};
- EXPECT_PC_DELTAS({6, 6}, {7, 5}, {8, 5});
+ CheckTransfers(code, {{6, 6, 0, 0}, {7, 5, 0, 0}, {8, 5, 0, 0}});
+}
+
+TEST_F(ControlTransferTest, BiggerSpDiffs) {
+ byte code[] = {
+ kExprBlock, // @0
+ kLocalI32, // @1
+ kExprI32Const, // @2
+ 0, // @3
+ kExprBlock, // @4
+ kLocalVoid, // @5
+ kExprI32Const, // @6
+ 0, // @7
+ kExprI32Const, // @8
+ 0, // @9
+ kExprI32Const, // @10
+ 0, // @11
+ kExprBrIf, // @12
+ 0, // @13
+ kExprBr, // @14
+ 1, // @15
+ kExprEnd, // @16
+ kExprEnd // @17
+ };
+ CheckTransfers(code, {{12, 5, 2, 0}, {14, 4, 3, 1}});
+}
+
+TEST_F(ControlTransferTest, NoInfoForUnreachableCode) {
+ byte code[] = {
+ kExprBlock, // @0
+ kLocalVoid, // @1
+ kExprBr, // @2
+ 0, // @3
+ kExprBr, // @4 -- no control transfer entry!
+ 1, // @5
+ kExprEnd, // @6
+ kExprBlock, // @7
+ kLocalVoid, // @8
+ kExprUnreachable, // @9
+ kExprI32Const, // @10
+ 0, // @11
+ kExprIf, // @12 -- no control transfer entry!
+ kLocalVoid, // @13
+ kExprBr, // @14 -- no control transfer entry!
+ 0, // @15
+ kExprElse, // @16 -- no control transfer entry!
+ kExprEnd, // @17
+ kExprEnd // @18
+ };
+ CheckTransfers(code, {{2, 5, 0, 0}});
}
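+// A hedged reading of the CheckTransfers quadruples used above (field names
+// are illustrative; the authoritative layout is the interpreter's control
+// transfer side table):
+//
+//   struct ControlTransferEntry {  // sketch, not the shipped definition
+//     uint32_t pc;            // offset of the branching opcode in the body
+//     int32_t pc_diff;        // signed jump delta; negative for loop back-edges
+//     uint32_t sp_diff;       // value-stack slots dropped when branching
+//     uint32_t target_arity;  // results copied over the dropped slots
+//   };
+//
+// E.g. in BiggerSpDiffs, {14, 4, 3, 1} says the kExprBr at @14 jumps forward
+// 4 bytes past the outer kExprEnd, drops 3 value-stack slots, and keeps 1
+// result for the i32-typed outer block.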
} // namespace wasm
diff --git a/deps/v8/test/unittests/wasm/decoder-unittest.cc b/deps/v8/test/unittests/wasm/decoder-unittest.cc
index f28d5473d7..e0c7908b90 100644
--- a/deps/v8/test/unittests/wasm/decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/decoder-unittest.cc
@@ -6,7 +6,7 @@
#include "src/objects-inl.h"
#include "src/wasm/decoder.h"
-#include "src/wasm/wasm-macro-gen.h"
+#include "test/common/wasm/wasm-macro-gen.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 6961df9b5a..49c2ca76e6 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -4,21 +4,20 @@
#include "test/unittests/test-utils.h"
-#include "src/v8.h"
-
-#include "test/common/wasm/test-signatures.h"
-
#include "src/objects-inl.h"
#include "src/objects.h"
-
+#include "src/v8.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/local-decl-encoder.h"
#include "src/wasm/signature-map.h"
#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-opcodes.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
namespace v8 {
namespace internal {
namespace wasm {
@@ -135,11 +134,11 @@ class FunctionBodyDecoderTest : public TestWithZone {
start, end);
if (result.ok() != expected_success) {
- uint32_t pc = result.error_offset;
+ uint32_t pc = result.error_offset();
std::ostringstream str;
if (expected_success) {
str << "Verification failed: pc = +" << pc
- << ", msg = " << result.error_msg;
+ << ", msg = " << result.error_msg();
} else {
str << "Verification successed, expected failure; pc = +" << pc;
}
@@ -1296,7 +1295,7 @@ TEST_F(FunctionBodyDecoderTest, StoreMemOffset) {
TestModuleEnv module_env;
module = &module_env;
module_env.InitializeMemory();
- for (int offset = 0; offset < 128; offset += 7) {
+ for (byte offset = 0; offset < 128; offset += 7) {
byte code[] = {WASM_STORE_MEM_OFFSET(MachineType::Int32(), offset,
WASM_ZERO, WASM_ZERO)};
EXPECT_VERIFIES_C(v_i, code);
@@ -2636,6 +2635,10 @@ TEST_F(WasmOpcodeLengthTest, SimdExpressions) {
EXPECT_LENGTH_N(3, kSimdPrefix, static_cast<byte>(kExpr##name & 0xff));
FOREACH_SIMD_1_OPERAND_OPCODE(TEST_SIMD)
#undef TEST_SIMD
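+  // Shuffle opcodes take one lane-index immediate per lane, so the total
+  // length is 2 bytes (prefix + opcode) plus 4, 8, or 16: 6, 10, and 18.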
+ EXPECT_LENGTH_N(6, kSimdPrefix, static_cast<byte>(kExprS32x4Shuffle & 0xff));
+ EXPECT_LENGTH_N(10, kSimdPrefix, static_cast<byte>(kExprS16x8Shuffle & 0xff));
+ EXPECT_LENGTH_N(18, kSimdPrefix, static_cast<byte>(kExprS8x16Shuffle & 0xff));
// test for bad simd opcode
EXPECT_LENGTH_N(2, kSimdPrefix, 0xff);
}
diff --git a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
index 9e1954d481..e6507b9685 100644
--- a/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
+++ b/deps/v8/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -4,18 +4,16 @@
#include "test/unittests/test-utils.h"
-#include "src/v8.h"
-
-#include "test/common/wasm/test-signatures.h"
-
#include "src/bit-vector.h"
#include "src/objects-inl.h"
#include "src/objects.h"
-
+#include "src/v8.h"
#include "src/wasm/function-body-decoder.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-module.h"
+#include "test/common/wasm/test-signatures.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
#define WASM_SET_ZERO(i) WASM_SET_LOCAL(i, WASM_ZERO)
namespace v8 {
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index adbb3d5808..3bdd3d9b99 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -8,8 +8,8 @@
#include "src/objects-inl.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-limits.h"
-#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-opcodes.h"
+#include "test/common/wasm/wasm-macro-gen.h"
namespace v8 {
namespace internal {
@@ -89,14 +89,12 @@ namespace wasm {
do { \
ModuleResult result = DecodeModule(data, data + sizeof(data)); \
EXPECT_TRUE(result.ok()); \
- if (result.val) delete result.val; \
} while (false)
#define EXPECT_FAILURE_LEN(data, length) \
do { \
ModuleResult result = DecodeModule(data, data + length); \
EXPECT_FALSE(result.ok()); \
- if (result.val) delete result.val; \
} while (false)
#define EXPECT_FAILURE(data) EXPECT_FAILURE_LEN(data, sizeof(data))
@@ -108,13 +106,10 @@ namespace wasm {
} \
} while (false)
-#define EXPECT_OK(result) \
- do { \
- EXPECT_TRUE(result.ok()); \
- if (!result.ok()) { \
- if (result.val) delete result.val; \
- return; \
- } \
+#define EXPECT_OK(result) \
+ do { \
+ EXPECT_TRUE(result.ok()); \
+ if (!result.ok()) return; \
} while (false)
static size_t SizeOfVarInt(size_t value) {
@@ -161,7 +156,6 @@ TEST_F(WasmModuleVerifyTest, WrongMagic) {
const byte data[] = {U32_LE(kWasmMagic ^ x), U32_LE(kWasmVersion)};
ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
}
}
@@ -170,14 +164,12 @@ TEST_F(WasmModuleVerifyTest, WrongVersion) {
const byte data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion ^ x)};
ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
}
}
TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
ModuleResult result = DecodeModule(nullptr, 0);
EXPECT_TRUE(result.ok());
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, OneGlobal) {
@@ -204,8 +196,6 @@ TEST_F(WasmModuleVerifyTest, OneGlobal) {
EXPECT_FALSE(global->mutability);
EXPECT_EQ(WasmInitExpr::kI32Const, global->init.kind);
EXPECT_EQ(13, global->init.val.i32_const);
-
- if (result.val) delete result.val;
}
EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
@@ -222,7 +212,6 @@ TEST_F(WasmModuleVerifyTest, Global_invalid_type) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, Global_invalid_type2) {
@@ -236,7 +225,6 @@ TEST_F(WasmModuleVerifyTest, Global_invalid_type2) {
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
@@ -246,7 +234,6 @@ TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
};
ModuleResult result = DecodeModule(data, data + sizeof(data));
EXPECT_OK(result);
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportMutableGlobal) {
@@ -325,7 +312,6 @@ TEST_F(WasmModuleVerifyTest, NGlobals) {
ModuleResult result = DecodeModule(&buffer[0], &buffer[0] + buffer.size());
EXPECT_OK(result);
- if (result.val) delete result.val;
}
}
@@ -371,8 +357,6 @@ TEST_F(WasmModuleVerifyTest, TwoGlobals) {
EXPECT_EQ(8u, g1->offset);
EXPECT_TRUE(g1->mutability);
EXPECT_EQ(WasmInitExpr::kF64Const, g1->init.kind);
-
- if (result.val) delete result.val;
}
EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
@@ -413,7 +397,6 @@ TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
EXPECT_EQ(1u, result.val->signatures[1]->parameter_count());
EXPECT_EQ(2u, result.val->signatures[2]->parameter_count());
}
- if (result.val) delete result.val;
EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
@@ -456,7 +439,6 @@ TEST_F(WasmModuleVerifyTest, DataSegmentWithImmutableImportedGlobal) {
WasmInitExpr expr = result.val->data_segments.back().dest_addr;
EXPECT_EQ(WasmInitExpr::kGlobalIndex, expr.kind);
EXPECT_EQ(1u, expr.val.global_index);
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, DataSegmentWithMutableImportedGlobal) {
@@ -544,8 +526,6 @@ TEST_F(WasmModuleVerifyTest, OneDataSegment) {
EXPECT_EQ(0x9bbaa, segment->dest_addr.val.i32_const);
EXPECT_EQ(kDataSegmentSourceOffset, segment->source_offset);
EXPECT_EQ(3u, segment->source_size);
-
- if (result.val) delete result.val;
}
EXPECT_OFF_END_FAILURE(data, 14, sizeof(data));
@@ -604,8 +584,6 @@ TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
EXPECT_EQ(0x6ddcc, s1->dest_addr.val.i32_const);
EXPECT_EQ(kDataSegment1SourceOffset, s1->source_offset);
EXPECT_EQ(10u, s1->source_size);
-
- if (result.val) delete result.val;
}
EXPECT_OFF_END_FAILURE(data, 14, sizeof(data));
@@ -679,7 +657,26 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
EXPECT_EQ(1u, result.val->function_tables.size());
EXPECT_EQ(1u, result.val->function_tables[0].min_size);
}
- if (result.val) delete result.val;
+}
+
+TEST_F(WasmModuleVerifyTest, Regression_735887) {
+ // Test with an invalid function index in the element section.
+ static const byte data[] = {
+ // sig#0 ---------------------------------------------------------------
+ SIGNATURES_SECTION_VOID_VOID,
+ // funcs ---------------------------------------------------------------
+ ONE_EMPTY_FUNCTION,
+ // table declaration ---------------------------------------------------
+ SECTION(Table, 4), ENTRY_COUNT(1), kWasmAnyFunctionTypeForm, 0, 1,
+ // elements ------------------------------------------------------------
+ SECTION(Element, 7),
+ 1, // entry count
+ TABLE_INDEX(0), WASM_INIT_EXPR_I32V_1(0),
+ 1, // elements count
+ 0x9a // invalid I32V as function index
+ };
+
+ EXPECT_FAILURE(data);
}
TEST_F(WasmModuleVerifyTest, OneIndirectFunction_one_entry) {
@@ -705,7 +702,6 @@ TEST_F(WasmModuleVerifyTest, OneIndirectFunction_one_entry) {
EXPECT_EQ(1u, result.val->function_tables.size());
EXPECT_EQ(1u, result.val->function_tables[0].min_size);
}
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
@@ -742,7 +738,6 @@ TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
EXPECT_EQ(1u, result.val->function_tables.size());
EXPECT_EQ(8u, result.val->function_tables[0].min_size);
}
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, IndirectFunctionNoFunctions) {
@@ -955,7 +950,7 @@ TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
EXPECT_OK(result);
if (result.val && result.ok()) {
- WasmFunction* function = result.val;
+ WasmFunction* function = result.val.get();
EXPECT_EQ(0u, function->sig->parameter_count());
EXPECT_EQ(0u, function->sig->return_count());
EXPECT_EQ(0u, function->name_offset);
@@ -964,8 +959,6 @@ TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
EXPECT_EQ(sizeof(data), function->code_end_offset);
// TODO(titzer): verify encoding of local declarations
}
-
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, SectionWithoutNameLength) {
@@ -1070,8 +1063,6 @@ TEST_F(WasmModuleVerifyTest, UnknownSectionSkipped) {
EXPECT_EQ(kWasmI32, global->type);
EXPECT_EQ(0u, global->offset);
-
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ImportTable_empty) {
@@ -1213,8 +1204,6 @@ TEST_F(WasmModuleVerifyTest, ExportTable_empty1) {
EXPECT_EQ(1u, result.val->functions.size());
EXPECT_EQ(0u, result.val->export_table.size());
-
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTable_empty2) {
@@ -1244,8 +1233,6 @@ TEST_F(WasmModuleVerifyTest, ExportTableOne) {
EXPECT_EQ(1u, result.val->functions.size());
EXPECT_EQ(1u, result.val->export_table.size());
-
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportNameWithInvalidStringLength) {
@@ -1288,8 +1275,6 @@ TEST_F(WasmModuleVerifyTest, ExportTableTwo) {
EXPECT_EQ(1u, result.val->functions.size());
EXPECT_EQ(2u, result.val->export_table.size());
-
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTableThree) {
@@ -1316,8 +1301,6 @@ TEST_F(WasmModuleVerifyTest, ExportTableThree) {
EXPECT_EQ(3u, result.val->functions.size());
EXPECT_EQ(3u, result.val->export_table.size());
-
- if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTableThreeOne) {
@@ -1357,7 +1340,6 @@ TEST_F(WasmModuleVerifyTest, ExportTableOne_off_end) {
for (size_t length = 33; length < sizeof(data); length++) {
ModuleResult result = DecodeModule(data, data + length);
EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
}
}
diff --git a/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
new file mode 100644
index 0000000000..98b3e25457
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/streaming-decoder-unittest.cc
@@ -0,0 +1,498 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/objects-inl.h"
+
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/streaming-decoder.h"
+
+#include "src/objects/descriptor-array.h"
+#include "src/objects/dictionary.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmStreamingDecoderTest : public ::testing::Test {
+ public:
+ void ExpectVerifies(Vector<const uint8_t> data) {
+ for (int split = 0; split <= data.length(); ++split) {
+ StreamingDecoder stream(nullptr);
+ stream.OnBytesReceived(data.SubVector(0, split));
+ stream.OnBytesReceived(data.SubVector(split, data.length()));
+ EXPECT_TRUE(stream.FinishForTesting());
+ }
+ }
+
+ void ExpectFailure(Vector<const uint8_t> data) {
+ for (int split = 0; split <= data.length(); ++split) {
+ StreamingDecoder stream(nullptr);
+ stream.OnBytesReceived(data.SubVector(0, split));
+ stream.OnBytesReceived(data.SubVector(split, data.length()));
+ EXPECT_FALSE(stream.FinishForTesting());
+ }
+ }
+};
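+// Splitting the input at every offset exercises resumption at arbitrary
+// chunk boundaries, including splits inside the magic/version header and
+// inside multi-byte LEB128 length fields.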
+
+TEST_F(WasmStreamingDecoderTest, EmptyStream) {
+ StreamingDecoder stream(nullptr);
+ EXPECT_FALSE(stream.FinishForTesting());
+}
+
+TEST_F(WasmStreamingDecoderTest, IncompleteModuleHeader) {
+ const uint8_t data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion)};
+ {
+ StreamingDecoder stream(nullptr);
+ stream.OnBytesReceived(Vector<const uint8_t>(data, 1));
+ EXPECT_FALSE(stream.FinishForTesting());
+ }
+ for (int length = 1; length < static_cast<int>(arraysize(data)); ++length) {
+ ExpectFailure(Vector<const uint8_t>(data, length));
+ }
+}
+
+TEST_F(WasmStreamingDecoderTest, MagicAndVersion) {
+ const uint8_t data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion)};
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, BadMagic) {
+ for (uint32_t x = 1; x; x <<= 1) {
+ const uint8_t data[] = {U32_LE(kWasmMagic ^ x), U32_LE(kWasmVersion)};
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ }
+}
+
+TEST_F(WasmStreamingDecoderTest, BadVersion) {
+ for (uint32_t x = 1; x; x <<= 1) {
+ const uint8_t data[] = {U32_LE(kWasmMagic), U32_LE(kWasmVersion ^ x)};
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+ }
+}
+
+TEST_F(WasmStreamingDecoderTest, OneSection) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ 0x1, // Section ID
+ 0x6, // Section Length
+ 0x0, // Payload
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0 // 6
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, OneSection_b) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ 0x1, // Section ID
+ 0x86, // Section Length = 6 (LEB)
+ 0x0, // --
+ 0x0, // Payload
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0 // 6
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, OneShortSection) {
+ // Short section means that section length + payload is less than 5 bytes,
+ // which is the maximum size of the length field.
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ 0x1, // Section ID
+ 0x2, // Section Length
+ 0x0, // Payload
+ 0x0 // 2
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, OneShortSection_b) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ 0x1, // Section ID
+ 0x82, // Section Length = 2 (LEB)
+ 0x80, // --
+ 0x0, // --
+ 0x0, // Payload
+ 0x0 // 2
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, OneEmptySection) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ 0x1, // Section ID
+ 0x0 // Section Length
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload1) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ 0x1, // Section ID
+ 0x6, // Section Length
+ 0x0, // Payload
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0 // 5
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, OneSectionNotEnoughPayload2) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ 0x1, // Section ID
+ 0x6, // Section Length
+ 0x0 // Payload
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, OneSectionInvalidLength) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ 0x1, // Section ID
+ 0x80, // Section Length (0 in LEB)
+ 0x80, // --
+ 0x80, // --
+ 0x80, // --
+ 0x80, // --
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+}
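+// Reading the LEB128 length bytes above: each byte contributes 7 payload
+// bits and the high bit marks continuation. So 0x86 0x00 decodes to 6,
+// 0x82 0x80 0x00 to 2, while five 0x80 continuation bytes never terminate
+// within the 5-byte varuint32 limit and must be rejected.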
+
+TEST_F(WasmStreamingDecoderTest, TwoLongSections) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ 0x1, // Section ID
+ 0x6, // Section Length
+ 0x0, // Payload
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ 0x2, // Section ID
+ 0x7, // Section Length
+ 0x0, // Payload
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ 0x0 // 7
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, TwoShortSections) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ 0x1, // Section ID
+ 0x1, // Section Length
+ 0x0, // Payload
+ 0x2, // Section ID
+ 0x2, // Section Length
+ 0x0, // Payload
+ 0x0, // 2
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, TwoSectionsShortLong) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ 0x1, // Section ID
+ 0x1, // Section Length
+ 0x0, // Payload
+ 0x2, // Section ID
+ 0x7, // Section Length
+ 0x0, // Payload
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ 0x0 // 7
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, TwoEmptySections) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ 0x1, // Section ID
+ 0x0, // Section Length
+ 0x2, // Section ID
+ 0x0 // Section Length
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, EmptyCodeSection) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0x0, // Section Length
+ 0xb, // Section ID
+ 0x0 // Section Length
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, OneFunction) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0x8, // Section Length
+ 0x1, // Number of Functions
+ 0x6, // Function Length
+ 0x0, // Function
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, OneShortFunction) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0x3, // Section Length
+ 0x1, // Number of Functions
+ 0x1, // Function Length
+ 0x0, // Function
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, EmptyFunction) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0x2, // Section Length
+ 0x1, // Number of Functions
+ 0x0, // Function Length
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, TwoFunctions) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0x10, // Section Length
+ 0x2, // Number of Functions
+ 0x6, // Function Length
+ 0x0, // Function
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ 0x7, // Function Length
+ 0x0, // Function
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ 0x0, // 7
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, TwoFunctions_b) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0xb, // Section Length
+ 0x2, // Number of Functions
+ 0x1, // Function Length
+ 0x0, // Function
+ 0x7, // Function Length
+ 0x0, // Function
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ 0x0, // 7
+ };
+ ExpectVerifies(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooHigh) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0xd, // Section Length
+ 0x2, // Number of Functions
+ 0x7, // Function Length
+ 0x0, // Function
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ 0x0, // 7
+ 0x1, // Function Length
+ 0x0, // Function
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLow) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0x9, // Section Length
+ 0x2, // Number of Functions
+ 0x7, // Function Length
+ 0x0, // Function
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ 0x0, // 7
+ 0x1, // Function Length
+ 0x0, // Function
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInNumFunctions) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0x1, // Section Length
+ 0x82, // Number of Functions
+ 0x80, // --
+ 0x00, // --
+ 0x7, // Function Length
+ 0x0, // Function
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ 0x0, // 7
+ 0x1, // Function Length
+ 0x0, // Function
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, CodeSectionLengthTooLowEndsInFunctionLength) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0x5, // Section Length
+ 0x82, // Number of Functions
+ 0x80, // --
+ 0x00, // --
+ 0x87, // Function Length
+ 0x80, // --
+ 0x00, // --
+ 0x0, // Function
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ 0x0, // 7
+ 0x1, // Function Length
+ 0x0, // Function
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooHigh) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0xb, // Section Length
+ 0x4, // Number of Functions
+ 0x7, // Function Length
+ 0x0, // Function
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ 0x0, // 7
+ 0x1, // Function Length
+ 0x0, // Function
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+}
+
+TEST_F(WasmStreamingDecoderTest, NumberOfFunctionsTooLow) {
+ const uint8_t data[] = {
+ U32_LE(kWasmMagic), // --
+ U32_LE(kWasmVersion), // --
+ kCodeSectionCode, // Section ID
+ 0xe, // Section Length
+ 0x2, // Number of Functions
+ 0x1, // Function Length
+ 0x0, // Function
+ 0x2, // Function Length
+ 0x0, // Function
+ 0x0, // 2
+ 0x7, // Function Length
+ 0x0, // Function
+ 0x0, // 2
+ 0x0, // 3
+ 0x0, // 4
+ 0x0, // 5
+ 0x0, // 6
+ 0x0, // 7
+ };
+ ExpectFailure(Vector<const uint8_t>(data, arraysize(data)));
+}
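+// These framing tests are byte bookkeeping. E.g. in NumberOfFunctionsTooLow
+// the declared section length 0xe = 14 exactly covers 1 count byte plus
+// (1+1) + (1+2) + (1+7) bytes of length-prefixed bodies; that is three
+// bodies where the count byte promised two, so decoding must fail.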
+} // namespace wasm
+} // namespace internal
+} // namespace v8
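All section and function-body sizes in this file are LEB128 varuints. For
reference, a minimal sketch of the decoding rule these tests exercise
(illustrative only; V8's real Decoder class adds stricter bounds and bit
checks):

#include <cstddef>
#include <cstdint>

// Decode an unsigned LEB128 varuint32: 7 payload bits per byte, high bit set
// means another byte follows. Rejects unterminated or over-long encodings,
// e.g. five 0x80 continuation bytes as in OneSectionInvalidLength.
bool DecodeVarUint32(const uint8_t* data, size_t size, uint32_t* out,
                     size_t* length) {
  uint32_t result = 0;
  for (size_t i = 0; i < size && i < 5; ++i) {
    result |= static_cast<uint32_t>(data[i] & 0x7f) << (7 * i);
    if ((data[i] & 0x80) == 0) {  // continuation bit clear: last byte
      *out = result;
      *length = i + 1;
      return true;
    }
  }
  return false;  // input ended, or the 5-byte varuint32 limit was exceeded
}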
diff --git a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
index 4f21b7e9e9..63309db078 100644
--- a/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
+++ b/deps/v8/test/unittests/wasm/wasm-macro-gen-unittest.cc
@@ -4,7 +4,7 @@
#include "test/unittests/test-utils.h"
-#include "src/wasm/wasm-macro-gen.h"
+#include "test/common/wasm/wasm-macro-gen.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/wasm-spec-tests/OWNERS b/deps/v8/test/wasm-spec-tests/OWNERS
new file mode 100644
index 0000000000..f1e9a3c2a6
--- /dev/null
+++ b/deps/v8/test/wasm-spec-tests/OWNERS
@@ -0,0 +1,4 @@
+ahaas@chromium.org
+clemensh@chromium.org
+machenbach@chromium.org
+rossberg@chromium.org
diff --git a/deps/v8/test/wasm-spec-tests/testcfg.py b/deps/v8/test/wasm-spec-tests/testcfg.py
new file mode 100644
index 0000000000..d3b06d076b
--- /dev/null
+++ b/deps/v8/test/wasm-spec-tests/testcfg.py
@@ -0,0 +1,35 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+class WasmSpecTestsTestSuite(testsuite.TestSuite):
+ def __init__(self, name, root):
+ super(WasmSpecTestsTestSuite, self).__init__(name, root)
+
+ def ListTests(self, context):
+ tests = []
+ for dirname, dirs, files in os.walk(self.root):
+ for dotted in [x for x in dirs if x.startswith('.')]:
+ dirs.remove(dotted)
+ for filename in files:
+ if filename.endswith(".js"):
+ fullpath = os.path.join(dirname, filename)
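+ # The slice drops self.root plus the path separator and the trailing ".js".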
+ relpath = fullpath[len(self.root) + 1 : -3]
+ testname = relpath.replace(os.path.sep, "/")
+ test = testcase.TestCase(self, testname)
+ tests.append(test)
+ return tests
+
+ def GetFlagsForTestCase(self, testcase, context):
+ flags = [] + context.mode_flags
+ flags.append(os.path.join(self.root, testcase.path + self.suffix()))
+ return testcase.flags + flags
+
+
+def GetSuite(name, root):
+ return WasmSpecTestsTestSuite(name, root)
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
new file mode 100644
index 0000000000..c7c7302719
--- /dev/null
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -0,0 +1 @@
+5f9abe489629911cd1ea4c47f45ae2b3f9050a44 \ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.gyp b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.gyp
new file mode 100644
index 0000000000..711f982c9a
--- /dev/null
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.gyp
@@ -0,0 +1,26 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'wasm_spec_tests_run',
+ 'type': 'none',
+ 'dependencies': [
+ '../../src/d8.gyp:d8_run',
+ ],
+ 'includes': [
+ '../../gypfiles/features.gypi',
+ '../../gypfiles/isolate.gypi',
+ ],
+ 'sources': [
+ 'wasm-spec-tests.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.isolate b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.isolate
new file mode 100644
index 0000000000..4c29f9fed7
--- /dev/null
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.isolate
@@ -0,0 +1,15 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ './',
+ ],
+ },
+ 'includes': [
+ '../../src/d8.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
+
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
new file mode 100644
index 0000000000..eccfbd58e3
--- /dev/null
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -0,0 +1,37 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+[ALWAYS, {
+ # These tests fail because mips does not support the correct NaN bit patterns.
+ 'tests/float_misc': [PASS, ['arch == mipsel or arch == mips64el', SKIP]],
+ 'tests/float_exprs': [PASS, ['arch == mipsel or arch == mips64el', SKIP]],
+ 'tests/f32': [PASS, ['arch == mipsel or arch == mips64el', SKIP]],
+ 'tests/f64': [PASS, ['arch == mipsel or arch == mips64el', SKIP]],
+
+ # TODO(ahaas): Add additional stack checks on mips.
+ # Issue 6318: Stack checks for functions with huge stack frames fail on x64 and ia32
+ 'tests/skip-stack-guard-page': [PASS, ['arch == mipsel or arch == mips64el or arch == x64 or arch == ia32 or ((arch == ppc or arch == ppc64 or arch == s390 or arch == s390x) and simulator_run)', SKIP]],
+}], # ALWAYS
+
+['arch == arm and not simulator_run', {
+ # Too slow on chromebooks.
+ 'tests/br_table': [SKIP],
+}], # 'arch == arm and not simulator_run'
+
+['arch == ppc or arch == ppc64', {
+ # These tests fail because ppc float min and max doesn't convert sNaN to qNaN.
+ 'tests/f32': [SKIP],
+ 'tests/f64': [SKIP],
+ # This test fails because ppc float to double doesn't convert sNaN to qNaN.
+ 'tests/conversions': [SKIP],
+}], # 'arch == ppc or arch == ppc64'
+
+['arch == s390 or arch == s390x', {
+ # These tests fail because s390 float min and max doesn't convert sNaN to qNaN.
+ 'tests/f32': [SKIP],
+ 'tests/f64': [SKIP],
+}], # 'arch == s390 or arch == s390x'
+
+]
diff --git a/deps/v8/test/webkit/webkit.status b/deps/v8/test/webkit/webkit.status
index c2b3f73a69..60ddea7fd7 100644
--- a/deps/v8/test/webkit/webkit.status
+++ b/deps/v8/test/webkit/webkit.status
@@ -53,9 +53,9 @@
# Skip tests that are too slow for simulators.
'dfg-int-overflow-in-loop': [SKIP],
}], # 'simulator'
-['arch == arm64 and simulator_run == True', {
+['arch == arm64 and simulator_run', {
'dfg-int-overflow-in-loop': [SKIP],
-}], # 'arch == arm64 and simulator_run == True'
+}], # 'arch == arm64 and simulator_run'
['dcheck_always_on == True and (arch == arm or arch == arm64)', {
# Doesn't work with gcc 4.6 on arm or arm64 for some reason.
'reentrant-caching': [SKIP],
@@ -64,7 +64,7 @@
# Too slow for mips big-endian boards on bots (no FPU).
'dfg-int-overflow-in-loop': [SKIP],
}], # 'arch == mips'
-['(arch == ppc or arch == ppc64) and simulator_run == True', {
+['(arch == ppc or arch == ppc64) and simulator_run', {
# Too slow.
'dfg-int-overflow-in-loop': [SKIP],
}], # 'arch == ppc or arch == ppc64'
diff --git a/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template b/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template
index 4bf6bebc46..f3dde5ac21 100644
--- a/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template
+++ b/deps/v8/third_party/inspector_protocol/lib/Parser_cpp.template
@@ -51,19 +51,13 @@ double charactersToDouble(const uint16_t* characters, size_t length, bool* ok)
buffer.push_back(static_cast<char>(characters[i]));
}
buffer.push_back('\0');
- char* endptr;
- double result = std::strtod(buffer.data(), &endptr);
- *ok = !(*endptr);
- return result;
+ return StringUtil::toDouble(buffer.data(), length, ok);
}
double charactersToDouble(const uint8_t* characters, size_t length, bool* ok)
{
std::string buffer(reinterpret_cast<const char*>(characters), length);
- char* endptr;
- double result = std::strtod(buffer.data(), &endptr);
- *ok = !(*endptr);
- return result;
+ return StringUtil::toDouble(buffer.data(), length, ok);
}
template<typename Char>
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index 482f6e036a..96b9c3816b 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -37,7 +37,7 @@ MODES = ["release", "debug", "optdebug"]
# Modes that get built/run when you don't specify any.
DEFAULT_MODES = ["release", "debug"]
# Build targets that can be manually specified.
-TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers"]
+TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "mkgrokdump"]
# Build targets that get built when you don't specify any (and specified tests
# don't imply any other targets).
DEFAULT_TARGETS = ["d8"]
@@ -106,7 +106,6 @@ v8_enable_verify_heap = true
""".replace("{GOMA}", USE_GOMA)
DEBUG_ARGS_TEMPLATE = """\
-gdb_index = true
is_component_build = true
is_debug = true
symbol_level = 2
@@ -118,7 +117,6 @@ v8_optimized_debug = false
""".replace("{GOMA}", USE_GOMA)
OPTDEBUG_ARGS_TEMPLATE = """\
-gdb_index = false
is_component_build = true
is_debug = true
symbol_level = 1
@@ -144,11 +142,23 @@ def _Call(cmd, silent=False):
if not silent: print("# %s" % cmd)
return subprocess.call(cmd, shell=True)
+def _Which(cmd):
+ for path in os.environ["PATH"].split(os.pathsep):
+ if os.path.exists(os.path.join(path, cmd)):
+ return os.path.join(path, cmd)
+ return None
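+# Minimal stand-in for shutil.which, which is not available in Python 2;
+# lets _Notify fall back to plain printing when notify-send is absent.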
+
def _Write(filename, content):
print("# echo > %s << EOF\n%sEOF" % (filename, content))
with open(filename, "w") as f:
f.write(content)
+def _Notify(summary, body):
+ if _Which('notify-send') is not None:
+ _Call("notify-send '{}' '{}'".format(summary, body), silent=True)
+ else:
+ print("{} - {}".format(summary, body))
+
def GetPath(arch, mode):
subdir = "%s.%s" % (arch, mode)
return os.path.join(OUTDIR, subdir)
@@ -166,7 +176,7 @@ class Config(object):
def GetTargetCpu(self):
cpu = "x86"
- if self.arch.endswith("64") or self.arch == "s390x":
+ if "64" in self.arch or self.arch == "s390x":
cpu = "x64"
return "target_cpu = \"%s\"" % cpu
@@ -242,7 +252,11 @@ class ArgumentParser(object):
targets = []
actions = []
tests = []
- words = argstring.split('.')
+ # Specifying a single unit test looks like "unittests/Foo.Bar".
+ if argstring.startswith("unittests/"):
+ words = [argstring]
+ else:
+ words = argstring.split('.')
if len(words) == 1:
word = words[0]
if word in ACTIONS:
@@ -304,11 +318,9 @@ def Main(argv):
for c in configs:
return_code += configs[c].RunTests()
if return_code == 0:
- _Call("notify-send 'Done!' 'V8 compilation finished successfully.'",
- silent=True)
+ _Notify('Done!', 'V8 compilation finished successfully.')
else:
- _Call("notify-send 'Error!' 'V8 compilation finished with errors.'",
- silent=True)
+ _Notify('Error!', 'V8 compilation finished with errors.')
return return_code
if __name__ == "__main__":
diff --git a/deps/v8/tools/disasm.py b/deps/v8/tools/disasm.py
index f409cb003e..a91d0dbff4 100644
--- a/deps/v8/tools/disasm.py
+++ b/deps/v8/tools/disasm.py
@@ -38,8 +38,10 @@ OBJDUMP_BIN = "/usr/bin/objdump"
if not os.path.exists(OBJDUMP_BIN):
OBJDUMP_BIN = "objdump"
-
-_COMMON_DISASM_OPTIONS = ["-M", "intel-mnemonic", "-C"]
+# -M intel-mnemonic selects Intel syntax.
+# -C demangles.
+# -z disables skipping over sections of zeroes.
+_COMMON_DISASM_OPTIONS = ["-M", "intel-mnemonic", "-C", "-z"]
_DISASM_HEADER_RE = re.compile(r"[a-f0-9]+\s+<.*:$")
_DISASM_LINE_RE = re.compile(r"\s*([a-f0-9]+):\s*(\S.*)")
diff --git a/deps/v8/tools/eval_gc_time.sh b/deps/v8/tools/eval_gc_time.sh
index 140165da43..70786041d7 100755
--- a/deps/v8/tools/eval_gc_time.sh
+++ b/deps/v8/tools/eval_gc_time.sh
@@ -86,7 +86,6 @@ INTERESTING_NEW_GEN_KEYS="\
old_new \
code \
semispace \
- object_groups \
"
INTERESTING_OLD_GEN_KEYS="\
diff --git a/deps/v8/tools/foozzie/testdata/failure_output.txt b/deps/v8/tools/foozzie/testdata/failure_output.txt
index d35104f2da..33a6161565 100644
--- a/deps/v8/tools/foozzie/testdata/failure_output.txt
+++ b/deps/v8/tools/foozzie/testdata/failure_output.txt
@@ -9,7 +9,7 @@
# Compared x64,ignition with x64,ignition_turbo
#
# Flags of x64,ignition:
---abort_on_stack_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --ignition --turbo-filter=~ --hydrogen-filter=~ --nocrankshaft
+--abort_on_stack_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --ignition --turbo-filter=~ --hydrogen-filter=~ --noopt
# Flags of x64,ignition_turbo:
--abort_on_stack_overflow --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --random-seed 12345 --ignition --turbo
#
diff --git a/deps/v8/tools/foozzie/testdata/fuzz-123.js b/deps/v8/tools/foozzie/testdata/fuzz-123.js
index 7af5c2e1ec..fbde5736d4 100644
--- a/deps/v8/tools/foozzie/testdata/fuzz-123.js
+++ b/deps/v8/tools/foozzie/testdata/fuzz-123.js
@@ -3,3 +3,4 @@
// found in the LICENSE file.
// Empty test dummy.
+print("js-mutation: start generated test case");
diff --git a/deps/v8/tools/foozzie/v8_foozzie.py b/deps/v8/tools/foozzie/v8_foozzie.py
index 96ed2a8501..6f585fdf8a 100755
--- a/deps/v8/tools/foozzie/v8_foozzie.py
+++ b/deps/v8/tools/foozzie/v8_foozzie.py
@@ -22,20 +22,29 @@ import v8_suppressions
CONFIGS = dict(
default=[],
fullcode=[
- '--nocrankshaft',
+ '--noopt',
'--turbo-filter=~',
],
ignition=[
'--ignition',
'--turbo-filter=~',
'--hydrogen-filter=~',
- '--nocrankshaft',
+ '--noopt',
+ ],
+ ignition_asm=[
+ '--ignition',
+ '--turbo-filter=~',
+ '--hydrogen-filter=~',
+ '--noopt',
+ '--validate-asm',
+ '--stress-validate-asm',
+ '--suppress-asm-messages',
],
ignition_eager=[
'--ignition',
'--turbo-filter=~',
'--hydrogen-filter=~',
- '--nocrankshaft',
+ '--noopt',
'--no-lazy',
'--no-lazy-inner-functions',
],
diff --git a/deps/v8/tools/foozzie/v8_foozzie_test.py b/deps/v8/tools/foozzie/v8_foozzie_test.py
index c46aba679e..ffe18a88d5 100644
--- a/deps/v8/tools/foozzie/v8_foozzie_test.py
+++ b/deps/v8/tools/foozzie/v8_foozzie_test.py
@@ -18,7 +18,7 @@ class UnitTest(unittest.TestCase):
def testDiff(self):
# TODO(machenbach): Mock out suppression configuration.
suppress = v8_suppressions.get_suppression(
- 'x64', 'fullcode', 'x64', 'default')
+ 'x64', 'ignition', 'x64', 'ignition_turbo')
one = ''
two = ''
diff = None, None
@@ -29,15 +29,12 @@ class UnitTest(unittest.TestCase):
diff = None, None
self.assertEquals(diff, suppress.diff(one, two))
- # Ignore line before caret, caret position, stack trace char numbers
- # error message and validator output.
+ # Ignore line before caret, caret position and error message.
one = """
undefined
weird stuff
^
-Validation of asm.js module failed: foo bar
somefile.js: TypeError: undefined is not a function
-stack line :15: foo
undefined
"""
two = """
@@ -45,8 +42,6 @@ undefined
other weird stuff
^
somefile.js: TypeError: baz is not a function
-stack line :2: foo
-Validation of asm.js module failed: baz
undefined
"""
diff = None, None
diff --git a/deps/v8/tools/foozzie/v8_suppressions.py b/deps/v8/tools/foozzie/v8_suppressions.py
index b39973b5de..a84cee6296 100644
--- a/deps/v8/tools/foozzie/v8_suppressions.py
+++ b/deps/v8/tools/foozzie/v8_suppressions.py
@@ -46,23 +46,9 @@ IGNORE_SOURCES = {
'/v8/test/mjsunit/regress/regress-2989.js',
],
- 'crbug.com/681088': [
- '/v8/test/mjsunit/asm/asm-validation.js',
- '/v8/test/mjsunit/asm/b5528-comma.js',
- '/v8/test/mjsunit/asm/pointer-masking.js',
- '/v8/test/mjsunit/compiler/regress-443744.js',
- '/v8/test/mjsunit/regress/regress-599719.js',
- '/v8/test/mjsunit/regress/wasm/regression-647649.js',
- '/v8/test/mjsunit/wasm/asm-wasm.js',
- '/v8/test/mjsunit/wasm/asm-wasm-deopt.js',
- '/v8/test/mjsunit/wasm/asm-wasm-heap.js',
- '/v8/test/mjsunit/wasm/asm-wasm-literals.js',
- '/v8/test/mjsunit/wasm/asm-wasm-stack.js',
- ],
-
- 'crbug.com/681241': [
- '/v8/test/mjsunit/regress/regress-617526.js',
- '/v8/test/mjsunit/regress/wasm/regression-02862.js',
+ 'crbug.com/718739': [
+ '/v8/test/mjsunit/regress/regress-105.js',
+ '/v8/test/mjsunit/regress/regress-crbug-599714.js',
],
'crbug.com/688159': [
@@ -81,16 +67,9 @@ IGNORE_SOURCES = {
}
# Ignore by test case pattern. Map from bug->regexp.
-# Regular expressions are assumed to be compiled. We use regexp.match.
-# Make sure the code doesn't match in the preamble portion of the test case
-# (i.e. in the modified inlined mjsunit.js). You can reference the comment
-# between the two parts like so:
-# 'crbug.com/666308':
-# re.compile(r'.*End stripped down and modified version.*'
-# r'\.prototype.*instanceof.*.*', re.S)
-# TODO(machenbach): Insert a JS sentinel between the two parts, because
-# comments are stripped during minimization.
+# Regular expressions are assumed to be compiled. We use regexp.search.
IGNORE_TEST_CASES = {
+ 'crbug.com/718739': re.compile(r'\.caller'),
}
# Ignore by output pattern. Map from config->bug->regexp. Config '' is used
@@ -107,17 +86,9 @@ IGNORE_OUTPUT = {
re.compile(r'RangeError(?!: byte length)', re.S),
'crbug.com/667678':
re.compile(r'\[native code\]', re.S),
- 'crbug.com/681806':
- re.compile(r'WebAssembly\.Instance', re.S),
- 'crbug.com/681088':
- re.compile(r'TypeError: Cannot read property \w+ of undefined', re.S),
'crbug.com/689877':
re.compile(r'^.*SyntaxError: .*Stack overflow$', re.M),
},
- 'validate_asm': {
- 'validate_asm':
- re.compile(r'TypeError'),
- },
}
# Lines matching any of the following regular expressions will be ignored
@@ -143,10 +114,6 @@ ALLOWED_LINE_DIFFS = [
r'^.* is not a function(.*)$',
r'^(.*) is not a .*$',
- # crbug.com/662840
- r"^.*(?:Trying to access ')?(\w*)(?:(?:' through proxy)|"
- r"(?: is not defined))$",
-
# crbug.com/680064. This subsumes one of the above expressions.
r'^(.*)TypeError: .* function$',
@@ -308,8 +275,17 @@ class V8Suppression(Suppression):
)
def ignore_by_content(self, testcase):
+ # Strip off test case preamble.
+ try:
+ lines = testcase.splitlines()
+ lines = lines[lines.index('print("js-mutation: start generated test case");'):]
+ content = '\n'.join(lines)
+ except ValueError:
+ # Search the whole test case if preamble can't be found. E.g. older
+ # already minimized test cases might have dropped the delimiter line.
+ content = testcase
for bug, exp in IGNORE_TEST_CASES.iteritems():
- if exp.match(testcase):
+ if exp.search(content):
return bug
return False
diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua
index 42cb2e370b..9172fe208c 100644
--- a/deps/v8/tools/gcmole/gcmole.lua
+++ b/deps/v8/tools/gcmole/gcmole.lua
@@ -109,7 +109,7 @@ local function MakeClangCommandLine(
.. " -Xclang -triple -Xclang " .. triple
.. " -D" .. arch_define
.. " -DENABLE_DEBUGGER_SUPPORT"
- .. " -DV8_I18N_SUPPORT"
+ .. " -DV8_INTL_SUPPORT"
.. " -I./"
.. " -Iinclude/"
.. " -Ithird_party/icu/source/common"
diff --git a/deps/v8/tools/gdbinit b/deps/v8/tools/gdbinit
index c78baa238e..2d7d254ca4 100644
--- a/deps/v8/tools/gdbinit
+++ b/deps/v8/tools/gdbinit
@@ -123,5 +123,30 @@ Print stack trace with assertion scopes
Usage: bta
end
+# Search for a pointer inside all valid pages.
+define space_find
+ set $space = $arg0
+ set $current_page = $space->anchor()->next_page()
+ while ($current_page != $space->anchor())
+ printf "# Searching in %p - %p\n", $current_page->area_start(), $current_page->area_end()-1
+ find $current_page->area_start(), $current_page->area_end()-1, $arg1
+ set $current_page = $current_page->next_page()
+ end
+end
+
+define heap_find
+ set $heap = v8::internal::Isolate::Current()->heap()
+ printf "# Searching for %p in old_space ===============================\n", $arg0
+ space_find $heap->old_space() ($arg0)
+ printf "# Searching for %p in map_space ===============================\n", $arg0
+ space_find $heap->map_space() $arg0
+ printf "# Searching for %p in code_space ===============================\n", $arg0
+ space_find $heap->code_space() $arg0
+end
+document heap_find
+Find the location of a given address in V8 pages.
+Usage: heap_find address
+end
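+# Note: only old, map, and code space are searched above; new space and
+# large-object space would need their own variants.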
+
set disassembly-flavor intel
set disable-randomization off
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 0e3d088fbb..e793a91865 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -335,15 +335,9 @@ def get_base_class(klass):
return get_base_class(k['parent']);
#
-# Loads class hierarchy and type information from "objects.h".
+# Loads class hierarchy and type information from "objects.h" etc.
#
def load_objects():
- objfilename = sys.argv[2];
- objfile = open(objfilename, 'r');
- in_insttype = False;
-
- typestr = '';
-
#
# Construct a dictionary for the classes we're sure should be present.
#
@@ -351,11 +345,29 @@ def load_objects():
for klass in expected_classes:
checktypes[klass] = True;
+
+ for filename in sys.argv[2:]:
+ if not filename.endswith("-inl.h"):
+ load_objects_from_file(filename, checktypes)
+
+ if (len(checktypes) > 0):
+ for klass in checktypes:
+ print('error: expected class \"%s\" not found' % klass);
+
+ sys.exit(1);
+
+
+def load_objects_from_file(objfilename, checktypes):
+ objfile = open(objfilename, 'r');
+ in_insttype = False;
+
+ typestr = '';
+
#
- # Iterate objects.h line-by-line to collect type and class information.
- # For types, we accumulate a string representing the entire InstanceType
- # enum definition and parse it later because it's easier to do so
- # without the embedded newlines.
+ # Iterate the header file line-by-line to collect type and class
+ # information. For types, we accumulate a string representing the entire
+ # InstanceType enum definition and parse it later because it's easier to
+ # do so without the embedded newlines.
#
for line in objfile:
if (line.startswith('enum InstanceType {')):
@@ -482,13 +494,6 @@ def load_objects():
if (cctype in checktypes):
del checktypes[cctype];
- if (len(checktypes) > 0):
- for klass in checktypes:
- print('error: expected class \"%s\" not found' % klass);
-
- sys.exit(1);
-
-
#
# For a given macro call, pick apart the arguments and return an object
# describing the corresponding output constant. See load_fields().
@@ -509,7 +514,7 @@ def parse_field(call):
if (kind == 'ACCESSORS' or kind == 'ACCESSORS_GCSAFE'):
klass = args[0];
field = args[1];
- dtype = args[2];
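+      # Strip angle brackets from templated field types; they are not
+      # legal in the generated constant names.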
+ dtype = args[2].replace('<', '_').replace('>', '_')
offset = args[3];
return ({
@@ -528,11 +533,19 @@ def parse_field(call):
});
#
-# Load field offset information from objects-inl.h.
+# Load field offset information from objects-inl.h etc.
#
def load_fields():
- inlfilename = sys.argv[3];
- inlfile = open(inlfilename, 'r');
+ for filename in sys.argv[2:]:
+ if filename.endswith("-inl.h"):
+ load_fields_from_file(filename)
+
+ for body in extras_accessors:
+ fields.append(parse_field('ACCESSORS(%s)' % body));
+
+
+def load_fields_from_file(filename):
+ inlfile = open(filename, 'r');
#
# Each class's fields and the corresponding offsets are described in the
@@ -584,9 +597,6 @@ def load_fields():
fields.append(parse_field(current));
current = '';
- for body in extras_accessors:
- fields.append(parse_field('ACCESSORS(%s)' % body));
-
#
# Emit a block of constants.
#
diff --git a/deps/v8/tools/get_byteorder.py b/deps/v8/tools/get_byteorder.py
deleted file mode 100755
index 598948b42a..0000000000
--- a/deps/v8/tools/get_byteorder.py
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""Get Byteorder of host architecture"""
-
-
-import sys
-
-def main():
- print sys.byteorder
- return 0
-
-if __name__ == '__main__':
- sys.exit(main())
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index 4a3cc7c6f5..dd944f8ab4 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -1952,9 +1952,12 @@ class InspectionPadawan(object):
return None
def FrameMarkerName(self, value):
- if 0 < value <= len(FRAME_MARKERS):
- return "Possibly %s frame marker" % FRAME_MARKERS[value-1]
- return ""
+ # The frame marker is Smi-tagged but not Smi encoded and 0 is not a valid
+ # frame type.
+ value = (value >> 1) - 1
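+ # E.g. a raw slot value of 4 decodes as (4 >> 1) - 1 = 1, i.e. FRAME_MARKERS[1].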
+ if 0 <= value < len(FRAME_MARKERS):
+ return "Possibly %s frame marker" % FRAME_MARKERS[value]
+ return None
def IsFrameMarker(self, slot, address):
if not slot: return False
@@ -1964,18 +1967,17 @@ class InspectionPadawan(object):
next_address = self.reader.ReadUIntPtr(slot + self.reader.PointerSize())
return self.reader.IsExceptionStackAddress(next_address)
- def FormatSmi(self, address, slot=None):
+ def FormatSmi(self, address):
value = self.heap.SmiUntag(address)
- marker = ""
- if self.IsFrameMarker(slot, address):
- marker = self.FrameMarkerName(value)
# On 32-bit systems almost everything looks like a Smi.
- if not self.reader.Is64() or value == 0: return marker
- return "Smi(%d) %s" % (value, marker)
+ if not self.reader.Is64() or value == 0: return None
+ return "Smi(%d)" % value
def SenseObject(self, address, slot=None):
+ if self.IsFrameMarker(slot, address):
+ return self.FrameMarkerName(address)
if self.heap.IsSmi(address):
- return self.FormatSmi(address, slot)
+ return self.FormatSmi(address)
if not self.heap.IsTaggedAddress(address): return None
tagged_address = address
if self.IsInKnownOldSpace(tagged_address):
diff --git a/deps/v8/tools/ic-processor.js b/deps/v8/tools/ic-processor.js
index 41bd336c9a..6623b69ed5 100644
--- a/deps/v8/tools/ic-processor.js
+++ b/deps/v8/tools/ic-processor.js
@@ -168,7 +168,7 @@ IcProcessor.prototype.formatName = function(entry) {
var re = /(.*):[0-9]+:[0-9]+$/;
var array = re.exec(name);
if (!array) return name;
- return array[1];
+ return entry.getState() + array[1];
}
IcProcessor.prototype.processPropertyIC = function (
diff --git a/deps/v8/tools/ignition/linux_perf_report.py b/deps/v8/tools/ignition/linux_perf_report.py
index 4e0b8844ea..d2327ca6b8 100755
--- a/deps/v8/tools/ignition/linux_perf_report.py
+++ b/deps/v8/tools/ignition/linux_perf_report.py
@@ -25,7 +25,7 @@ examples:
# samples and other non-Ignition samples.
#
$ tools/run-perf.sh out/x64.release/d8 \\
- --ignition --noturbo --nocrankshaft run.js
+ --ignition --noturbo --noopt run.js
$ tools/ignition/linux_perf_report.py --flamegraph -o out.collapsed
$ flamegraph.pl --colors js out.collapsed > out.svg
@@ -45,7 +45,7 @@ examples:
# See the hottest bytecodes on Octane benchmark, by number of samples.
#
$ tools/run-perf.sh out/x64.release/d8 \\
- --ignition --noturbo --nocrankshaft octane/run.js
+ --ignition --noturbo --noopt octane/run.js
$ tools/ignition/linux_perf_report.py
"""
diff --git a/deps/v8/tools/link_clicker.extension/README.txt b/deps/v8/tools/link_clicker.extension/README.txt
new file mode 100644
index 0000000000..35e88b15ef
--- /dev/null
+++ b/deps/v8/tools/link_clicker.extension/README.txt
@@ -0,0 +1,12 @@
+This extension can be used to reproduce infrequent crashes on an unknown set
+of URLs within a given domain. It follows a random link that matches a
+predefined pattern, imitating something like real user interaction on a page.
+
+Usage:
+1. Open chrome://extensions
+2. Enable developer mode
+3. Click "Load unpacked extension"
+4. Click the orange link-clicker extension button in the toolbar
+5. Set the parameters and click "Enable" to start following links on all tabs
+ open in the current window. Beware, this extension will follow arbitrary
+ links. You probably don't want to be logged in with any important account.
diff --git a/deps/v8/tools/link_clicker.extension/background.js b/deps/v8/tools/link_clicker.extension/background.js
new file mode 100644
index 0000000000..43470cb312
--- /dev/null
+++ b/deps/v8/tools/link_clicker.extension/background.js
@@ -0,0 +1,74 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function linkClickerBackgroundScript() {
+
+ // time in ms.
+ let minInterval = 1*1000;
+ let maxInterval = 20*1000;
+ let pattern = /.*/;
+ let enabled = false;
+
+ let animateIconIntervalId;
+
+ // ===========================================================================
+
+ chrome.runtime.onMessage.addListener(function(msg, sender, response) {
+ let result;
+ if (msg.type == 'update') result = updateFromMessage(msg);
+ if (msg.type == 'get') result = getValues();
+ response(result);
+ });
+
+ // ===========================================================================
+ function updateFromMessage(msg) {
+ console.log(msg);
+ minInterval = Number(msg.minInterval);
+ maxInterval = Number(msg.maxInterval);
+ if (maxInterval < minInterval) {
+ let tmpMin = Math.min(minInterval, maxInterval);
+ maxInterval = Math.max(minInterval, maxInterval);
+ minInterval = tmpMin;
+ }
+ pattern = new RegExp(msg.pattern);
+ enabled = Boolean(msg.enabled);
+ updateTabs();
+ scheduleIconAnimation();
+ return getValues();
+ }
+
+ function getValues() {
+ return {
+ type: 'update',
+ minInterval: minInterval,
+ maxInterval: maxInterval,
+ pattern: pattern.source,
+ enabled: enabled
+ }
+ }
+
+ function updateTabs() {
+ chrome.tabs.query({active: true, currentWindow: true}, function(tabs) {
+ let message = getValues();
+ for (let i = 0; i < tabs.length; ++i) {
+ chrome.tabs.sendMessage(tabs[i].id, message);
+ }
+ });
+ }
+
+ let animationIndex = 0;
+ function animateIcon() {
+ animationIndex = (animationIndex + 1) % 4;
+ chrome.browserAction.setBadgeText( { text: ".".repeat(animationIndex) } );
+ }
+
+ function scheduleIconAnimation() {
+ chrome.browserAction.setBadgeText( { text: "" } );
+ clearInterval(animateIconIntervalId);
+ if (enabled) {
+ animateIconIntervalId = setInterval(animateIcon, 500);
+ }
+ }
+
+})();
diff --git a/deps/v8/tools/link_clicker.extension/content.js b/deps/v8/tools/link_clicker.extension/content.js
new file mode 100644
index 0000000000..4ab825e01e
--- /dev/null
+++ b/deps/v8/tools/link_clicker.extension/content.js
@@ -0,0 +1,66 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function linkClickerContentScript() {
+ // time in ms
+ let minInterval;
+ let maxInterval;
+ let pattern;
+ let enabled;
+ let timeoutId;
+
+ // Initialize variables.
+ chrome.runtime.sendMessage({type:'get'}, function(msg) {
+ if (msg.type == 'update') updateFromMessage(msg);
+ });
+
+ chrome.runtime.onMessage.addListener(
+ function(msg, sender, sendResponse) {
+ if (msg.type == 'update') updateFromMessage(msg);
+ });
+
+ function findAllLinks() {
+ let links = document.links;
+ let results = new Set();
+ for (let i = 0; i < links.length; i++) {
+ let href = links[i].href;
+ if (!href) continue;
+      if (href.match(pattern)) results.add(href);
+ }
+ return Array.from(results);
+ }
+
+ function updateFromMessage(msg) {
+ console.log(msg);
+    minInterval = Number(msg.minInterval);
+ maxInterval = Number(msg.maxInterval);
+ pattern = new RegExp(msg.pattern);
+ enabled = Boolean(msg.enabled);
+ if (enabled) schedule();
+ }
+
+ function followLink() {
+ if (!enabled) return;
+ let links = findAllLinks();
+ if (links.length <= 5) {
+      // Navigate back if the page does not have enough links.
+      window.history.back();
+ console.log("navigate back");
+ } else {
+      let link = links[Math.floor(Math.random() * links.length)];
+ console.log(link);
+ window.location.href = link;
+ // Schedule in case we just followed an anchor.
+ schedule();
+ }
+ }
+
+ function schedule() {
+ clearTimeout(timeoutId);
+ let delta = maxInterval - minInterval;
+ let duration = minInterval + (Math.random() * delta);
+ console.log(duration);
+ timeoutId = setTimeout(followLink, duration);
+ }
+})();
diff --git a/deps/v8/tools/link_clicker.extension/icon.png b/deps/v8/tools/link_clicker.extension/icon.png
new file mode 100644
index 0000000000..1ce0ca3ac9
--- /dev/null
+++ b/deps/v8/tools/link_clicker.extension/icon.png
Binary files differ
diff --git a/deps/v8/tools/link_clicker.extension/manifest.json b/deps/v8/tools/link_clicker.extension/manifest.json
new file mode 100644
index 0000000000..8ca8579b6b
--- /dev/null
+++ b/deps/v8/tools/link_clicker.extension/manifest.json
@@ -0,0 +1,21 @@
+{
+ "name": "A browser action with a popup that automatically clicks links matching a regexp",
+ "description": "Follow links",
+ "version": "1.0",
+ "permissions": [
+ "tabs", "http://*/*", "https://*/*"
+ ],
+ "background": { "scripts": ["background.js"] },
+ "browser_action": {
+ "default_title": "Follow links.",
+ "default_icon": "icon.png",
+ "default_popup": "popup.html"
+ },
+ "content_scripts": [
+ {
+ "matches": ["http://*/*", "https://*/*"],
+ "js": ["content.js"]
+ }
+ ],
+ "manifest_version": 2
+}
diff --git a/deps/v8/tools/link_clicker.extension/popup.html b/deps/v8/tools/link_clicker.extension/popup.html
new file mode 100644
index 0000000000..cce9566acc
--- /dev/null
+++ b/deps/v8/tools/link_clicker.extension/popup.html
@@ -0,0 +1,50 @@
+<!doctype html>
+<!--
+Copyright 2017 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<html>
+ <head>
+ <style>
+ body {
+ overflow: hidden;
+ padding: 5px;
+ width: 310px;
+ }
+ input, textarea, select, button {
+ width : 300px;
+ margin: 0;
+ box-sizing: border-box;
+ }
+ label {
+ clear: both;
+ }
+ </style>
+ <script src="popup.js"></script>
+ </head>
+ <body>
+ <form>
+ <p>
+ <label>Min click-interval <span id="minIntervalValue"></span>:
+ <input type="range" id="minInterval" min="1000" max="60000">
+ </label>
+ </p>
+ <p>
+        <label>Max click-interval <span id="maxIntervalValue"></span>:
+ <input type="range" id="maxInterval" min="1000" max="60000">
+ </label>
+ </p>
+ <p>
+ <label>Link regexp:
+ <input type="input" id="pattern" >
+ </label>
+ </p>
+ <p>
+ <label>Enable:
+ <input type="checkbox" id="enabled" >
+ </label>
+ </p>
+ </form>
+ </body>
+</html>
diff --git a/deps/v8/tools/link_clicker.extension/popup.js b/deps/v8/tools/link_clicker.extension/popup.js
new file mode 100644
index 0000000000..865a9480eb
--- /dev/null
+++ b/deps/v8/tools/link_clicker.extension/popup.js
@@ -0,0 +1,53 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function $(id) {
+ return document.querySelector(id);
+}
+
+// ===========================================================================
+document.addEventListener('DOMContentLoaded', function () {
+  installFormChangeHandler();
+});
+
+
+function installFormChangeHandler() {
+ initForm();
+ let inputs = document.getElementsByTagName("input");
+ for (let i = 0; i < inputs.length; i++){
+ inputs[i].onchange = onFormChange;
+ }
+}
+
+function initForm() {
+ chrome.runtime.sendMessage({type:'get'}, function(response) {
+ updateFromMessage(response);
+ });
+}
+// ===========================================================================
+
+function updateFromMessage(msg) {
+ $("#minInterval").value = msg.minInterval;
+ $("#maxInterval").value = msg.maxInterval;
+ $("#pattern").value = msg.pattern;
+ $("#enabled").checked = msg.enabled;
+ $("#minIntervalValue").innerText = msg.minInterval+"ms";
+ $("#maxIntervalValue").innerText = msg.maxInterval+"ms";
+}
+
+function onFormChange() {
+ let minInterval = $("#minInterval").value;
+ let maxInterval = $("#maxInterval").value;
+
+ let message = {
+ type: 'update',
+ minInterval: minInterval,
+ maxInterval: maxInterval,
+ pattern: $("#pattern").value,
+ enabled: $("#enabled").checked
+ }
+ chrome.runtime.sendMessage(message, function(response) {
+ updateFromMessage(response);
+ });
+}
diff --git a/deps/v8/tools/luci-go/linux64/isolate.sha1 b/deps/v8/tools/luci-go/linux64/isolate.sha1
index b8593a765b..f14d0ea76c 100644
--- a/deps/v8/tools/luci-go/linux64/isolate.sha1
+++ b/deps/v8/tools/luci-go/linux64/isolate.sha1
@@ -1 +1 @@
-3c0fbcab83730c86bbd5a09e760388dcb7053bc4
+bcc0e73f051cc01452c24babbb4be9d5f4556c55
diff --git a/deps/v8/tools/luci-go/mac64/isolate.sha1 b/deps/v8/tools/luci-go/mac64/isolate.sha1
index bf7e1c1dd5..7506974143 100644
--- a/deps/v8/tools/luci-go/mac64/isolate.sha1
+++ b/deps/v8/tools/luci-go/mac64/isolate.sha1
@@ -1 +1 @@
-d37a2f34eff58e1fb04038bd52381001479d4aa1
+47ffac85c87dd0a2cfd6c4ded9c29c5bbcc8245d
diff --git a/deps/v8/tools/luci-go/win64/isolate.exe.sha1 b/deps/v8/tools/luci-go/win64/isolate.exe.sha1
index c575f97042..9dccf311b2 100644
--- a/deps/v8/tools/luci-go/win64/isolate.exe.sha1
+++ b/deps/v8/tools/luci-go/win64/isolate.exe.sha1
@@ -1 +1 @@
-d4b894493b1ee5c04ec5bc88e6ea286426540770
+1ed79378fe41640a963f1aa6d1674e8456993d10
diff --git a/deps/v8/tools/memory/lsan/suppressions.txt b/deps/v8/tools/memory/lsan/suppressions.txt
index 36e59ecd51..f5c73935be 100644
--- a/deps/v8/tools/memory/lsan/suppressions.txt
+++ b/deps/v8/tools/memory/lsan/suppressions.txt
@@ -14,6 +14,3 @@ leak:v8::internal::compiler::JumpThreading::ApplyForwarding
# mjsunit
leak:v8::internal::FuncNameInferrer::FuncNameInferrer
leak:v8::internal::JSArrayBuffer::SetupAllocatingData
-
-# unittests
-leak:v8::internal::Isolate::FindOrAllocatePerThreadDataForThisThread
diff --git a/deps/v8/tools/perf_tests/chromium_revision b/deps/v8/tools/perf_tests/chromium_revision
deleted file mode 100644
index 0cdcc110f8..0000000000
--- a/deps/v8/tools/perf_tests/chromium_revision
+++ /dev/null
@@ -1 +0,0 @@
-210122
diff --git a/deps/v8/tools/plot-timer-events b/deps/v8/tools/plot-timer-events
index da2e823c14..b65937cfe6 100755
--- a/deps/v8/tools/plot-timer-events
+++ b/deps/v8/tools/plot-timer-events
@@ -51,7 +51,7 @@ if test "$contains" -eq 0; then
calibration_log=calibration.log
calibration_script="for (var i = 0; i < 1000000; i++) print();"
- $d8_exec --nocrankshaft --prof --logfile $calibration_log \
+ $d8_exec --noopt --prof --logfile $calibration_log \
--log-timer-events -e "$calibration_script" > /dev/null
t_1_start=`grep "timer-event-start,\"V8.Execute\"" $calibration_log \
| tail -n1 | awk -F, '{print $3}'`
@@ -59,7 +59,7 @@ if test "$contains" -eq 0; then
| tail -n1 | awk -F, '{print $3}'`
n_1=`grep "timer-event\|tick" $calibration_log | wc -l`
- $d8_exec --nocrankshaft --prof --logfile $calibration_log \
+ $d8_exec --noopt --prof --logfile $calibration_log \
--log-internal-timer-events -e "$calibration_script" > /dev/null
t_2_start=`grep "timer-event-start,\"V8.Execute\"" $calibration_log \
| tail -n1 | awk -F, '{print $3}'`
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index c6561476f9..c95936037b 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -70,7 +70,7 @@ LINT_RULES = """
LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
FLAGS_LINE = re.compile("//\s*Flags:.*--([A-z0-9-])+_[A-z0-9].*\n")
ASSERT_OPTIMIZED_PATTERN = re.compile("assertOptimized")
-FLAGS_ENABLE_OPT = re.compile("//\s*Flags:.*--(crankshaft|turbo)[^-].*\n")
+FLAGS_ENABLE_OPT = re.compile("//\s*Flags:.*--(opt|turbo)[^-].*\n")
ASSERT_UNOPTIMIZED_PATTERN = re.compile("assertUnoptimized")
FLAGS_NO_ALWAYS_OPT = re.compile("//\s*Flags:.*--no-?always-opt.*\n")
@@ -193,7 +193,7 @@ class SourceFileProcessor(object):
def IgnoreDir(self, name):
return (name.startswith('.') or
name in ('buildtools', 'data', 'gmock', 'gtest', 'kraken',
- 'octane', 'sunspider'))
+ 'octane', 'sunspider', 'traces-arm64'))
def IgnoreFile(self, name):
return name.startswith('.')
@@ -221,7 +221,7 @@ class CppLintProcessor(SourceFileProcessor):
return (super(CppLintProcessor, self).IgnoreDir(name)
or (name == 'third_party'))
- IGNORE_LINT = ['flag-definitions.h']
+ IGNORE_LINT = ['export-template.h', 'flag-definitions.h']
def IgnoreFile(self, name):
return (super(CppLintProcessor, self).IgnoreFile(name)
@@ -413,7 +413,7 @@ class SourceProcessor(SourceFileProcessor):
if not "mjsunit/mjsunit.js" in name:
if ASSERT_OPTIMIZED_PATTERN.search(contents) and \
not FLAGS_ENABLE_OPT.search(contents):
- print "%s Flag --crankshaft or --turbo should be set " \
+ print "%s Flag --opt or --turbo should be set " \
"if assertOptimized() is used" % name
result = False
if ASSERT_UNOPTIMIZED_PATTERN.search(contents) and \
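
A minimal sketch of the updated FLAGS_ENABLE_OPT check above, runnable on its
own (the test strings are hypothetical): the regexp accepts --opt as a whole
flag but rejects flags that merely start with it.

    import re

    FLAGS_ENABLE_OPT = re.compile("//\s*Flags:.*--(opt|turbo)[^-].*\n")

    # "--opt" followed by a non-hyphen character counts as enabling optimization.
    print(bool(FLAGS_ENABLE_OPT.search('// Flags: --opt --allow-natives-syntax\n')))  # True
    # A longer flag that only starts with "--opt" does not match.
    print(bool(FLAGS_ENABLE_OPT.search('// Flags: --opt-me-not\n')))  # False
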
diff --git a/deps/v8/tools/profile.js b/deps/v8/tools/profile.js
index de9c42c5b1..21d9d22a5e 100644
--- a/deps/v8/tools/profile.js
+++ b/deps/v8/tools/profile.js
@@ -509,11 +509,18 @@ Profile.DynamicFuncCodeEntry = function(size, type, func, state) {
Profile.DynamicFuncCodeEntry.STATE_PREFIX = ["", "~", "*"];
/**
+ * Returns state.
+ */
+Profile.DynamicFuncCodeEntry.prototype.getState = function() {
+ return Profile.DynamicFuncCodeEntry.STATE_PREFIX[this.state];
+};
+
+/**
* Returns node name.
*/
Profile.DynamicFuncCodeEntry.prototype.getName = function() {
var name = this.func.getName();
- return this.type + ': ' + Profile.DynamicFuncCodeEntry.STATE_PREFIX[this.state] + name;
+ return this.type + ': ' + this.getState() + name;
};
diff --git a/deps/v8/tools/release/git_recipes.py b/deps/v8/tools/release/git_recipes.py
index e688ecb953..ce65b973ac 100644
--- a/deps/v8/tools/release/git_recipes.py
+++ b/deps/v8/tools/release/git_recipes.py
@@ -32,7 +32,7 @@ SHA1_RE = re.compile('^[a-fA-F0-9]{40}$')
ROLL_DEPS_GIT_SVN_ID_RE = re.compile('^git-svn-id: .*@([0-9]+) .*$')
# Regular expression that matches a single commit footer line.
-COMMIT_FOOTER_ENTRY_RE = re.compile(r'([^:]+):\s+(.+)')
+COMMIT_FOOTER_ENTRY_RE = re.compile(r'([^:]+):\s*(.*)')
# Footer metadata key for commit position.
COMMIT_POSITION_FOOTER_KEY = 'Cr-Commit-Position'
@@ -67,9 +67,9 @@ def GetCommitMessageFooterMap(message):
for line in lines:
m = COMMIT_FOOTER_ENTRY_RE.match(line)
if not m:
- # If any single line isn't valid, the entire footer is invalid.
- footers.clear()
- return footers
+ # If any single line isn't valid, continue anyway for compatibility with
+ # Gerrit (which itself uses JGit for this).
+ continue
footers[m.group(1)] = m.group(2).strip()
return footers
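
A minimal sketch of the relaxed footer parsing above, runnable on its own (the
message lines are hypothetical): invalid lines are skipped instead of
invalidating the whole footer, and empty values are now accepted.

    import re

    COMMIT_FOOTER_ENTRY_RE = re.compile(r'([^:]+):\s*(.*)')

    def footer_map(lines):
      footers = {}
      for line in lines:
        m = COMMIT_FOOTER_ENTRY_RE.match(line)
        if not m:
          continue  # Tolerate stray lines, as Gerrit (via JGit) does.
        footers[m.group(1)] = m.group(2).strip()
      return footers

    print(footer_map(['Cr-Commit-Position: refs/heads/master@{#123}',
                      'not a footer line',
                      'Bug:']))
    # {'Cr-Commit-Position': 'refs/heads/master@{#123}', 'Bug': ''}
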
diff --git a/deps/v8/tools/release/update_node.py b/deps/v8/tools/release/update_node.py
index e05f71234d..de6bb06838 100755
--- a/deps/v8/tools/release/update_node.py
+++ b/deps/v8/tools/release/update_node.py
@@ -3,6 +3,26 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+"""
+Use this script to update V8 in a Node.js checkout.
+
+Requirements:
+ - Node.js checkout in which V8 should be updated.
+ - V8 checkout at the commit to which Node.js should be updated.
+
+Usage:
+ $ update_node.py <path_to_v8> <path_to_node>
+
+  This will synchronize the content of <path_to_node>/deps/v8 with <path_to_v8>,
+  and a few V8 dependencies required in Node.js. It will also update .gitignore
+ appropriately.
+
+Optional flags:
+ --gclient Run `gclient sync` on the V8 checkout before updating.
+ --commit Create commit with the updated V8 in the Node.js checkout.
+ --with-patch Also include currently staged files in the V8 checkout.
+"""
+
import argparse
import os
import shutil
@@ -52,7 +72,8 @@ def CommitPatch(options):
"""
print ">> Comitting patch"
subprocess.check_call(
- ["git", "commit", "--allow-empty", "-m", "placeholder-commit"],
+ ["git", "-c", "user.name=fake", "-c", "user.email=fake@chromium.org",
+ "commit", "--allow-empty", "-m", "placeholder-commit"],
cwd=options.v8_path,
)
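
The identity override above is what lets CommitPatch run on bots with no git
user configured; a standalone sketch (the checkout path is hypothetical):

    import subprocess

    # Passing user.name/user.email via -c makes the placeholder commit succeed
    # even when the checkout has no git identity configured.
    subprocess.check_call(
        ["git", "-c", "user.name=fake", "-c", "user.email=fake@chromium.org",
         "commit", "--allow-empty", "-m", "placeholder-commit"],
        cwd="/path/to/v8",  # Hypothetical path to the V8 checkout.
    )
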
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index 0b1675b75e..7a9e90cd5e 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -68,8 +68,10 @@ TEST_MAP = {
"debugger",
"mjsunit",
"cctest",
+ "wasm-spec-tests",
"inspector",
"webkit",
+ "mkgrokdump",
"fuzzer",
"message",
"preparser",
@@ -81,7 +83,9 @@ TEST_MAP = {
"debugger",
"mjsunit",
"cctest",
+ "wasm-spec-tests",
"inspector",
+ "mkgrokdump",
"fuzzer",
"message",
"preparser",
@@ -265,7 +269,7 @@ def BuildOptions():
default=False, action="store_true")
result.add_option("--extra-flags",
help="Additional flags to pass to each test command",
- default="")
+ action="append", default=[])
result.add_option("--isolates", help="Whether to test isolates",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
@@ -419,6 +423,7 @@ def SetupEnvironment(options):
'coverage=1',
'coverage_dir=%s' % options.sancov_dir,
symbolizer,
+ "allow_user_segv_handler=1",
])
if options.cfi_vptr:
@@ -532,7 +537,7 @@ def ProcessOptions(options):
"running tests locally.")
options.no_network = True
options.command_prefix = shlex.split(options.command_prefix)
- options.extra_flags = shlex.split(options.extra_flags)
+ options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
if options.gc_stress:
options.extra_flags += GC_STRESS_FLAGS
@@ -781,8 +786,8 @@ def Execute(arch, mode, args, options, suites):
# target_arch != v8_target_arch in the dumped build config.
simulator_run = not options.dont_skip_simulator_slow_tests and \
arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', \
- 'ppc', 'ppc64'] and \
- ARCH_GUESS and arch != ARCH_GUESS
+ 'ppc', 'ppc64', 's390', 's390x'] and \
+ bool(ARCH_GUESS) and arch != ARCH_GUESS
# Find available test suites and read test cases from them.
variables = {
"arch": arch,
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index 59669c6bbc..e6ec9cb38b 100755
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -1000,6 +1000,12 @@ def Main(args):
"'powersave' for more stable results, or 'performance' "
"for shorter completion time of suite, with potentially "
"more noise in results.")
+ parser.add_option("--filter",
+ help="Only run the benchmarks beginning with this string. "
+ "For example: "
+ "--filter=JSTests/TypedArrays/ will run only TypedArray "
+ "benchmarks from the JSTests suite.",
+ default="")
(options, args) = parser.parse_args(args)
@@ -1092,9 +1098,12 @@ def Main(args):
def NodeCB(node):
platform.PreTests(node, path)
- # Traverse graph/trace tree and interate over all runnables.
+ # Traverse graph/trace tree and iterate over all runnables.
for runnable in FlattenRunnables(root, NodeCB):
- print ">>> Running suite: %s" % "/".join(runnable.graphs)
+ runnable_name = "/".join(runnable.graphs)
+ if not runnable_name.startswith(options.filter):
+ continue
+ print ">>> Running suite: %s" % runnable_name
def Runner():
"""Output generator that reruns several times."""
diff --git a/deps/v8/tools/testrunner/local/execution.py b/deps/v8/tools/testrunner/local/execution.py
index d5b519aadb..dc55129a14 100644
--- a/deps/v8/tools/testrunner/local/execution.py
+++ b/deps/v8/tools/testrunner/local/execution.py
@@ -62,17 +62,18 @@ ProcessContext = collections.namedtuple(
"process_context", ["suites", "context"])
-def MakeProcessContext(context):
+def MakeProcessContext(context, suite_names):
"""Generate a process-local context.
This reloads all suites per process and stores the global context.
Args:
context: The global context from the test runner.
+ suite_names (list of str): Suite names as loaded by the parent process.
+ Load the same suites in each subprocess.
"""
- suite_paths = utils.GetSuitePaths(TEST_DIR)
suites = {}
- for root in suite_paths:
+ for root in suite_names:
# Don't reinitialize global state as this is concurrently called from
# different processes.
suite = testsuite.TestSuite.LoadTestSuite(
@@ -198,7 +199,8 @@ class Runner(object):
self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
self.perf_failures = False
self.printed_allocations = False
- self.tests = [ t for s in suites for t in s.tests ]
+ self.tests = [t for s in suites for t in s.tests]
+ self.suite_names = [s.name for s in suites]
# Always pre-sort by status file, slowest tests first.
slow_key = lambda t: statusfile.IsSlow(t.outcomes)
@@ -353,7 +355,7 @@ class Runner(object):
fn=RunTest,
gen=gen_tests(),
process_context_fn=MakeProcessContext,
- process_context_args=[self.context],
+ process_context_args=[self.context, self.suite_names],
)
for result in it:
if result.heartbeat:
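
Passing the parent's suite names keeps worker processes deterministic; a
minimal sketch of the idea (load_suite is a hypothetical stand-in for
testsuite.TestSuite.LoadTestSuite):

    def load_suite(name):
      # Hypothetical stand-in for testsuite.TestSuite.LoadTestSuite.
      return {"name": name, "tests": []}

    def make_process_context(context, suite_names):
      # Reload exactly the suites the parent discovered, instead of re-scanning
      # the test directory, which could yield a different set in each worker.
      return dict((name, load_suite(name)) for name in suite_names)

    print(make_process_context(None, ["mjsunit", "cctest"]))
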
diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py
index f7fa19b20a..68f39d6b4a 100644
--- a/deps/v8/tools/testrunner/local/testsuite.py
+++ b/deps/v8/tools/testrunner/local/testsuite.py
@@ -71,7 +71,7 @@ class TestSuite(object):
f = None
try:
(f, pathname, description) = imp.find_module("testcfg", [root])
- module = imp.load_module("testcfg", f, pathname, description)
+ module = imp.load_module(name + "_testcfg", f, pathname, description)
return module.GetSuite(name, root)
except ImportError:
# Use default if no testcfg is present.
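
Loading every suite's testcfg.py under the module name "testcfg" lets one
suite's module shadow another's in sys.modules; keying the module name by suite
avoids that. A sketch of the fixed loading (suite name and root are
hypothetical):

    import imp

    def load_testcfg(name, root):
      f, pathname, description = imp.find_module("testcfg", [root])
      try:
        # A unique module name per suite avoids clobbering earlier entries.
        return imp.load_module(name + "_testcfg", f, pathname, description)
      finally:
        if f:
          f.close()
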
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index 2ad00cff2a..0dba0d9579 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -10,11 +10,12 @@ ALL_VARIANT_FLAGS = {
"turbofan_opt": [["--turbo", "--always-opt"]],
"noturbofan": [["--no-turbo"]],
"noturbofan_stress": [["--no-turbo", "--stress-opt", "--always-opt"]],
- "fullcode": [["--nocrankshaft", "--no-turbo"]],
- # No optimization actually means no profile guided optimization -
- # %OptimizeFunctionOnNextCall still works.
- "nooptimization": [["--nocrankshaft"]],
- "asm_wasm": [["--validate-asm", "--fast-validate-asm", "--stress-validate-asm", "--suppress-asm-messages"]],
+ "fullcode": [["--noopt", "--no-turbo"]],
+  # No optimization means all optimizations are disabled; even
+  # %OptimizeFunctionOnNextCall no longer forces optimization (it becomes a
+  # no-op). See https://chromium-review.googlesource.com/c/452620/ for details.
+ "nooptimization": [["--noopt"]],
+ "asm_wasm": [["--validate-asm", "--stress-validate-asm", "--suppress-asm-messages"]],
"wasm_traps": [["--wasm_guard_pages", "--wasm_trap_handler", "--invoke-weak-callbacks"]],
}
@@ -25,11 +26,12 @@ FAST_VARIANT_FLAGS = {
"turbofan": [["--turbo"]],
"noturbofan": [["--no-turbo"]],
"noturbofan_stress": [["--no-turbo", "--stress-opt"]],
- "fullcode": [["--nocrankshaft", "--no-turbo"]],
- # No optimization actually means no profile guided optimization -
- # %OptimizeFunctionOnNextCall still works.
- "nooptimization": [["--nocrankshaft"]],
- "asm_wasm": [["--validate-asm", "--fast-validate-asm", "--stress-validate-asm", "--suppress-asm-messages"]],
+ "fullcode": [["--noopt", "--no-turbo"]],
+  # No optimization means all optimizations are disabled; even
+  # %OptimizeFunctionOnNextCall no longer forces optimization (it becomes a
+  # no-op). See https://chromium-review.googlesource.com/c/452620/ for details.
+ "nooptimization": [["--noopt"]],
+ "asm_wasm": [["--validate-asm", "--stress-validate-asm", "--suppress-asm-messages"]],
"wasm_traps": [["--wasm_guard_pages", "--wasm_trap_handler", "--invoke-weak-callbacks"]],
}
diff --git a/deps/v8/tools/testrunner/testrunner.isolate b/deps/v8/tools/testrunner/testrunner.isolate
index bfc9318adf..545d888871 100644
--- a/deps/v8/tools/testrunner/testrunner.isolate
+++ b/deps/v8/tools/testrunner/testrunner.isolate
@@ -12,7 +12,7 @@
],
},
'conditions': [
- ['coverage==1 and sanitizer_coverage=="bb"', {
+ ['coverage==1 and sanitizer_coverage=="bb,trace-pc-guard"', {
'variables': {
'files': [
'../sanitizers/sancov_merger.py',
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
index b9dac9ca3f..98d3b067e1 100755
--- a/deps/v8/tools/try_perf.py
+++ b/deps/v8/tools/try_perf.py
@@ -91,7 +91,7 @@ def main():
# Ensure depot_tools are updated.
subprocess.check_output(
- 'gclient', shell=True, stderr=subprocess.STDOUT, cwd=V8_BASE)
+ 'update_depot_tools', shell=True, stderr=subprocess.STDOUT, cwd=V8_BASE)
cmd = ['git cl try -m internal.client.v8']
cmd += ['-b %s' % bot for bot in options.bots]
diff --git a/deps/v8/tools/turbolizer/disassembly-view.js b/deps/v8/tools/turbolizer/disassembly-view.js
index a2a534cd7f..ecee04988c 100644
--- a/deps/v8/tools/turbolizer/disassembly-view.js
+++ b/deps/v8/tools/turbolizer/disassembly-view.js
@@ -87,7 +87,7 @@ class DisassemblyView extends TextView {
[/^.*/, UNCLASSIFIED_STYLE, -1]
],
[
- [/^\s+\d+\s+[0-9a-f]+\s+/, NUMBER_STYLE, 2],
+ [/^\s+[0-9a-f]+\s+[0-9a-f]+\s+/, NUMBER_STYLE, 2],
[/^.*/, null, -1]
],
[
diff --git a/deps/v8/tools/v8-info.sh b/deps/v8/tools/v8-info.sh
deleted file mode 100755
index 838d92a001..0000000000
--- a/deps/v8/tools/v8-info.sh
+++ /dev/null
@@ -1,161 +0,0 @@
-#!/bin/bash
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-########## Global variable definitions
-
-BASE_URL="https://code.google.com/p/v8/source/list"
-VERSION="include/v8-version.h"
-MAJOR="V8_MAJOR_VERSION"
-MINOR="V8_MINOR_VERSION"
-BUILD="V8_BUILD_NUMBER"
-PATCH="V8_PATCH_LEVEL"
-
-V8="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
-
-########## Function definitions
-
-cd $V8
-
-usage() {
-cat << EOF
-usage: $0 OPTIONS
-
-Fetches V8 revision information from a git-svn checkout.
-
-OPTIONS:
- -h Show this message.
-
- -i Print revision info for all branches matching the V8 version.
- Example usage: $0 -i 3.19.10$
- Output format: [Git hash] [SVN revision] [V8 version]
-
- -v Print the V8 version tag for a trunk SVN revision.
- Example usage: $0 -v 14981
- Output format: [V8 version]
-
- -m Print all patches that were merged to the specified V8 branch.
- Example usage: $0 -m 3.18
- Output format: [V8 version] [SVN revision] [SVN patch merged]*.
-
- -p Print all patches merged to a specific V8 point-release.
- Example usage: $0 -p 3.19.12.1
- Output format: [SVN patch merged]*
-
- -u Print a link to all SVN revisions between two V8 revision tags.
- Example usage: $0 -u 3.19.10:3.19.11
-EOF
-}
-
-tags() {
- git for-each-ref --format="%(objectname) %(refname:short)" refs/remotes/svn
-}
-
-tag_revision() {
- cut -d" " -f1
-}
-
-tag_log() {
- git log --format="%h %ci %ce %s" -1 $1
-}
-
-v8_hash() {
- tags | grep "svn/tags/$1$" | tag_revision
-}
-
-point_merges() {
- echo $1 | grep -o "r[0-9]\+"
-}
-
-hash_to_svn() {
- git svn log -1 --oneline $1 | cut -d" " -f1
-}
-
-tag_version() {
- tags | grep svn/tags/$1 | while read tag; do
- id=$(echo $tag | grep -o "[^/]*$")
- rev=$(echo $tag | tag_revision)
- svn=$(hash_to_svn $rev)
- echo $rev $svn $id
- done
-}
-
-svn_rev() {
- git svn find-rev $2 svn/$1
-}
-
-v8_rev() {
- cd $(git rev-parse --show-toplevel)
- rev=$(git show $1:$VERSION \
- | grep "#define" \
- | grep "$MAJOR\|$MINOR\|$BUILD\|$PATCH" \
- | grep -o "[0-9]\+$" \
- | tr "\\n" ".")
- echo ${rev%?}
-}
-
-merges_to_branch() {
- git cherry -v svn/trunk svn/$1 | while read merge; do
- h=$(echo $merge | cut -d" " -f2)
- svn=$(svn_rev $1 $h)
- merges=$(echo $merge | grep -o "r[0-9]\+")
- rev=$(v8_rev $h)
- echo $rev r$svn $merges
- done
-}
-
-url_for() {
- first=$(svn_rev trunk $(v8_hash $(echo $1 | cut -d":" -f1)))
- last=$(svn_rev trunk $(v8_hash $(echo $1 | cut -d":" -f2)))
- num=$[ $last - $first]
- echo "$BASE_URL?num=$num&start=$last"
-}
-
-########## Option parsing
-
-while getopts ":hi:v:m:p:u:" OPTION ; do
- case $OPTION in
- h) usage
- exit 0
- ;;
- i) tag_version $OPTARG
- ;;
- v) v8_rev $(svn_rev trunk r$OPTARG)
- ;;
- m) merges_to_branch $OPTARG
- ;;
- p) echo $(point_merges "$(tag_log $(v8_hash $OPTARG)^1)")
- ;;
- u) url_for $OPTARG
- ;;
- ?) echo "Illegal option: -$OPTARG"
- usage
- exit 1
- ;;
- esac
-done
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index d43291abe8..d2d6ae9266 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -2,6 +2,9 @@
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
+# This file is automatically generated by mkgrokdump and should not
+# be modified manually.
+
# List of known V8 instance types.
INSTANCE_TYPES = {
0: "INTERNALIZED_STRING_TYPE",
@@ -51,97 +54,100 @@ INSTANCE_TYPES = {
150: "ACCESSOR_PAIR_TYPE",
151: "ACCESS_CHECK_INFO_TYPE",
152: "INTERCEPTOR_INFO_TYPE",
- 153: "CALL_HANDLER_INFO_TYPE",
- 154: "FUNCTION_TEMPLATE_INFO_TYPE",
- 155: "OBJECT_TEMPLATE_INFO_TYPE",
- 156: "ALLOCATION_SITE_TYPE",
- 157: "ALLOCATION_MEMENTO_TYPE",
- 158: "SCRIPT_TYPE",
- 159: "TYPE_FEEDBACK_INFO_TYPE",
- 160: "ALIASED_ARGUMENTS_ENTRY_TYPE",
- 161: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
- 162: "PROMISE_REACTION_JOB_INFO_TYPE",
- 163: "DEBUG_INFO_TYPE",
- 164: "BREAK_POINT_INFO_TYPE",
- 165: "PROTOTYPE_INFO_TYPE",
- 166: "TUPLE2_TYPE",
- 167: "TUPLE3_TYPE",
- 168: "CONTEXT_EXTENSION_TYPE",
- 169: "CONSTANT_ELEMENTS_PAIR_TYPE",
- 170: "MODULE_TYPE",
- 171: "MODULE_INFO_ENTRY_TYPE",
- 172: "FIXED_ARRAY_TYPE",
- 173: "TRANSITION_ARRAY_TYPE",
- 174: "SHARED_FUNCTION_INFO_TYPE",
- 175: "CELL_TYPE",
- 176: "WEAK_CELL_TYPE",
- 177: "PROPERTY_CELL_TYPE",
- 178: "JS_PROXY_TYPE",
- 179: "JS_GLOBAL_OBJECT_TYPE",
- 180: "JS_GLOBAL_PROXY_TYPE",
- 181: "JS_SPECIAL_API_OBJECT_TYPE",
- 182: "JS_VALUE_TYPE",
- 183: "JS_MESSAGE_OBJECT_TYPE",
- 184: "JS_DATE_TYPE",
- 185: "JS_API_OBJECT_TYPE",
- 186: "JS_OBJECT_TYPE",
- 187: "JS_ARGUMENTS_TYPE",
- 188: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
- 189: "JS_GENERATOR_OBJECT_TYPE",
- 190: "JS_MODULE_NAMESPACE_TYPE",
- 191: "JS_ARRAY_TYPE",
- 192: "JS_ARRAY_BUFFER_TYPE",
- 193: "JS_TYPED_ARRAY_TYPE",
- 194: "JS_DATA_VIEW_TYPE",
- 195: "JS_SET_TYPE",
- 196: "JS_MAP_TYPE",
- 197: "JS_SET_ITERATOR_TYPE",
- 198: "JS_MAP_ITERATOR_TYPE",
- 199: "JS_WEAK_MAP_TYPE",
- 200: "JS_WEAK_SET_TYPE",
- 201: "JS_PROMISE_CAPABILITY_TYPE",
- 202: "JS_PROMISE_TYPE",
- 203: "JS_REGEXP_TYPE",
- 204: "JS_ERROR_TYPE",
- 205: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
- 206: "JS_STRING_ITERATOR_TYPE",
- 207: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE",
- 208: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE",
- 209: "JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE",
- 210: "JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 211: "JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 212: "JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 213: "JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 214: "JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 215: "JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 216: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 217: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 218: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 219: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 220: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 221: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 222: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 223: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 224: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 225: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
- 226: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
- 227: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
- 228: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
- 229: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
- 230: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
- 231: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
- 232: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
- 233: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
- 234: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
- 235: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 236: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
- 237: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
- 238: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
- 239: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 240: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
- 241: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
- 242: "JS_BOUND_FUNCTION_TYPE",
- 243: "JS_FUNCTION_TYPE",
+ 153: "FUNCTION_TEMPLATE_INFO_TYPE",
+ 154: "OBJECT_TEMPLATE_INFO_TYPE",
+ 155: "ALLOCATION_SITE_TYPE",
+ 156: "ALLOCATION_MEMENTO_TYPE",
+ 157: "SCRIPT_TYPE",
+ 158: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+ 159: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
+ 160: "PROMISE_REACTION_JOB_INFO_TYPE",
+ 161: "DEBUG_INFO_TYPE",
+ 162: "STACK_FRAME_INFO_TYPE",
+ 163: "PROTOTYPE_INFO_TYPE",
+ 164: "TUPLE2_TYPE",
+ 165: "TUPLE3_TYPE",
+ 166: "CONTEXT_EXTENSION_TYPE",
+ 167: "MODULE_TYPE",
+ 168: "MODULE_INFO_ENTRY_TYPE",
+ 169: "ASYNC_GENERATOR_REQUEST_TYPE",
+ 170: "FIXED_ARRAY_TYPE",
+ 171: "TRANSITION_ARRAY_TYPE",
+ 172: "SHARED_FUNCTION_INFO_TYPE",
+ 173: "CELL_TYPE",
+ 174: "WEAK_CELL_TYPE",
+ 175: "PROPERTY_CELL_TYPE",
+ 176: "PADDING_TYPE_1",
+ 177: "PADDING_TYPE_2",
+ 178: "PADDING_TYPE_3",
+ 179: "PADDING_TYPE_4",
+ 180: "JS_PROXY_TYPE",
+ 181: "JS_GLOBAL_OBJECT_TYPE",
+ 182: "JS_GLOBAL_PROXY_TYPE",
+ 183: "JS_SPECIAL_API_OBJECT_TYPE",
+ 184: "JS_VALUE_TYPE",
+ 185: "JS_MESSAGE_OBJECT_TYPE",
+ 186: "JS_DATE_TYPE",
+ 187: "JS_API_OBJECT_TYPE",
+ 188: "JS_OBJECT_TYPE",
+ 189: "JS_ARGUMENTS_TYPE",
+ 190: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+ 191: "JS_GENERATOR_OBJECT_TYPE",
+ 192: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+ 193: "JS_MODULE_NAMESPACE_TYPE",
+ 194: "JS_ARRAY_TYPE",
+ 195: "JS_ARRAY_BUFFER_TYPE",
+ 196: "JS_TYPED_ARRAY_TYPE",
+ 197: "JS_DATA_VIEW_TYPE",
+ 198: "JS_SET_TYPE",
+ 199: "JS_MAP_TYPE",
+ 200: "JS_SET_ITERATOR_TYPE",
+ 201: "JS_MAP_ITERATOR_TYPE",
+ 202: "JS_WEAK_MAP_TYPE",
+ 203: "JS_WEAK_SET_TYPE",
+ 204: "JS_PROMISE_CAPABILITY_TYPE",
+ 205: "JS_PROMISE_TYPE",
+ 206: "JS_REGEXP_TYPE",
+ 207: "JS_ERROR_TYPE",
+ 208: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+ 209: "JS_STRING_ITERATOR_TYPE",
+ 210: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE",
+ 211: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE",
+ 212: "JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE",
+ 213: "JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 214: "JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 215: "JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 216: "JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 217: "JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 218: "JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 219: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 220: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 221: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 222: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 223: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 224: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 225: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 226: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 227: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 228: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
+ 229: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 230: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
+ 231: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 232: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
+ 233: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 234: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 235: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
+ 236: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
+ 237: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
+ 238: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 239: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
+ 240: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
+ 241: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
+ 242: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 243: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
+ 244: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
+ 245: "JS_BOUND_FUNCTION_TYPE",
+ 246: "JS_FUNCTION_TYPE",
}
# List of known V8 maps.
@@ -149,7 +155,7 @@ KNOWN_MAPS = {
0x02201: (137, "FreeSpaceMap"),
0x02259: (131, "MetaMap"),
0x022b1: (130, "NullMap"),
- 0x02309: (172, "FixedArrayMap"),
+ 0x02309: (170, "FixedArrayMap"),
0x02361: (8, "OneByteInternalizedStringMap"),
0x023b9: (148, "OnePointerFillerMap"),
0x02411: (148, "TwoPointerFillerMap"),
@@ -159,100 +165,98 @@ KNOWN_MAPS = {
0x02571: (130, "TheHoleMap"),
0x025c9: (130, "BooleanMap"),
0x02621: (135, "ByteArrayMap"),
- 0x02679: (172, "FixedCOWArrayMap"),
- 0x026d1: (172, "HashTableMap"),
+ 0x02679: (170, "FixedCOWArrayMap"),
+ 0x026d1: (170, "HashTableMap"),
0x02729: (128, "SymbolMap"),
0x02781: (72, "OneByteStringMap"),
- 0x027d9: (172, "ScopeInfoMap"),
- 0x02831: (174, "SharedFunctionInfoMap"),
+ 0x027d9: (170, "ScopeInfoMap"),
+ 0x02831: (172, "SharedFunctionInfoMap"),
0x02889: (132, "CodeMap"),
- 0x028e1: (172, "FunctionContextMap"),
- 0x02939: (175, "CellMap"),
- 0x02991: (176, "WeakCellMap"),
- 0x029e9: (177, "GlobalPropertyCellMap"),
+ 0x028e1: (170, "FunctionContextMap"),
+ 0x02939: (173, "CellMap"),
+ 0x02991: (174, "WeakCellMap"),
+ 0x029e9: (175, "GlobalPropertyCellMap"),
0x02a41: (134, "ForeignMap"),
- 0x02a99: (173, "TransitionArrayMap"),
+ 0x02a99: (171, "TransitionArrayMap"),
0x02af1: (130, "ArgumentsMarkerMap"),
- 0x02b49: (172, "NativeContextMap"),
- 0x02ba1: (172, "ModuleContextMap"),
- 0x02bf9: (172, "EvalContextMap"),
- 0x02c51: (172, "ScriptContextMap"),
- 0x02ca9: (172, "BlockContextMap"),
- 0x02d01: (172, "CatchContextMap"),
- 0x02d59: (172, "WithContextMap"),
- 0x02db1: (147, "FixedDoubleArrayMap"),
- 0x02e09: (133, "MutableHeapNumberMap"),
- 0x02e61: (172, "OrderedHashTableMap"),
- 0x02eb9: (172, "SloppyArgumentsElementsMap"),
- 0x02f11: (183, "JSMessageObjectMap"),
- 0x02f69: (136, "BytecodeArrayMap"),
- 0x02fc1: (172, "ModuleInfoMap"),
- 0x03019: (175, "NoClosuresCellMap"),
- 0x03071: (175, "OneClosureCellMap"),
- 0x030c9: (175, "ManyClosuresCellMap"),
- 0x03121: (64, "StringMap"),
- 0x03179: (73, "ConsOneByteStringMap"),
- 0x031d1: (65, "ConsStringMap"),
- 0x03229: (77, "ThinOneByteStringMap"),
- 0x03281: (69, "ThinStringMap"),
- 0x032d9: (67, "SlicedStringMap"),
- 0x03331: (75, "SlicedOneByteStringMap"),
- 0x03389: (66, "ExternalStringMap"),
- 0x033e1: (82, "ExternalStringWithOneByteDataMap"),
- 0x03439: (74, "ExternalOneByteStringMap"),
- 0x03491: (98, "ShortExternalStringMap"),
- 0x034e9: (114, "ShortExternalStringWithOneByteDataMap"),
- 0x03541: (0, "InternalizedStringMap"),
- 0x03599: (2, "ExternalInternalizedStringMap"),
- 0x035f1: (18, "ExternalInternalizedStringWithOneByteDataMap"),
- 0x03649: (10, "ExternalOneByteInternalizedStringMap"),
- 0x036a1: (34, "ShortExternalInternalizedStringMap"),
- 0x036f9: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
- 0x03751: (42, "ShortExternalOneByteInternalizedStringMap"),
- 0x037a9: (106, "ShortExternalOneByteStringMap"),
- 0x03801: (172, "FeedbackVectorMap"),
- 0x03859: (130, "ExceptionMap"),
- 0x038b1: (130, "TerminationExceptionMap"),
- 0x03909: (130, "OptimizedOutMap"),
- 0x03961: (130, "StaleRegisterMap"),
- 0x039b9: (172, "DebugEvaluateContextMap"),
- 0x03a11: (172, "ScriptContextTableMap"),
- 0x03a69: (172, "UnseededNumberDictionaryMap"),
- 0x03ac1: (186, "ExternalMap"),
- 0x03b19: (106, "NativeSourceStringMap"),
- 0x03b71: (139, "FixedUint8ArrayMap"),
- 0x03bc9: (138, "FixedInt8ArrayMap"),
- 0x03c21: (141, "FixedUint16ArrayMap"),
- 0x03c79: (140, "FixedInt16ArrayMap"),
- 0x03cd1: (143, "FixedUint32ArrayMap"),
- 0x03d29: (142, "FixedInt32ArrayMap"),
- 0x03d81: (144, "FixedFloat32ArrayMap"),
- 0x03dd9: (145, "FixedFloat64ArrayMap"),
- 0x03e31: (146, "FixedUint8ClampedArrayMap"),
- 0x03e89: (158, "ScriptMap"),
+ 0x02b49: (130, "ExceptionMap"),
+ 0x02ba1: (130, "TerminationExceptionMap"),
+ 0x02bf9: (130, "OptimizedOutMap"),
+ 0x02c51: (130, "StaleRegisterMap"),
+ 0x02ca9: (170, "NativeContextMap"),
+ 0x02d01: (170, "ModuleContextMap"),
+ 0x02d59: (170, "EvalContextMap"),
+ 0x02db1: (170, "ScriptContextMap"),
+ 0x02e09: (170, "BlockContextMap"),
+ 0x02e61: (170, "CatchContextMap"),
+ 0x02eb9: (170, "WithContextMap"),
+ 0x02f11: (147, "FixedDoubleArrayMap"),
+ 0x02f69: (133, "MutableHeapNumberMap"),
+ 0x02fc1: (170, "OrderedHashTableMap"),
+ 0x03019: (170, "SloppyArgumentsElementsMap"),
+ 0x03071: (185, "JSMessageObjectMap"),
+ 0x030c9: (136, "BytecodeArrayMap"),
+ 0x03121: (170, "ModuleInfoMap"),
+ 0x03179: (173, "NoClosuresCellMap"),
+ 0x031d1: (173, "OneClosureCellMap"),
+ 0x03229: (173, "ManyClosuresCellMap"),
+ 0x03281: (64, "StringMap"),
+ 0x032d9: (73, "ConsOneByteStringMap"),
+ 0x03331: (65, "ConsStringMap"),
+ 0x03389: (77, "ThinOneByteStringMap"),
+ 0x033e1: (69, "ThinStringMap"),
+ 0x03439: (67, "SlicedStringMap"),
+ 0x03491: (75, "SlicedOneByteStringMap"),
+ 0x034e9: (66, "ExternalStringMap"),
+ 0x03541: (82, "ExternalStringWithOneByteDataMap"),
+ 0x03599: (74, "ExternalOneByteStringMap"),
+ 0x035f1: (98, "ShortExternalStringMap"),
+ 0x03649: (114, "ShortExternalStringWithOneByteDataMap"),
+ 0x036a1: (0, "InternalizedStringMap"),
+ 0x036f9: (2, "ExternalInternalizedStringMap"),
+ 0x03751: (18, "ExternalInternalizedStringWithOneByteDataMap"),
+ 0x037a9: (10, "ExternalOneByteInternalizedStringMap"),
+ 0x03801: (34, "ShortExternalInternalizedStringMap"),
+ 0x03859: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
+ 0x038b1: (42, "ShortExternalOneByteInternalizedStringMap"),
+ 0x03909: (106, "ShortExternalOneByteStringMap"),
+ 0x03961: (139, "FixedUint8ArrayMap"),
+ 0x039b9: (138, "FixedInt8ArrayMap"),
+ 0x03a11: (141, "FixedUint16ArrayMap"),
+ 0x03a69: (140, "FixedInt16ArrayMap"),
+ 0x03ac1: (143, "FixedUint32ArrayMap"),
+ 0x03b19: (142, "FixedInt32ArrayMap"),
+ 0x03b71: (144, "FixedFloat32ArrayMap"),
+ 0x03bc9: (145, "FixedFloat64ArrayMap"),
+ 0x03c21: (146, "FixedUint8ClampedArrayMap"),
+ 0x03c79: (157, "ScriptMap"),
+ 0x03cd1: (170, "FeedbackVectorMap"),
+ 0x03d29: (170, "DebugEvaluateContextMap"),
+ 0x03d81: (170, "ScriptContextTableMap"),
+ 0x03dd9: (170, "UnseededNumberDictionaryMap"),
+ 0x03e31: (188, "ExternalMap"),
+ 0x03e89: (106, "NativeSourceStringMap"),
0x03ee1: (152, "InterceptorInfoMap"),
- 0x03f39: (201, "JSPromiseCapabilityMap"),
- 0x03f91: (149, "AccessorInfoMap"),
- 0x03fe9: (150, "AccessorPairMap"),
- 0x04041: (151, "AccessCheckInfoMap"),
- 0x04099: (153, "CallHandlerInfoMap"),
- 0x040f1: (154, "FunctionTemplateInfoMap"),
- 0x04149: (155, "ObjectTemplateInfoMap"),
- 0x041a1: (156, "AllocationSiteMap"),
- 0x041f9: (157, "AllocationMementoMap"),
- 0x04251: (159, "TypeFeedbackInfoMap"),
- 0x042a9: (160, "AliasedArgumentsEntryMap"),
- 0x04301: (161, "PromiseResolveThenableJobInfoMap"),
- 0x04359: (162, "PromiseReactionJobInfoMap"),
- 0x043b1: (163, "DebugInfoMap"),
- 0x04409: (164, "BreakPointInfoMap"),
- 0x04461: (165, "PrototypeInfoMap"),
- 0x044b9: (166, "Tuple2Map"),
- 0x04511: (167, "Tuple3Map"),
- 0x04569: (168, "ContextExtensionMap"),
- 0x045c1: (169, "ConstantElementsPairMap"),
- 0x04619: (170, "ModuleMap"),
- 0x04671: (171, "ModuleInfoEntryMap"),
+ 0x03f39: (156, "AllocationMementoMap"),
+ 0x03f91: (204, "JSPromiseCapabilityMap"),
+ 0x03fe9: (149, "AccessorInfoMap"),
+ 0x04041: (150, "AccessorPairMap"),
+ 0x04099: (151, "AccessCheckInfoMap"),
+ 0x040f1: (153, "FunctionTemplateInfoMap"),
+ 0x04149: (154, "ObjectTemplateInfoMap"),
+ 0x041a1: (155, "AllocationSiteMap"),
+ 0x041f9: (158, "AliasedArgumentsEntryMap"),
+ 0x04251: (159, "PromiseResolveThenableJobInfoMap"),
+ 0x042a9: (160, "PromiseReactionJobInfoMap"),
+ 0x04301: (161, "DebugInfoMap"),
+ 0x04359: (162, "StackFrameInfoMap"),
+ 0x043b1: (163, "PrototypeInfoMap"),
+ 0x04409: (164, "Tuple2Map"),
+ 0x04461: (165, "Tuple3Map"),
+ 0x044b9: (166, "ContextExtensionMap"),
+ 0x04511: (167, "ModuleMap"),
+ 0x04569: (168, "ModuleInfoEntryMap"),
+ 0x045c1: (169, "AsyncGeneratorRequestMap"),
}
# List of known V8 objects.
@@ -268,56 +272,38 @@ KNOWN_OBJECTS = {
("OLD_SPACE", 0x023b1): "TrueValue",
("OLD_SPACE", 0x02421): "FalseValue",
("OLD_SPACE", 0x02471): "empty_string",
- ("OLD_SPACE", 0x02489): "ArgumentsMarker",
- ("OLD_SPACE", 0x024e1): "EmptyByteArray",
- ("OLD_SPACE", 0x024f1): "EmptyWeakCell",
- ("OLD_SPACE", 0x02509): "InfinityValue",
- ("OLD_SPACE", 0x02519): "MinusZeroValue",
- ("OLD_SPACE", 0x02529): "MinusInfinityValue",
- ("OLD_SPACE", 0x04979): "EmptyScopeInfo",
- ("OLD_SPACE", 0x04989): "Exception",
- ("OLD_SPACE", 0x049e1): "TerminationException",
- ("OLD_SPACE", 0x04a41): "OptimizedOut",
- ("OLD_SPACE", 0x04a99): "StaleRegister",
- ("OLD_SPACE", 0x04af1): "EmptyFixedUint8Array",
- ("OLD_SPACE", 0x04b11): "EmptyFixedInt8Array",
- ("OLD_SPACE", 0x04b31): "EmptyFixedUint16Array",
- ("OLD_SPACE", 0x04b51): "EmptyFixedInt16Array",
- ("OLD_SPACE", 0x04b71): "EmptyFixedUint32Array",
- ("OLD_SPACE", 0x04b91): "EmptyFixedInt32Array",
- ("OLD_SPACE", 0x04bb1): "EmptyFixedFloat32Array",
- ("OLD_SPACE", 0x04bd1): "EmptyFixedFloat64Array",
- ("OLD_SPACE", 0x04bf1): "EmptyFixedUint8ClampedArray",
- ("OLD_SPACE", 0x04c11): "EmptyScript",
- ("OLD_SPACE", 0x04c99): "UndefinedCell",
- ("OLD_SPACE", 0x04ca9): "EmptySloppyArgumentsElements",
- ("OLD_SPACE", 0x04cc9): "EmptySlowElementDictionary",
- ("OLD_SPACE", 0x04d19): "EmptyPropertyCell",
- ("OLD_SPACE", 0x04d39): "ArrayProtector",
- ("OLD_SPACE", 0x04d59): "IsConcatSpreadableProtector",
- ("OLD_SPACE", 0x04d69): "SpeciesProtector",
- ("OLD_SPACE", 0x04d79): "StringLengthProtector",
- ("OLD_SPACE", 0x04d99): "FastArrayIterationProtector",
- ("OLD_SPACE", 0x04da9): "ArrayIteratorProtector",
- ("OLD_SPACE", 0x04dc9): "ArrayBufferNeuteringProtector",
- ("OLD_SPACE", 0x04de9): "NumberStringCache",
- ("OLD_SPACE", 0x05df9): "SingleCharacterStringCache",
- ("OLD_SPACE", 0x06669): "StringSplitCache",
- ("OLD_SPACE", 0x06e79): "RegExpMultipleCache",
- ("OLD_SPACE", 0x07689): "NativesSourceCache",
- ("OLD_SPACE", 0x07931): "ExtraNativesSourceCache",
- ("OLD_SPACE", 0x07969): "ExperimentalExtraNativesSourceCache",
- ("OLD_SPACE", 0x07981): "EmptyPropertiesDictionary",
- ("OLD_SPACE", 0x079d1): "ScriptList",
- ("OLD_SPACE", 0x22019): "CodeStubs",
- ("OLD_SPACE", 0x2f199): "WeakObjectToCodeTable",
- ("OLD_SPACE", 0x2f3c1): "WeakNewSpaceObjectToCodeList",
- ("OLD_SPACE", 0x2f451): "NoScriptSharedFunctionInfos",
- ("OLD_SPACE", 0x4abd9): "MessageListeners",
- ("OLD_SPACE", 0x4abf9): "NoOpInterceptorInfo",
- ("OLD_SPACE", 0x531d1): "StringTable",
- ("CODE_SPACE", 0x2cde1): "JsEntryCode",
- ("CODE_SPACE", 0x31241): "JsConstructEntryCode",
+ ("OLD_SPACE", 0x02489): "EmptyScopeInfo",
+ ("OLD_SPACE", 0x02499): "ArgumentsMarker",
+ ("OLD_SPACE", 0x024f1): "Exception",
+ ("OLD_SPACE", 0x02549): "TerminationException",
+ ("OLD_SPACE", 0x025a9): "OptimizedOut",
+ ("OLD_SPACE", 0x02601): "StaleRegister",
+ ("OLD_SPACE", 0x02659): "EmptyByteArray",
+ ("OLD_SPACE", 0x02669): "EmptyFixedUint8Array",
+ ("OLD_SPACE", 0x02689): "EmptyFixedInt8Array",
+ ("OLD_SPACE", 0x026a9): "EmptyFixedUint16Array",
+ ("OLD_SPACE", 0x026c9): "EmptyFixedInt16Array",
+ ("OLD_SPACE", 0x026e9): "EmptyFixedUint32Array",
+ ("OLD_SPACE", 0x02709): "EmptyFixedInt32Array",
+ ("OLD_SPACE", 0x02729): "EmptyFixedFloat32Array",
+ ("OLD_SPACE", 0x02749): "EmptyFixedFloat64Array",
+ ("OLD_SPACE", 0x02769): "EmptyFixedUint8ClampedArray",
+ ("OLD_SPACE", 0x02789): "EmptyScript",
+ ("OLD_SPACE", 0x02811): "UndefinedCell",
+ ("OLD_SPACE", 0x02821): "EmptySloppyArgumentsElements",
+ ("OLD_SPACE", 0x02841): "EmptySlowElementDictionary",
+ ("OLD_SPACE", 0x02891): "EmptyPropertyCell",
+ ("OLD_SPACE", 0x028b1): "EmptyWeakCell",
+ ("OLD_SPACE", 0x028c9): "ArrayProtector",
+ ("OLD_SPACE", 0x028e9): "IsConcatSpreadableProtector",
+ ("OLD_SPACE", 0x028f9): "SpeciesProtector",
+ ("OLD_SPACE", 0x02909): "StringLengthProtector",
+ ("OLD_SPACE", 0x02929): "FastArrayIterationProtector",
+ ("OLD_SPACE", 0x02939): "ArrayIteratorProtector",
+ ("OLD_SPACE", 0x02959): "ArrayBufferNeuteringProtector",
+ ("OLD_SPACE", 0x02979): "InfinityValue",
+ ("OLD_SPACE", 0x02989): "MinusZeroValue",
+ ("OLD_SPACE", 0x02999): "MinusInfinityValue",
}
# List of known V8 Frame Markers.
@@ -340,3 +326,5 @@ FRAME_MARKERS = (
"BUILTIN",
"BUILTIN_EXIT",
)
+
+# This set of constants is generated from a shipping build.
diff --git a/deps/v8/tools/v8heapconst.py.tmpl b/deps/v8/tools/v8heapconst.py.tmpl
deleted file mode 100644
index a773f47c8b..0000000000
--- a/deps/v8/tools/v8heapconst.py.tmpl
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# This file is automatically generated from the V8 source and should not
-# be modified manually, run 'make grokdump' instead to update this file.
-
diff --git a/deps/v8/tools/verify_source_deps.py b/deps/v8/tools/verify_source_deps.py
index 6f804040cc..e3a39c1d17 100755
--- a/deps/v8/tools/verify_source_deps.py
+++ b/deps/v8/tools/verify_source_deps.py
@@ -31,6 +31,7 @@ GYP_FILES = [
os.path.join(V8_BASE, 'test', 'fuzzer', 'fuzzer.gyp'),
os.path.join(V8_BASE, 'test', 'unittests', 'unittests.gyp'),
os.path.join(V8_BASE, 'test', 'inspector', 'inspector.gyp'),
+ os.path.join(V8_BASE, 'test', 'mkgrokdump', 'mkgrokdump.gyp'),
os.path.join(V8_BASE, 'testing', 'gmock.gyp'),
os.path.join(V8_BASE, 'testing', 'gtest.gyp'),
os.path.join(V8_BASE, 'tools', 'parser-shell.gyp'),
@@ -49,11 +50,13 @@ ALL_GYP_PREFIXES = [
os.path.join('test', 'fuzzer'),
os.path.join('test', 'unittests'),
os.path.join('test', 'inspector'),
+ os.path.join('test', 'mkgrokdump'),
]
GYP_UNSUPPORTED_FEATURES = [
'gcmole',
'setup-isolate-deserialize.cc',
+ 'v8-version.h'
]
GN_FILES = [
@@ -64,6 +67,7 @@ GN_FILES = [
os.path.join(V8_BASE, 'test', 'cctest', 'BUILD.gn'),
os.path.join(V8_BASE, 'test', 'unittests', 'BUILD.gn'),
os.path.join(V8_BASE, 'test', 'inspector', 'BUILD.gn'),
+ os.path.join(V8_BASE, 'test', 'mkgrokdump', 'BUILD.gn'),
os.path.join(V8_BASE, 'tools', 'BUILD.gn'),
]
@@ -77,6 +81,7 @@ GN_UNSUPPORTED_FEATURES = [
'qnx',
'solaris',
'vtune',
+ 'v8-version.h',
'x87',
]
@@ -88,6 +93,7 @@ ALL_GN_PREFIXES = [
os.path.join('test', 'cctest'),
os.path.join('test', 'unittests'),
os.path.join('test', 'inspector'),
+ os.path.join('test', 'mkgrokdump'),
]
def pathsplit(path):
diff --git a/deps/v8/tools/update-wasm-fuzzers.sh b/deps/v8/tools/wasm/update-wasm-fuzzers.sh
index a58681f682..ab9f84be28 100755
--- a/deps/v8/tools/update-wasm-fuzzers.sh
+++ b/deps/v8/tools/wasm/update-wasm-fuzzers.sh
@@ -5,52 +5,52 @@
set -e
-TOOLS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+TOOLS_WASM_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-cd ${TOOLS_DIR}/..
+cd ${TOOLS_WASM_DIR}/../..
-rm -rf test/fuzzer/wasm
-rm -rf test/fuzzer/wasm_asmjs
+rm -rf test/fuzzer/wasm_corpus
+rm -rf test/fuzzer/wasm_asmjs_corpus
-make x64.release -j
+tools/dev/gm.py x64.release all
-mkdir -p test/fuzzer/wasm
-mkdir -p test/fuzzer/wasm_asmjs
+mkdir -p test/fuzzer/wasm_corpus
+mkdir -p test/fuzzer/wasm_asmjs_corpus
# asm.js
./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
--mode=release --no-presubmit --extra-flags="--dump-wasm-module \
- --dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/wasm/asm*
+ --dump-wasm-module-path=./test/fuzzer/wasm_asmjs_corpus/" mjsunit/wasm/asm*
./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
--mode=release --no-presubmit --extra-flags="--dump-wasm-module \
- --dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/asm/*
+ --dump-wasm-module-path=./test/fuzzer/wasm_asmjs_corpus/" mjsunit/asm/*
+# WASM
./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
--mode=release --no-presubmit --extra-flags="--dump-wasm-module \
- --dump-wasm-module-path=./test/fuzzer/wasm_asmjs/" mjsunit/regress/asm/*
-# WASM
+ --dump-wasm-module-path=./test/fuzzer/wasm_corpus/" unittests
./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
--mode=release --no-presubmit --extra-flags="--dump-wasm-module \
- --dump-wasm-module-path=./test/fuzzer/wasm/" unittests
+ --dump-wasm-module-path=./test/fuzzer/wasm_corpus/" wasm-spec-tests/*
./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
--mode=release --no-presubmit --extra-flags="--dump-wasm-module \
- --dump-wasm-module-path=./test/fuzzer/wasm/" mjsunit/wasm/*
+ --dump-wasm-module-path=./test/fuzzer/wasm_corpus/" mjsunit/wasm/*
./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
--mode=release --no-presubmit --extra-flags="--dump-wasm-module \
- --dump-wasm-module-path=./test/fuzzer/wasm/" \
+ --dump-wasm-module-path=./test/fuzzer/wasm_corpus/" \
$(cd test/; ls cctest/wasm/test-*.cc | \
sed -es/wasm\\///g | sed -es/[.]cc/\\/\\*/g)
# Delete items over 20k.
-for x in $(find ./test/fuzzer/wasm/ -type f -size +20k)
+for x in $(find ./test/fuzzer/wasm_corpus/ -type f -size +20k)
do
rm $x
done
-for x in $(find ./test/fuzzer/wasm_asmjs/ -type f -size +20k)
+for x in $(find ./test/fuzzer/wasm_asmjs_corpus/ -type f -size +20k)
do
rm $x
done
# Upload changes.
cd test/fuzzer
-upload_to_google_storage.py -a -b v8-wasm-fuzzer wasm
-upload_to_google_storage.py -a -b v8-wasm-asmjs-fuzzer wasm_asmjs
+upload_to_google_storage.py -a -b v8-wasm-fuzzer wasm_corpus
+upload_to_google_storage.py -a -b v8-wasm-asmjs-fuzzer wasm_asmjs_corpus
diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
new file mode 100755
index 0000000000..be277e9ad7
--- /dev/null
+++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+TOOLS_WASM_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+V8_DIR="${TOOLS_WASM_DIR}/../.."
+
+cd ${V8_DIR}
+
+mkdir -p ./test/wasm-spec-tests/tests/
+rm -rf ./test/wasm-spec-tests/tests/*
+
+./tools/dev/gm.py x64.release all
+
+cd ${V8_DIR}/test/wasm-js/interpreter
+make
+
+cd ${V8_DIR}/test/wasm-js/test/core
+
+./run.py --wasm ${V8_DIR}/test/wasm-js/interpreter/wasm --js ${V8_DIR}/out/x64.release/d8
+
+cp ${V8_DIR}/test/wasm-js/test/core/output/*.js ${V8_DIR}/test/wasm-spec-tests/tests
+
+cd ${V8_DIR}/test/wasm-spec-tests
+upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
+
+
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 0c3c0d7feb..4448e29f88 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,5 +7,5 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up......
The autoroller bought a round of Himbeerbrause. Suddenly .....
-The bartender starts to shake the bottles.....
+The bartender starts to shake the bottles........
.